versionhq 1.2.3.8__py3-none-any.whl → 1.2.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -32,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
  from versionhq.task_graph.draft import workflow


- __version__ = "1.2.3.8"
+ __version__ = "1.2.4.1"
  __all__ = [
      "Agent",

versionhq/_prompt/model.py ADDED
@@ -0,0 +1,147 @@
+ from typing import Any, Dict, List
+ from textwrap import dedent
+
+
+ class Prompt:
+     """A class to format, store, and manage a prompt."""
+
+     task: Any = None
+     agent: Any = None
+     context: str = None
+
+
+     def __init__(self, task, agent):
+         from versionhq.task.model import Task
+         from versionhq.agent.model import Agent
+
+         self.task = task if isinstance(task, Task) else Task(description=str(task))
+         self.agent = agent if isinstance(agent, Agent) else Agent(role=str(agent))
+
+
+     def _draft_output_prompt(self) -> str:
+         output_prompt = ""
+
+         if self.task.pydantic_output:
+             output_prompt = f"""Your response MUST STRICTLY follow the given repsonse format:
+ JSON schema: {str(self.task.pydantic_output)}
+ """
+
+         elif self.task.response_fields:
+             output_prompt, output_formats_to_follow = "", dict()
+             response_format = str(self.task._structure_response_format(model_provider=self.agent.llm.provider))
+             for item in self.task.response_fields:
+                 if item:
+                     output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
+
+             output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+ Response format: {response_format}
+ Ref. Output image: {output_formats_to_follow}
+ """
+         else:
+             output_prompt = "You MUST Return your response as a valid JSON serializable string, enclosed in double quotes. Do not use single quotes, trailing commas, or other non-standard JSON syntax."
+
+         return dedent(output_prompt)
+
+
+     def _draft_context_prompt(self) -> str:
+         """
+         Create a context prompt from the given context in any format: a task object, task output object, list, dict.
+         """
+         from versionhq.task.model import Task, TaskOutput
+
+         context_to_add = None
+         if not self.context:
+             # Logger().log(level="error", color="red", message="Missing a context to add to the prompt. We'll return ''.")
+             return
+
+         match self.context:
+             case str():
+                 context_to_add = self.context
+
+             case Task():
+                 if not self.context.output:
+                     res = self.context.execute()
+                     context_to_add = res._to_context_prompt()
+
+                 else:
+                     context_to_add = self.context.output.raw
+
+             case TaskOutput():
+                 context_to_add = self.context._to_context_prompt()
+
+
+             case dict():
+                 context_to_add = str(self.context)
+
+             case list():
+                 res = ", ".join([self._draft_context_prompt(context=item) for item in self.context])
+                 context_to_add = res
+
+             case _:
+                 pass
+
+         return dedent(context_to_add)
+
+
+     def _draft_user_prompt(self) -> str:
+         output_prompt = self._draft_output_prompt()
+         task_slices = [self.task.description, output_prompt, ]
+
+         if self.context:
+             context_prompt = self._draft_context_prompt()
+             task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_prompt}")
+
+         return "\n".join(task_slices)
+
+
+     def _format_content_prompt(self) -> Dict[str, str]:
+         """Formats and returns image_url content added to the messages."""
+
+         from versionhq._utils import is_valid_url
+
+         content_messages = {}
+
+         if self.task.image:
+             if is_valid_url(self.task.image):
+                 content_messages.update({ "type": "image_url", "image_url": self.task.image })
+             else:
+                 content_messages.update({ "type": "image_url", "image_url": { "url": self.task.image }})
+
+         if self.task.file:
+             if is_valid_url(self.task.file):
+                 content_messages.update({ "type": "image_url", "image_url": self.task.file })
+             else:
+                 content_messages.update({ "type": "image_url", "image_url": { "url": self.task.file }})
+
+         if self.task.audio:
+             from pathlib import Path
+             import base64
+
+             audio_bytes = Path(self.audio_file_path).read_bytes()
+             encoded_data = base64.b64encode(audio_bytes).decode("utf-8")
+             content_messages.update({ "type": "image_url", "image_url": "data:audio/mp3;base64,{}".format(encoded_data)})
+
+         return content_messages
+
+     @property
+     def messages(self) -> List[Dict[str, str]]:
+         user_prompt = self._draft_user_prompt()
+         content_prompt = self._format_content_prompt()
+
+         messages = []
+         messages.append(
+             {
+                 "role": "user",
+                 "content": [
+                     {
+                         "type": "text",
+                         "text": user_prompt
+                     },
+                     content_prompt,
+                 ]
+             })
+
+         if self.use_developer_prompt:
+             messages.append({ "role": "developer", "content": self.backstory })
+
+         return messages
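For reference, a minimal sketch of the message layout that the new Prompt.messages property assembles — a standalone illustration, not versionhq code; the user_prompt, content_prompt, and backstory values below are hypothetical placeholders:

    # Shape of the payload built by Prompt.messages: one multimodal "user" entry,
    # plus an optional "developer" entry when developer prompts are enabled.
    user_prompt = "Summarize the attached chart."  # text drafted from the task description + output rules
    content_prompt = {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}}
    backstory = "You are a data analyst."

    messages = [
        {"role": "user", "content": [{"type": "text", "text": user_prompt}, content_prompt]},
        {"role": "developer", "content": backstory},
    ]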
versionhq/_utils/__init__.py CHANGED
@@ -1,3 +1,4 @@
  from versionhq._utils.logger import Logger
  from versionhq._utils.process_config import process_config
  from versionhq._utils.vars import KNOWLEDGE_DIRECTORY, MAX_FILE_NAME_LENGTH
+ from versionhq._utils.is_valid_url import is_valid_url
versionhq/_utils/is_valid_url.py ADDED
@@ -0,0 +1,15 @@
+ from urllib.parse import urlparse
+
+
+ def is_valid_url(url: str) -> bool:
+     try:
+         result = urlparse(url)
+         return all(
+             [
+                 result.scheme in ("http", "https", "gs"),
+                 result.netloc,
+                 len(result.netloc.split(".")) >= 2,  # Ensure domain has TLD
+             ]
+         )
+     except Exception:
+         return False
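A quick usage sketch of the new helper (imported via the updated versionhq/_utils/__init__.py; the example URLs are arbitrary):

    from versionhq._utils import is_valid_url

    is_valid_url("https://example.com/report.pdf")   # True: allowed scheme and a dotted host
    is_valid_url("/tmp/report.pdf")                   # False: no scheme/netloc, treated as a local path
    is_valid_url("ftp://example.com/report.pdf")      # False: scheme not in ("http", "https", "gs")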
versionhq/agent/model.py CHANGED
@@ -12,8 +12,7 @@ from versionhq.tool.model import Tool, ToolSet, BaseTool
  from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
  from versionhq.memory.contextual_memory import ContextualMemory
  from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
- from versionhq._utils.logger import Logger
- from versionhq._utils.process_config import process_config
+ from versionhq._utils import Logger, process_config, is_valid_url


  load_dotenv(override=True)
@@ -217,7 +216,7 @@ class Agent(BaseModel):
              if isinstance(item, BaseKnowledgeSource):
                  knowledge_sources.append(item)

-             elif isinstance(item, str) and "http" in item and DoclingSource._validate_url(url=item) == True:
+             elif isinstance(item, str) and "http" in item and is_valid_url(url=item) == True:
                  docling_fp.append(item)

              elif isinstance(item, str):
@@ -369,7 +368,7 @@ class Agent(BaseModel):

      def _invoke(
          self,
-         prompts: str,
+         messages: List[Dict[str, str]] = None,
          response_format: Optional[Dict[str, Any]] = None,
          tools: Optional[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = None,
          tool_res_as_final: bool = False,
@@ -385,11 +384,6 @@ class Agent(BaseModel):
          iterations = 0
          raw_response = None

-         messages = []
-         messages.append({ "role": "user", "content": prompts })
-         if self.use_developer_prompt:
-             messages.append({ "role": "developer", "content": self.backstory })
-
          try:
              if self._rpm_controller and self.max_rpm:
                  self._rpm_controller.check_or_wait()
@@ -480,7 +474,7 @@ class Agent(BaseModel):
          return self


-     def start(self, context: Any = None, tool_res_as_final: bool = False) -> Any | None:
+     def start(self, context: Any = None, tool_res_as_final: bool = False, image: str = None, file: str = None, audio: str = None) -> Any | None:
          """
          Defines and executes a task when it is not given and returns TaskOutput object.
          """
@@ -498,6 +492,9 @@ class Agent(BaseModel):
              description=f"Generate a simple result in a sentence to achieve the goal: {self.goal if self.goal else self.role}. If needed, list up necessary steps in concise manner.",
              pydantic_output=Output,
              tool_res_as_final=tool_res_as_final,
+             image=image, #REFINEME - query memory/knowledge or self create
+             file=file,
+             audio=audio,
          )
          res = task.execute(agent=self, context=context)
          return res
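A hedged usage sketch of the extended start() signature; the agent role, context, and file path are made up for illustration:

    import versionhq as vhq

    agent = vhq.Agent(role="Visual QA Agent")
    # image/file/audio accept an absolute file path or URL and are forwarded to the auto-generated Task.
    res = agent.start(context="Focus on the revenue trend.", image="/abs/path/to/chart.png")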
@@ -520,20 +517,20 @@ class Agent(BaseModel):
          if self.max_rpm and self._rpm_controller:
              self._rpm_controller._reset_request_count()

-         task_prompt = task._prompt(model_provider=self.llm.provider, context=context)
+         user_prompt = task._user_prompt(model_provider=self.llm.provider, context=context)

          if self._knowledge:
-             agent_knowledge = self._knowledge.query(query=[task_prompt,], limit=5)
+             agent_knowledge = self._knowledge.query(query=[user_prompt,], limit=5)
              if agent_knowledge:
                  agent_knowledge_context = extract_knowledge_context(knowledge_snippets=agent_knowledge)
                  if agent_knowledge_context:
-                     task_prompt += agent_knowledge_context
+                     user_prompt += agent_knowledge_context

          if rag_tools:
              for item in rag_tools:
                  rag_tool_context = item.run(agent=self, query=task.description)
                  if rag_tool_context:
-                     task_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)
+                     user_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)

          if self.with_memory == True:
              contextual_memory = ContextualMemory(
@@ -543,18 +540,39 @@ class Agent(BaseModel):
              query = f"{task.description} {context_str}".strip()
              memory = contextual_memory.build_context_for_task(query=query)
              if memory.strip() != "":
-                 task_prompt += memory.strip()
+                 user_prompt += memory.strip()

          ## comment out for now
          # if self.networks and self.networks._train:
-         #     task_prompt = self._training_handler(task_prompt=task_prompt)
+         #     user_prompt = self._training_handler(user_prompt=user_prompt)
          # else:
-         #     task_prompt = self._use_trained_data(task_prompt=task_prompt)
+         #     user_prompt = self._use_trained_data(user_prompt=user_prompt)
+
+         content_prompt = task._format_content_prompt()
+
+         messages = []
+         if content_prompt:
+             messages.append(
+                 {
+                     "role": "user",
+                     "content": [
+                         {
+                             "type": "text",
+                             "text": user_prompt
+                         },
+                         content_prompt,
+                     ]
+                 })
+         else:
+             messages.append({ "role": "user", "content": user_prompt })
+
+         if self.use_developer_prompt:
+             messages.append({ "role": "developer", "content": self.backstory })

          try:
              self._times_executed += 1
              raw_response = self._invoke(
-                 prompts=task_prompt,
+                 messages=messages,
                  response_format=task._structure_response_format(model_provider=self.llm.provider),
                  tools=tools,
                  tool_res_as_final=task.tool_res_as_final,
versionhq/agent_network/formation.py CHANGED
@@ -47,9 +47,12 @@ def form_agent_network(
              Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
              formation = None

-         case int() | float():
+         case int():
              formation = Formation(int(formation))

+         case float():
+             formation = Formation(round(formation))
+
          case _:
              Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
              formation = None
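The practical difference: a float such as 2.6 previously truncated via int() to member 2, while round() now maps it to member 3. A standalone sketch with a hypothetical IntEnum — the member names and values here are illustrative, not the real Formation values:

    from enum import IntEnum

    class Formation(IntEnum):  # hypothetical values for illustration only
        SOLO = 1
        SUPERVISING = 2
        SQUAD = 3

    Formation(int(2.6))    # Formation.SUPERVISING - old behavior, truncation
    Formation(round(2.6))  # Formation.SQUAD - new behavior, nearest member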
@@ -64,14 +67,14 @@ def form_agent_network(
      prompt_expected_outcome = expected_outcome if isinstance(expected_outcome, str) else str(expected_outcome.model_dump()) if type(expected_outcome) == BaseModel else ""

      class Outcome(BaseModel):
-         formation: Enum
+         formation: str
          agent_roles: list[str]
          task_descriptions: list[str]
          task_outcomes: list[list[str]]
          leader_agent: str

      vhq_task = Task(
-         description=f"Design a team of specialized agents to fully automate the following task and achieve the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the team formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
+         description=f"Design a team of specialized agents to fully automate the following task and achieve the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
          pydantic_output=Outcome
      )

@@ -80,8 +83,12 @@ def form_agent_network(

      res = vhq_task.execute(agent=vhq_formation_planner, context=context)

-     formation_keys = ([k for k in Formation._member_map_.keys() if k == res.pydantic.formation.upper()]
-         if res.pydantic else [k for k in Formation._member_map_.keys() if k == res.json_dict["formation"].upper()])
+     formation_keys = []
+     if hasattr(res.pydantic, "formation"):
+         formation_keys = [k for k in Formation._member_map_.keys() if k == res.pydantic.formation.upper()]
+     elif "formation" in res.json_dict:
+         formation_keys = [k for k in Formation._member_map_.keys() if k == res.json_dict["formation"].upper()]
+
      _formation = Formation[formation_keys[0]] if formation_keys else Formation.SUPERVISING

      network_tasks = []
versionhq/agent_network/model.py CHANGED
@@ -306,12 +306,8 @@ class AgentNetwork(BaseModel):
          return res, task_graph


-     def launch(
-         self, kwargs_pre: Optional[Dict[str, str]] = None, kwargs_post: Optional[Dict[str, Any]] = None, start_index: int = None
-     ) -> Tuple[TaskOutput, TaskGraph]:
-         """
-         Launch the agent network - executing tasks and recording their outputs.
-         """
+     def launch(self, kwargs_pre: Optional[Dict[str, str]] = None, kwargs_post: Optional[Dict[str, Any]] = None, start_index: int = None) -> Tuple[TaskOutput, TaskGraph]:
+         """Launches agent network by executing tasks in the network and recording the outputs"""

          self._assign_tasks()

versionhq/knowledge/source_docling.py CHANGED
@@ -1,6 +1,5 @@
  from pathlib import Path
- from typing import Iterator, List, Optional
- from urllib.parse import urlparse
+ from typing import Iterator, List

  try:
      from docling.datamodel.base_models import InputFormat
@@ -20,7 +19,7 @@ from pydantic import Field

  from versionhq.knowledge.source import BaseKnowledgeSource
  from versionhq.storage.utils import fetch_db_storage_path
- from versionhq._utils.vars import KNOWLEDGE_DIRECTORY
+ from versionhq._utils import KNOWLEDGE_DIRECTORY, is_valid_url


  class DoclingSource(BaseKnowledgeSource):
@@ -83,21 +82,6 @@ class DoclingSource(BaseKnowledgeSource):
              yield chunk.text


-     @staticmethod
-     def _validate_url(url: str) -> bool:
-         try:
-             result = urlparse(url)
-             return all(
-                 [
-                     result.scheme in ("http", "https"),
-                     result.netloc,
-                     len(result.netloc.split(".")) >= 2,  # Ensure domain has TLD
-                 ]
-             )
-         except Exception:
-             return False
-
-
      def model_post_init(self, _) -> None:
          self.valid_file_paths = self.validate_content()
          self.content.extend(self._load_content())
@@ -109,7 +93,7 @@ class DoclingSource(BaseKnowledgeSource):
              if isinstance(path, str):
                  if path.startswith(("http://", "https://")):
                      try:
-                         if self._validate_url(path):
+                         if is_valid_url(path):
                              processed_paths.append(path)
                          else:
                              raise ValueError(f"Invalid URL format: {path}")
versionhq/llm/model.py CHANGED
@@ -8,12 +8,13 @@ from contextlib import contextmanager
  from typing import Any, Dict, List, Optional
  from typing_extensions import Self

+ import litellm
  from litellm import JSONSchemaValidationError, get_supported_openai_params
  from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict

  from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS, PROVIDERS, ENDPOINT_PROVIDERS, ENV_VARS
  from versionhq.tool.model import Tool, ToolSet
- from versionhq._utils.logger import Logger
+ from versionhq._utils import Logger


  load_dotenv(override=True)
@@ -47,7 +48,6 @@ class FilteredStream:
  @contextmanager
  def suppress_warnings():
      with warnings.catch_warnings():
-         import litellm
          litellm.set_verbose = False
          warnings.filterwarnings(action="ignore")
          old_stdout = sys.stdout
@@ -102,7 +102,6 @@ class LLM(BaseModel):
          """
          Validate the given model, provider, interface provider.
          """
-         import litellm
          litellm.drop_params = True

          self._init_model_name = self.model
@@ -180,7 +179,6 @@ class LLM(BaseModel):
          """
          Set up valid config params after setting up a valid model, provider, interface provider names.
          """
-         import litellm
          litellm.drop_params = True

          self._tokens = 0
@@ -208,9 +206,7 @@ class LLM(BaseModel):
          valid_config, valid_keys = dict(), list()

          if self.model:
-             valid_keys = get_supported_openai_params(
-                 model=self.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion"
-             )
+             valid_keys = get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")

          if not valid_keys:
              valid_keys = PARAMS.get("common")
@@ -269,7 +265,6 @@ class LLM(BaseModel):


      def _supports_stop_words(self) -> bool:
-         import litellm
          supported_params = get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider)
          return "stop" in supported_params if supported_params else False

@@ -282,7 +277,6 @@ class LLM(BaseModel):


      def _set_callbacks(self, callbacks: List[Any]):
-         import litellm
          callback_types = [type(callback) for callback in callbacks]
          for callback in litellm.success_callback[:]:
              if type(callback) in callback_types:
@@ -306,7 +300,6 @@ class LLM(BaseModel):
          """
          Execute LLM based on the agent's params and model params.
          """
-         import litellm
          litellm.drop_params = True
          litellm.set_verbose = True

@@ -318,8 +311,12 @@ class LLM(BaseModel):
          res, tool_res = None, ""
          cred = self._set_env_vars()

-         if not tools:
+         if self.provider == "gemini":
+             self.response_format = { "type": "json_object" } if not tools else None
+         else:
              self.response_format = response_format
+
+         if not tools:
              params = self._create_valid_params(config=config)
              res = litellm.completion(model=self.model, messages=messages, stream=False, **params, **cred)
              self._tokens += int(res["usage"]["total_tokens"])
@@ -327,7 +324,6 @@ class LLM(BaseModel):

          else:
              try:
-                 self.response_format = { "type": "json_object" } if tool_res_as_final and self.provider != "gemini" else response_format
                  self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
                  params = self._create_valid_params(config=config)
                  res = litellm.completion(model=self.model, messages=messages, **params, **cred)
versionhq/task/evaluation.py CHANGED
@@ -16,7 +16,7 @@ class EvaluationItem(BaseModel):
      criteria: str
      suggestion: str
      score: float
-     weight: int = 1
+     weight: int


  class Evaluation(BaseModel):
versionhq/task/model.py CHANGED
@@ -16,7 +16,7 @@ from pydantic_core import PydanticCustomError
  import versionhq as vhq
  from versionhq.task.evaluation import Evaluation, EvaluationItem
  from versionhq.tool.model import Tool, ToolSet
- from versionhq._utils import process_config, Logger
+ from versionhq._utils import process_config, Logger, is_valid_url


  class TaskExecutionType(enum.Enum):
@@ -187,7 +187,7 @@ class TaskOutput(BaseModel):
      evaluation: Optional[InstanceOf[Evaluation]] = Field(default=None, description="stores overall evaluation of the task output. stored in ltm")


-     def to_context_prompt(self) -> str:
+     def _to_context_prompt(self) -> str:
          """
          Returns response in string as a prompt context.
          """
@@ -288,6 +288,10 @@ class Task(BaseModel):
      can_use_agent_tools: bool = Field(default=True, description="whether the agent can use their own tools when executing the task")
      tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")

+     image: Optional[str] = Field(default=None, description="absolute file path or url in string")
+     file: Optional[str] = Field(default=None, description="absolute file path or url in string")
+     audio: Optional[str] = Field(default=None, description="absolute file path or url in string")
+
      # executing
      execution_type: TaskExecutionType = Field(default=TaskExecutionType.SYNC)
      allow_delegation: bool = Field(default=False, description="whether to delegate the task to another agent")
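A hedged sketch of attaching content to a task through the new fields; the description, agent role, and path are placeholders:

    import versionhq as vhq

    task = vhq.Task(
        description="Describe the main objects in the image.",
        image="/abs/path/to/photo.jpg",  # absolute file path or URL, per the field description
    )
    res = task.execute(agent=vhq.Agent(role="Image Analyst"))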
@@ -301,9 +305,10 @@ class Task(BaseModel):

      # recording
      _tokens: int = 0
+     _tool_errors: int = 0
+     _format_errors: int = 0
+     _delegations: int = 0
      processed_agents: Set[str] = Field(default_factory=set, description="store keys of the agents that executed the task")
-     tool_errors: int = 0
-     delegations: int = 0
      output: Optional[TaskOutput] = Field(default=None, description="store the final TaskOutput object")


@@ -346,19 +351,19 @@ class Task(BaseModel):
          return self


-     def _draft_output_prompt(self, model_provider: str) -> str:
-         """
-         Draft prompts on the output format by converting `
-         """
-
+     def _draft_output_prompt(self, model_provider: str = None) -> str:
          output_prompt = ""

          if self.pydantic_output:
-             output_prompt = f"""
- Your response MUST STRICTLY follow the given repsonse format:
- JSON schema: {str(self.pydantic_output)}
- """
+             output_prompt, output_formats_to_follow = "", dict()
+             response_format = str(self._structure_response_format(model_provider=model_provider))
+             for k, v in self.pydantic_output.model_fields.items():
+                 output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"

+             output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+ Response format: {response_format}
+ Ref. Output image: {output_formats_to_follow}
+ """
          elif self.response_fields:
              output_prompt, output_formats_to_follow = "", dict()
              response_format = str(self._structure_response_format(model_provider=model_provider))
@@ -366,13 +371,16 @@ JSON schema: {str(self.pydantic_output)}
                  if item:
                      output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"

-             output_prompt = f"""
- Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+             output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
  Response format: {response_format}
  Ref. Output image: {output_formats_to_follow}
  """
+         # elif not self.tools or self.can_use_agent_tools == False:
          else:
-             output_prompt = "You MUST Return your response as a valid JSON serializable string, enclosed in double quotes. Do not use single quotes, trailing commas, or other non-standard JSON syntax."
+             output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."
+
+         # else:
+         #     output_prompt = "You will return a response in a concise manner."

          return dedent(output_prompt)

@@ -394,13 +402,13 @@ Ref. Output image: {output_formats_to_follow}
              case Task():
                  if not context.output:
                      res = context.execute()
-                     context_to_add = res.to_context_prompt()
+                     context_to_add = res._to_context_prompt()

                  else:
                      context_to_add = context.output.raw

              case TaskOutput():
-                 context_to_add = context.to_context_prompt()
+                 context_to_add = context._to_context_prompt()


              case dict():
@@ -416,7 +424,7 @@ Ref. Output image: {output_formats_to_follow}
          return dedent(context_to_add)


-     def _prompt(self, model_provider: str = None, context: Optional[Any] = None) -> str:
+     def _user_prompt(self, model_provider: str = None, context: Optional[Any] = None) -> str:
          """
          Format the task prompt and cascade it to the agent.
          """
@@ -430,11 +438,36 @@ Ref. Output image: {output_formats_to_follow}
          return "\n".join(task_slices)


+     def _format_content_prompt(self) -> Dict[str, str]:
+         """Formats content (file, image, audio) prompts that added to the messages sent to the LLM."""
+
+         from pathlib import Path
+         import base64
+
+         content_messages = {}
+
+         if self.image:
+             with open(self.image, "rb") as file:
+                 content = file.read()
+                 if content:
+                     encoded_file = base64.b64encode(content).decode("utf-8")
+                     img_url = f"data:image/jpeg;base64,{encoded_file}"
+                     content_messages.update({ "type": "image_url", "image_url": { "url": img_url }})
+
+         if self.file:
+             if is_valid_url(self.file):
+                 content_messages.update({ "type": "image_url", "image_url": self.file })
+
+         if self.audio:
+             audio_bytes = Path(self.audio).read_bytes()
+             encoded_data = base64.b64encode(audio_bytes).decode("utf-8")
+             content_messages.update({ "type": "image_url", "image_url": "data:audio/mp3;base64,{}".format(encoded_data)})
+
+         return content_messages
+
+
      def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
-         """
-         Structure a response format either from`response_fields` or `pydantic_output`.
-         1 nested item is accepted.
-         """
+         """Structures `response_fields` or `pydantic_output` to a LLM response format."""

          from versionhq.task.structured_response import StructuredOutput

@@ -452,7 +485,7 @@ Ref. Output image: {output_formats_to_follow}
                      required_fields.append(item.title)

              response_schema = {
-                 "type": "object",
+                 "type": data_type,
                  "properties": properties,
                  "required": required_fields,
                  "additionalProperties": False,
@@ -469,10 +502,43 @@ Ref. Output image: {output_formats_to_follow}
          return response_format


+     def _sanitize_raw_output(self, raw: str) -> Dict[str, str]:
+         """Sanitizes raw output and prepare for json.loads"""
+
+         import re
+         import ast
+
+         output, j = None, None
+         r = str(raw).strip()
+         r = r.replace("true", "True").replace("false", "False").replace("```json", '"').replace("```", '"').replace('\n', '').replace('\\', '')
+         r = re.sub("^'", '"', r)
+         r = re.sub(r"'\b", '"', r)
+         r = r.strip()
+         r = r.replace(" ", "")
+         try:
+             output = json.loads(r)
+         except:
+             try: j = json.dumps(eval(r))
+             except:
+                 try: j = json.dumps(str(r))
+                 except: j = r
+             output = json.loads(j)
+
+         if isinstance(output, dict):
+             return output
+         else:
+             try:
+                 output = ast.literal_eval(j)
+             except:
+                 output = ast.literal_eval(r)
+
+             return output if isinstance(output, dict) else { "output": str(r) }
+
+
      def _create_json_output(self, raw: str) -> Dict[str, Any]:
-         """
-         Create json (dict) output from the raw output and `response_fields` information.
-         """
+         """Creates JSON output from the raw output."""
+
+         output = None

          if raw is None or raw == "":
              Logger().log(level="warning", message="The model returned an empty response. Returning an empty dict.", color="yellow")
@@ -480,27 +546,14 @@ Ref. Output image: {output_formats_to_follow}
              return output

          try:
-             r = str(raw).replace("true", "True").replace("false", "False")
-             j = json.dumps(eval(r))
-             output = json.loads(j)
+             output = json.loads(raw)
              if isinstance(output, dict):
                  return output
-
              else:
-                 r = str(raw).strip().replace("{'", '{"').replace("{ '", '{"').replace("': '", '": "').replace("'}", '"}').replace("' }", '"}').replace("', '", '", "').replace("['", '["').replace("[ '", '[ "').replace("']", '"]').replace("' ]", '" ]').replace("{\n'", '{"').replace("{\'", '{"').replace("true", "True").replace("false", "False").replace('\"', "'")
-                 j = json.dumps(eval(r))
-                 output = json.loads(j)
-
-                 if isinstance(output, dict):
-                     return output
-
-                 else:
-                     import ast
-                     output = ast.literal_eval(r)
-                     return output if isinstance(output, dict) else { "output": str(r) }
-
+                 output = self._sanitize_raw_output(raw=raw)
+                 return output
          except:
-             output = { "output": str(raw) }
+             output = self._sanitize_raw_output(raw=raw)
              return output


@@ -674,7 +727,7 @@ Ref. Output image: {output_formats_to_follow}
          if self.allow_delegation == True:
              agent_to_delegate = self._select_agent_to_delegate(agent=agent)
              agent = agent_to_delegate
-             self.delegations += 1
+             self._delegations += 1

          if self.tool_res_as_final == True:
              started_at = datetime.datetime.now()
versionhq/task/structured_response.py CHANGED
@@ -38,7 +38,9 @@ class StructuredObject:
          description = self.field.description if hasattr(self.field, "description") and self.field.description is not None else ""
          field_name = self.field.__name__ if hasattr(self.field, "__name__") and self.field.__name__ else self.title
          self.properties.update({ field_name : { "type": SchemaType(self.field.annotation.__args__).convert() }})
-         self.required.append(field_name)
+
+         if field_name not in self.required:
+             self.required.append(field_name)

          return {
              self.title: {
versionhq/task_graph/draft.py CHANGED
@@ -33,12 +33,11 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F

      dep_type_prompt = ", ".join([k for k in DependencyType._member_map_.keys()])

-     graph_expert = Agent(
-         role="vhq-G Expert",
+     vhq_graph_expert = Agent(
+         role="vhq-Graph Expert",
          goal="design the most resource-efficient workflow graph to achieve the given goal",
          knowledge_sources=[
              "https://en.wikipedia.org/wiki/Graph_theory",
-             # "https://www.geeksforgeeks.org/graph-data-structure-and-algorithms/?ref=lbp",
              "https://www.geeksforgeeks.org/graph-and-its-representations/",
              ", ".join([k for k in DependencyType._member_map_.keys()]),
          ],
@@ -60,12 +59,16 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
              ])
          ]
      )
-     res = task.execute(agent=graph_expert, context=[context_prompt, context])
+     res = task.execute(agent=vhq_graph_expert, context=[context_prompt, context])

      if not res:
          return None

-     task_items = res.json_dict["tasks"]
+     task_items = res.json_dict["tasks"] if "tasks" in res.json_dict else []
+
+     if not task_items:
+         return None
+
      tasks, nodes = [], []

      for item in task_items:
@@ -92,7 +95,6 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
          task_graph.add_dependency(
              source=source.identifier, target=target.identifier, dependency_type=dependency_type)

-
      task_graph.visualize()

      if human:
versionhq-1.2.3.8.dist-info/METADATA → versionhq-1.2.4.1.dist-info/METADATA RENAMED
@@ -1,7 +1,7 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.2.3.8
- Summary: An agentic orchestration framework for building agent networks that handle task automation.
+ Version: 1.2.4.1
+ Summary: A Python framework for autonomous agent networks that handle task automation with multi-step reasoning.
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License

@@ -88,15 +88,15 @@ Requires-Dist: torchvision>=0.21.0; extra == "torch"

  # Overview

- [![DL](https://img.shields.io/badge/Download-20K+-red)](https://clickpy.clickhouse.com/dashboard/versionhq)
+ [![DL](https://img.shields.io/badge/Download-30K+-red)](https://clickpy.clickhouse.com/dashboard/versionhq)
  ![MIT license](https://img.shields.io/badge/License-MIT-green)
  [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
- ![PyPI](https://img.shields.io/badge/PyPI-v1.2.2+-blue)
+ ![PyPI](https://img.shields.io/badge/PythonSDK-v1.2.4+-blue)
  ![python ver](https://img.shields.io/badge/Python-3.11|3.12|3.13-purple)
  ![pyenv ver](https://img.shields.io/badge/pyenv-2.5.0-orange)


- Agentic orchestration framework for multi-agent networks and task graphs for complex task automation.
+ A Python framework for autonomous agent networks that handle task automation with multi-step reasoning.

  **Visit:**

@@ -154,11 +154,11 @@ Agents adapt their formation based on task complexity.
  You can specify a desired formation or allow the agents to determine it autonomously (default).


- | | **Solo Agent** | **Supervising** | **Squad** | **Random** |
- | :--- | :--- | :--- | :--- | :--- |
- | **Formation** | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/rbgxttfoeqqis1ettlfz.png" alt="solo" width="200"> | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/zhungor3elxzer5dum10.png" alt="solo" width="200"> | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/dnusl7iy7kiwkxwlpmg8.png" alt="solo" width="200"> | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/sndpczatfzbrosxz9ama.png" alt="solo" width="200"> |
- | **Usage** | <ul><li>A single agent with tools, knowledge, and memory.</li><li>When self-learning mode is on - it will turn into **Random** formation.</li></ul> | <ul><li>Leader agent gives directions, while sharing its knowledge and memory.</li><li>Subordinates can be solo agents or networks.</li></ul> | <ul><li>Share tasks, knowledge, and memory among network members.</li></ul> | <ul><li>A single agent handles tasks, asking help from other agents without sharing its memory or knowledge.</li></ul> |
- | **Use case** | An email agent drafts promo message for the given audience. | The leader agent strategizes an outbound campaign plan and assigns components such as media mix or message creation to subordinate agents. | An email agent and social media agent share the product knowledge and deploy multi-channel outbound campaign. | 1. An email agent drafts promo message for the given audience, asking insights on tones from other email agents which oversee other clusters. 2. An agent calls the external agent to deploy the campaign. |
+ | | **Solo Agent** | **Supervising** | **Squad** | **Random** |
+ | :--- | :--- | :--- | :--- | :--- |
+ | **Formation** | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/rbgxttfoeqqis1ettlfz.png" alt="solo" width="200"> | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/zhungor3elxzer5dum10.png" alt="solo" width="200"> | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/dnusl7iy7kiwkxwlpmg8.png" alt="solo" width="200"> | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/sndpczatfzbrosxz9ama.png" alt="solo" width="200"> |
+ | **Usage** | <ul><li>A single agent with tools, knowledge, and memory.</li><li>When self-learning mode is on - it will turn into **Random** formation.</li></ul> | <ul><li>Leader agent gives directions, while sharing its knowledge and memory.</li><li>Subordinates can be solo agents or networks.</li></ul> | <ul><li>Share tasks, knowledge, and memory among network members.</li></ul> | <ul><li>A single agent handles tasks, asking help from other agents without sharing its memory or knowledge.</li></ul> |
+ | **Use case** | An email agent drafts promo message for the given audience. | The leader agent strategizes an outbound campaign plan and assigns components such as media mix or message creation to subordinate agents. | An email agent and social media agent share the product knowledge and deploy multi-channel outbound campaign. | 1. An email agent drafts promo message for the given audience, asking insights on tones from other email agents which oversee other clusters. 2. An agent calls the external agent to deploy the campaign. |

  <hr />

versionhq-1.2.3.8.dist-info/RECORD → versionhq-1.2.4.1.dist-info/RECORD RENAMED
@@ -1,6 +1,8 @@
- versionhq/__init__.py,sha256=-XmzMUZrIz4XrCUOjV4K8kAbNGaSeptDjYOyWfMUYOc,2980
- versionhq/_utils/__init__.py,sha256=d-vYVcORZKG-kkLe_fzE8VbViDpAk9DDOKe2fVK25ew,178
+ versionhq/__init__.py,sha256=8lc6ZRR-zxO1Bmqh3OsFM1GD_SDxufX0ebdyTsMrMsE,2980
+ versionhq/_prompt/model.py,sha256=U2Uba84nVljMN4U6ALGGhVPr1aM4C9AUDTo-_WzTCYs,5205
+ versionhq/_utils/__init__.py,sha256=llXOcGFlR9YF5iMI5uFb4twvM9wo-vmoMw8y1KzQVVc,233
  versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
+ versionhq/_utils/is_valid_url.py,sha256=m8Mswvb-90FJtx1Heq6hPFDbwGgrv_R3wSbZQmEPM9Q,379
  versionhq/_utils/llm_as_a_judge.py,sha256=RM0oYfoeanuUyUL3Ewl6_8Xn1F5Axd285UMH46kxG1I,2378
  versionhq/_utils/logger.py,sha256=iHxGjm3BvUo5dHKLU88_pc0Z45wzSHOjyJGQkb7OADk,3255
  versionhq/_utils/process_config.py,sha256=YTGY_erW335RfceQfzS18YAqq-AAb-iSvKSjN7noD2E,782
@@ -8,14 +10,14 @@ versionhq/_utils/usage_metrics.py,sha256=xgYGRW3OTuK9EJyi3QYJeYcJl7dL27olcWaLo_7
  versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
  versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/agent/inhouse_agents.py,sha256=BPkvEyMH8VnZWsMeCwsGplDT_kLwlIejeRcr-6ItGqQ,2637
- versionhq/agent/model.py,sha256=xKfg20EdL11HiFbG7V7ErHlFSDpvh5lopnHDfWULJM4,25996
+ versionhq/agent/model.py,sha256=mXc_sYbrH_-dbuMnlxMArCCjxmP36MMaSWM-1lS5rKI,26600
  versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
  versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
  versionhq/agent/TEMPLATES/Backstory.py,sha256=dkfuATUQ2g2WoUKkmgAIch-RB--bektGoQaUlsDOn0g,529
  versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/agent_network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/agent_network/formation.py,sha256=CYKNUeKC392wW3leIDIAaGiKADSsumC_vTe0VOnNwRs,7901
- versionhq/agent_network/model.py,sha256=aSGZf-ZLi4I2Da9-RW8CfZ32hk9MN_RIT9Cl4xvHlE8,16050
+ versionhq/agent_network/formation.py,sha256=8lFOxp9f9rG3UdXiyDbDbrtHG9nNO2lkAOLpUSQXjyo,8067
+ versionhq/agent_network/model.py,sha256=-vLBqPCtfLxTf17toJkE7Gkxg1SwlrA-Frf2Pc_uB50,16021
  versionhq/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/clients/customer/__init__.py,sha256=-YXh1FQfvpfLacK8SUC7bD7Wx_eIEi4yrkCC_cUasFg,217
@@ -29,11 +31,11 @@ versionhq/knowledge/_utils.py,sha256=YWRF8U533cfZes_gZqUvdj-K24MD2ri1R0gjc_aPYyc
  versionhq/knowledge/embedding.py,sha256=KfHc__1THxb5jrg1EMrF-v944RDuIr2hE0l-MtM3Bp0,6826
  versionhq/knowledge/model.py,sha256=ixH8n5kLtJEp1nPAFYA0piYm-n0nnFDtWFp0r9YEVAs,1787
  versionhq/knowledge/source.py,sha256=-hEUPtJUHHMx4rUKtiHl19J8xAMw-WVBw34zwa2jZ08,13630
- versionhq/knowledge/source_docling.py,sha256=dcu1ITqPXwWZ_lK-6tykEKhhC82eNRTMoWRpxK9Kzls,5441
+ versionhq/knowledge/source_docling.py,sha256=XpavmLvh4dLcuTikj8MCE9KG52oQMafy7_wBneliMK0,4994
  versionhq/knowledge/storage.py,sha256=Kd-4r6aWM5EDaoXrzKXbgi1hY6tysSQARPGXM95qMmU,8266
  versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/llm/llm_vars.py,sha256=qSG-_pYeWksdMmwASXpjQqf97fMovsY4lNTSCHQF88k,5694
- versionhq/llm/model.py,sha256=W9zcrC18zEz8BKKG2HJ2rO-8rdphif7sdiAHNc9oJbU,17400
+ versionhq/llm/model.py,sha256=P4J6ZU0vY5HU7XHLelz7oznPmeEElHPFsAo-2Vd8DQ0,17255
  versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/memory/contextual_memory.py,sha256=QEMVvHuEXxY7M6-12S8HhyFKf108KfX8Zzt7paPW048,3882
  versionhq/memory/model.py,sha256=VQR1229t7GQPMItlGAHLtJrb6LrZfSoRA1DRW4z0SOU,8234
@@ -45,14 +47,14 @@ versionhq/storage/rag_storage.py,sha256=bS2eE874obarYl-4hT6ZWYWTRsqtfuGpKgKzERmM
  versionhq/storage/task_output_storage.py,sha256=M8vInLJ5idGAq17w1juHKXtyPyF-B-rK_P8UcqD-Px8,5357
  versionhq/storage/utils.py,sha256=r5ghA_ktdR2IuzlzKqZYCjsNxztEMzyhWLneA4cFuWY,748
  versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/task/evaluation.py,sha256=yKnqj3UwdNAMaFEBZmWWf0Hc09-M9hM8VETnaovlFco,3817
+ versionhq/task/evaluation.py,sha256=qQSA5ZWTWA3he54ystsYpTKXJWv68gBL6DCq8ZW1bl8,3813
  versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
- versionhq/task/model.py,sha256=8HQLzzAfM03gHL5M_7oL7UW-mZvI7rPCJ2Jxsv49VtI,29122
- versionhq/task/structured_response.py,sha256=4q-hQPu7oMMHHXEzh9YW4SJ7N5eCZ7OfZ65juyl_jCI,5000
+ versionhq/task/model.py,sha256=YawjfqUwmOynoFlO_3L7qVbK8PmF-V6dUA-Ox_55kpU,31415
+ versionhq/task/structured_response.py,sha256=tqOHpch8CVmMj0aZXjdDWtPNcVmBW8DVZnBvPBwS4PM,5053
  versionhq/task/TEMPLATES/Description.py,sha256=hKhpbz0ztbkUMXz9KiL-P40fis9OB5ICOdL9jCtgAhU,864
  versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/task_graph/colors.py,sha256=naJCx4Vho4iuJtbW8USUXb-M5uYvd5ds2p8qbjUfRus,669
- versionhq/task_graph/draft.py,sha256=l18XacRsbDhAv6CvKMnUMI26IDuizA1UNWHbL1q5gn4,5099
+ versionhq/task_graph/draft.py,sha256=K14J90Heii2JiCOsQsz1uE0NWout-lr8wpJbk-KEYfI,5099
  versionhq/task_graph/model.py,sha256=3DzZXP4SSJP3xdgZfkJDgFXUjQ05CSYFoisShfmSdcs,26582
  versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-es,1085
@@ -62,8 +64,8 @@ versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1
  versionhq/tool/model.py,sha256=ve9C4WyiRjQigOU0hRWVxtSUWAQNntlmeW-_DL0_lJY,12328
  versionhq/tool/rag_tool.py,sha256=dW5o-83V4bMFFJEj3PUm7XjblwrYJGmZVBlCpPj6CeM,3852
  versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
- versionhq-1.2.3.8.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
- versionhq-1.2.3.8.dist-info/METADATA,sha256=c-aVXe5FBs_a_YPXQN4HBxfAJZv7ZxR0c1f_5cAsuKg,21262
- versionhq-1.2.3.8.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
- versionhq-1.2.3.8.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
- versionhq-1.2.3.8.dist-info/RECORD,,
+ versionhq-1.2.4.1.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+ versionhq-1.2.4.1.dist-info/METADATA,sha256=v3Va4NIWCPW_pdVWCSljRqCaDcmXjg_xV3mwybmszrA,21380
+ versionhq-1.2.4.1.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+ versionhq-1.2.4.1.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+ versionhq-1.2.4.1.dist-info/RECORD,,