versionhq 1.2.1.2__py3-none-any.whl → 1.2.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/agent/model.py +7 -8
- versionhq/memory/contextual_memory.py +2 -5
- versionhq/task/formation.py +14 -3
- versionhq/task/model.py +66 -107
- {versionhq-1.2.1.2.dist-info → versionhq-1.2.1.3.dist-info}/METADATA +23 -16
- {versionhq-1.2.1.2.dist-info → versionhq-1.2.1.3.dist-info}/RECORD +10 -10
- {versionhq-1.2.1.2.dist-info → versionhq-1.2.1.3.dist-info}/LICENSE +0 -0
- {versionhq-1.2.1.2.dist-info → versionhq-1.2.1.3.dist-info}/WHEEL +0 -0
- {versionhq-1.2.1.2.dist-info → versionhq-1.2.1.3.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
versionhq/agent/model.py
CHANGED
@@ -517,12 +517,11 @@ class Agent(BaseModel):
|
|
517
517
|
|
518
518
|
|
519
519
|
|
520
|
-
def execute_task(self, task, context: Optional[
|
520
|
+
def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
|
521
521
|
"""
|
522
|
-
|
523
|
-
The agent utilizes the tools in task or their own tools if the task.can_use_agent_tools is True.
|
524
|
-
The agent must consider the context to excute the task as well when it is given.
|
522
|
+
Format a task prompt, adding context from knowledge and memory (if given), and invoke LLM.
|
525
523
|
"""
|
524
|
+
|
526
525
|
from versionhq.task.model import Task
|
527
526
|
from versionhq.knowledge._utils import extract_knowledge_context
|
528
527
|
|
@@ -532,9 +531,7 @@ class Agent(BaseModel):
|
|
532
531
|
if self.max_rpm and self._rpm_controller:
|
533
532
|
self._rpm_controller._reset_request_count()
|
534
533
|
|
535
|
-
task_prompt = task.
|
536
|
-
if context is not task.prompt_context:
|
537
|
-
task_prompt += context
|
534
|
+
task_prompt = task._prompt(model_provider=self.llm.provider, context=context)
|
538
535
|
|
539
536
|
if self._knowledge:
|
540
537
|
agent_knowledge = self._knowledge.query(query=[task_prompt,], limit=5)
|
@@ -547,7 +544,9 @@ class Agent(BaseModel):
|
|
547
544
|
contextual_memory = ContextualMemory(
|
548
545
|
memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
|
549
546
|
)
|
550
|
-
|
547
|
+
context_str = task._draft_context_prompt(context=context)
|
548
|
+
query = f"{task.description} {context_str}".strip()
|
549
|
+
memory = contextual_memory.build_context_for_task(query=query)
|
551
550
|
if memory.strip() != "":
|
552
551
|
task_prompt += memory.strip()
|
553
552
|
|
@@ -23,14 +23,11 @@ class ContextualMemory:
|
|
23
23
|
self.um = um
|
24
24
|
|
25
25
|
|
26
|
-
def build_context_for_task(self,
|
26
|
+
def build_context_for_task(self, query: str = None) -> str:
|
27
27
|
"""
|
28
28
|
Automatically builds a minimal, highly relevant set of contextual information for a given task.
|
29
29
|
"""
|
30
|
-
|
31
|
-
query = f"{task.description} {context}".strip()
|
32
|
-
|
33
|
-
if query == "":
|
30
|
+
if not query:
|
34
31
|
return ""
|
35
32
|
|
36
33
|
context = []
|
versionhq/task/formation.py
CHANGED
@@ -82,32 +82,38 @@ def form_agent_network(
|
|
82
82
|
res = vhq_task.execute(agent=vhq_formation_planner, context=context)
|
83
83
|
_formation = Formation.SUPERVISING
|
84
84
|
|
85
|
+
|
85
86
|
if res.pydantic:
|
86
87
|
formation_keys = [k for k, v in Formation._member_map_.items() if k == res.pydantic.formation.upper()]
|
87
88
|
|
88
89
|
if formation_keys:
|
89
90
|
_formation = Formation[formation_keys[0]]
|
90
91
|
|
91
|
-
created_agents = [Agent(role=item, goal=item) for item in res.pydantic.agent_roles]
|
92
|
-
created_tasks = [Task(description=item) for item in res.pydantic.task_descriptions]
|
93
92
|
|
94
93
|
network_tasks = []
|
95
94
|
members = []
|
96
95
|
leader = str(res.pydantic.leader_agent)
|
97
96
|
|
97
|
+
created_agents = [Agent(role=item, goal=item) for item in res.pydantic.agent_roles]
|
98
|
+
created_tasks = [Task(description=item) for item in res.pydantic.task_descriptions]
|
99
|
+
|
100
|
+
|
98
101
|
for i in range(len(created_agents)):
|
99
102
|
is_manager = bool(created_agents[i].role.lower() == leader.lower())
|
100
103
|
member = Member(agent=created_agents[i], is_manager=is_manager)
|
101
104
|
|
102
105
|
if len(created_tasks) >= i and created_tasks[i]:
|
103
106
|
member.tasks.append(created_tasks[i])
|
104
|
-
|
105
107
|
members.append(member)
|
106
108
|
|
107
109
|
|
108
110
|
if len(created_agents) < len(created_tasks):
|
109
111
|
network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
|
110
112
|
|
113
|
+
if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
|
114
|
+
manager = Member(agent=Agent(role=leader, goal=leader), is_manager=True)
|
115
|
+
members.append(manager)
|
116
|
+
|
111
117
|
members.sort(key=lambda x: x.is_manager == False)
|
112
118
|
network = AgentNetwork(members=members, formation=_formation, network_tasks=network_tasks)
|
113
119
|
return network
|
@@ -138,8 +144,13 @@ def form_agent_network(
|
|
138
144
|
if len(created_agents) < len(created_tasks):
|
139
145
|
network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
|
140
146
|
|
147
|
+
if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
|
148
|
+
member = Member(agent=Agent(role=leader, goal=leader), is_manager=True)
|
149
|
+
members.append(member)
|
150
|
+
|
141
151
|
members.sort(key=lambda x: x.is_manager == False)
|
142
152
|
network = AgentNetwork(members=members, formation=_formation, network_tasks=network_tasks)
|
153
|
+
|
143
154
|
return network
|
144
155
|
|
145
156
|
|
versionhq/task/model.py
CHANGED
@@ -196,7 +196,7 @@ class TaskOutput(BaseModel):
|
|
196
196
|
"""
|
197
197
|
When the task is called as context, return its output in concise string to add it to the prompt
|
198
198
|
"""
|
199
|
-
return json.dumps(self.json_dict) if self.json_dict else self.raw[0:
|
199
|
+
return json.dumps(self.json_dict) if self.json_dict else self.raw[0: 1024]
|
200
200
|
|
201
201
|
|
202
202
|
def evaluate(self, task) -> Evaluation:
|
@@ -245,7 +245,7 @@ class TaskOutput(BaseModel):
|
|
245
245
|
|
246
246
|
|
247
247
|
@property
|
248
|
-
def
|
248
|
+
def json_string(self) -> Optional[str]:
|
249
249
|
return json.dumps(self.json_dict)
|
250
250
|
|
251
251
|
|
@@ -259,7 +259,6 @@ class Task(BaseModel):
|
|
259
259
|
"""
|
260
260
|
|
261
261
|
__hash__ = object.__hash__
|
262
|
-
_logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
|
263
262
|
_original_description: str = PrivateAttr(default=None)
|
264
263
|
_task_output_handler = TaskOutputStorageHandler()
|
265
264
|
config: Optional[Dict[str, Any]] = Field(default=None, description="values to set on Task class")
|
@@ -271,18 +270,17 @@ class Task(BaseModel):
|
|
271
270
|
# output
|
272
271
|
pydantic_output: Optional[Type[BaseModel]] = Field(default=None, description="store Pydantic class as structured response format")
|
273
272
|
response_fields: Optional[List[ResponseField]] = Field(default_factory=list, description="store list of ResponseField as structured response format")
|
274
|
-
output: Optional[TaskOutput] = Field(default=None, description="store the final task output in TaskOutput class")
|
275
273
|
|
276
274
|
# task setup
|
277
|
-
context: Optional[List["Task"]] = Field(default=None, description="other tasks whose outputs should be used as context")
|
278
|
-
prompt_context: Optional[str] = Field(default=None)
|
275
|
+
# context: Optional[List["Task"]] = Field(default=None, description="other tasks whose outputs should be used as context")
|
276
|
+
# prompt_context: Optional[str] = Field(default=None)
|
279
277
|
|
280
278
|
# tool usage
|
281
279
|
tools: Optional[List[ToolSet | Tool | Any]] = Field(default_factory=list, description="tools that the agent can use aside from their tools")
|
282
280
|
can_use_agent_tools: bool = Field(default=False, description="whether the agent can use their own tools when executing the task")
|
283
281
|
tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")
|
284
282
|
|
285
|
-
#
|
283
|
+
# executing
|
286
284
|
execution_type: TaskExecutionType = Field(default=TaskExecutionType.SYNC)
|
287
285
|
allow_delegation: bool = Field(default=False, description="ask other agents for help and run the task instead")
|
288
286
|
callback: Optional[Callable] = Field(default=None, description="callback to be executed after the task is completed.")
|
@@ -294,10 +292,11 @@ class Task(BaseModel):
|
|
294
292
|
|
295
293
|
# recording
|
296
294
|
processed_agents: Set[str] = Field(default_factory=set, description="store roles of the agents that executed the task")
|
297
|
-
|
295
|
+
tool_errors: int = 0
|
298
296
|
delegations: int = 0
|
299
297
|
latency: int | float = 0 # job latency in sec
|
300
298
|
tokens: int = 0 # tokens consumed
|
299
|
+
output: Optional[TaskOutput] = Field(default=None, description="store the final task output in TaskOutput class")
|
301
300
|
|
302
301
|
|
303
302
|
@model_validator(mode="before")
|
@@ -339,13 +338,6 @@ class Task(BaseModel):
|
|
339
338
|
return self
|
340
339
|
|
341
340
|
|
342
|
-
@model_validator(mode="after")
|
343
|
-
def backup_description(self):
|
344
|
-
if self._original_description == None:
|
345
|
-
self._original_description = self.description
|
346
|
-
return self
|
347
|
-
|
348
|
-
|
349
341
|
def _draft_output_prompt(self, model_provider: str) -> str:
|
350
342
|
"""
|
351
343
|
Draft prompts on the output format by converting `
|
@@ -371,35 +363,60 @@ Your response MUST be a valid JSON string that strictly follows the response for
|
|
371
363
|
Response format: {response_format}
|
372
364
|
Ref. Output image: {output_formats_to_follow}
|
373
365
|
"""
|
374
|
-
|
375
366
|
else:
|
376
367
|
output_prompt = "Return your response as a valid JSON serializable string, enclosed in double quotes. Do not use single quotes, trailing commas, or other non-standard JSON syntax."
|
377
368
|
|
378
369
|
return output_prompt
|
379
370
|
|
380
371
|
|
381
|
-
def
|
372
|
+
def _draft_context_prompt(self, context: Any) -> str:
|
382
373
|
"""
|
383
|
-
|
384
|
-
When the task has context, add context prompting of all the tasks in the context.
|
385
|
-
When we have cusotmer/product info, add them to the prompt.
|
374
|
+
Create a context prompt from the given context in any format: a task object, task output object, list, dict.
|
386
375
|
"""
|
387
376
|
|
388
|
-
|
389
|
-
|
377
|
+
context_to_add = None
|
378
|
+
if not context:
|
379
|
+
Logger().log(level="error", color="red", message="Missing a context to add to the prompt. We'll return ''.")
|
380
|
+
return context_to_add
|
381
|
+
|
382
|
+
match context:
|
383
|
+
case str():
|
384
|
+
context_to_add = context
|
385
|
+
|
386
|
+
case Task():
|
387
|
+
if not context.output:
|
388
|
+
res = context.execute()
|
389
|
+
context_to_add = res.raw
|
390
|
+
|
391
|
+
else:
|
392
|
+
context_to_add = context.output.raw
|
393
|
+
|
394
|
+
case TaskOutput():
|
395
|
+
context_to_add = context.raw
|
396
|
+
|
397
|
+
case dict():
|
398
|
+
context_to_add = str(context)
|
399
|
+
|
400
|
+
case list():
|
401
|
+
res = ", ".join([self._draft_context_prompt(context=item) for item in context])
|
402
|
+
context_to_add = res
|
403
|
+
|
404
|
+
case _:
|
405
|
+
pass
|
390
406
|
|
391
|
-
|
392
|
-
context_outputs = "\n".join([task.output.context_prompting() if hasattr(task, "output") else "" for task in self.context])
|
393
|
-
task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_outputs}")
|
407
|
+
return context_to_add
|
394
408
|
|
395
|
-
if self.prompt_context:
|
396
|
-
task_slices.insert(len(task_slices), f"Consider the following context when responding: {self.prompt_context}")
|
397
409
|
|
398
|
-
|
399
|
-
|
410
|
+
def _prompt(self, model_provider: str = None, context: Optional[Any] = None) -> str:
|
411
|
+
"""
|
412
|
+
Format the task prompt and cascade it to the agent.
|
413
|
+
"""
|
414
|
+
output_prompt = self._draft_output_prompt(model_provider=model_provider)
|
415
|
+
context_prompt = self._draft_context_prompt(context=context) if context else None
|
416
|
+
task_slices = [self.description, output_prompt, ]
|
400
417
|
|
401
|
-
if
|
402
|
-
task_slices.insert(len(task_slices), f"
|
418
|
+
if context_prompt:
|
419
|
+
task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_prompt}")
|
403
420
|
|
404
421
|
return "\n".join(task_slices)
|
405
422
|
|
@@ -449,8 +466,8 @@ Ref. Output image: {output_formats_to_follow}
|
|
449
466
|
"""
|
450
467
|
|
451
468
|
if raw is None or raw == "":
|
452
|
-
|
453
|
-
output = { "output": "
|
469
|
+
Logger().log(level="warning", message="The model returned an empty response. Returning an empty dict.", color="yellow")
|
470
|
+
output = { "output": "" }
|
454
471
|
return output
|
455
472
|
|
456
473
|
try:
|
@@ -498,8 +515,10 @@ Ref. Output image: {output_formats_to_follow}
|
|
498
515
|
|
499
516
|
def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
|
500
517
|
"""
|
501
|
-
Interpolate inputs into the task description
|
518
|
+
Interpolate inputs into the task description.
|
502
519
|
"""
|
520
|
+
self._original_description = self.description
|
521
|
+
|
503
522
|
if inputs:
|
504
523
|
self.description = self._original_description.format(**inputs)
|
505
524
|
|
@@ -537,17 +556,18 @@ Ref. Output image: {output_formats_to_follow}
|
|
537
556
|
)
|
538
557
|
|
539
558
|
except AttributeError as e:
|
540
|
-
|
559
|
+
Logger().log(level="error", message=f"Missing attributes for long term memory: {str(e)}", color="red")
|
541
560
|
pass
|
542
561
|
|
543
562
|
except Exception as e:
|
544
|
-
|
563
|
+
Logger().log(level="error", message=f"Failed to add to the memory: {str(e)}", color="red")
|
545
564
|
pass
|
546
565
|
|
566
|
+
|
547
567
|
def _build_agent_from_task(self, task_description: str = None) -> InstanceOf["vhq.Agent"]:
|
548
568
|
task_description = task_description if task_description else self.description
|
549
569
|
if not task_description:
|
550
|
-
|
570
|
+
Logger().log(level="error", message="Task is missing the description.", color="red")
|
551
571
|
pass
|
552
572
|
|
553
573
|
agent = vhq.Agent(goal=task_description, role=task_description, maxit=1) #! REFINEME
|
@@ -555,7 +575,9 @@ Ref. Output image: {output_formats_to_follow}
|
|
555
575
|
|
556
576
|
|
557
577
|
# task execution
|
558
|
-
def execute(
|
578
|
+
def execute(
|
579
|
+
self, type: TaskExecutionType = None, agent: Optional["vhq.Agent"] = None, context: Optional[Any] = None
|
580
|
+
) -> TaskOutput | Future[TaskOutput]:
|
559
581
|
"""
|
560
582
|
A main method to handle task execution. Build an agent when the agent is not given.
|
561
583
|
"""
|
@@ -572,26 +594,14 @@ Ref. Output image: {output_formats_to_follow}
|
|
572
594
|
return self._execute_async(agent=agent, context=context)
|
573
595
|
|
574
596
|
|
575
|
-
def _execute_sync(self, agent, context: Optional[str | List[Any]] = None) -> TaskOutput:
|
576
|
-
"""
|
577
|
-
Execute the task synchronously.
|
578
|
-
When the task has context, make sure we have executed all the tasks in the context first.
|
579
|
-
"""
|
580
|
-
|
581
|
-
if self.context:
|
582
|
-
if isinstance(self.context, list):
|
583
|
-
for task in self.context:
|
584
|
-
if isinstance(task, Task) and task.output is None:
|
585
|
-
task._execute_core(agent, context)
|
586
597
|
|
598
|
+
def _execute_sync(self, agent, context: Optional[Any] = None) -> TaskOutput:
|
599
|
+
"""Executes the task synchronously."""
|
587
600
|
return self._execute_core(agent, context)
|
588
601
|
|
589
602
|
|
590
|
-
def _execute_async(self, agent, context: Optional[
|
591
|
-
"""
|
592
|
-
Execute the task asynchronously.
|
593
|
-
"""
|
594
|
-
|
603
|
+
def _execute_async(self, agent, context: Optional[Any] = None) -> Future[TaskOutput]:
|
604
|
+
"""Executes the task asynchronously."""
|
595
605
|
future: Future[TaskOutput] = Future()
|
596
606
|
threading.Thread(daemon=True, target=self._execute_task_async, args=(agent, context, future)).start()
|
597
607
|
return future
|
@@ -601,21 +611,19 @@ Ref. Output image: {output_formats_to_follow}
|
|
601
611
|
"""
|
602
612
|
Executes the task asynchronously with context handling.
|
603
613
|
"""
|
604
|
-
|
605
614
|
result = self._execute_core(agent, context)
|
606
615
|
future.set_result(result)
|
607
616
|
|
608
617
|
|
609
|
-
def _execute_core(self, agent, context: Optional[
|
618
|
+
def _execute_core(self, agent, context: Optional[Any]) -> TaskOutput:
|
610
619
|
"""
|
611
620
|
A core method for task execution.
|
612
|
-
Handles 1. agent delegation, 2. tools, 3. context to add to the prompt, and 4. callbacks
|
621
|
+
Handles 1. agent delegation, 2. tools, 3. context to add to the prompt, and 4. callbacks.
|
613
622
|
"""
|
614
623
|
|
615
624
|
from versionhq.agent.model import Agent
|
616
625
|
from versionhq.agent_network.model import AgentNetwork
|
617
626
|
|
618
|
-
self.prompt_context = context
|
619
627
|
task_output: InstanceOf[TaskOutput] = None
|
620
628
|
raw_output: str = None
|
621
629
|
tool_output: str | list = None
|
@@ -719,52 +727,3 @@ Task ID: {str(self.id)}
|
|
719
727
|
"Description": {self.description}
|
720
728
|
"Tools": {", ".join([tool.name for tool in self.tools])}
|
721
729
|
"""
|
722
|
-
|
723
|
-
|
724
|
-
|
725
|
-
# class ConditionalTask(Task):
|
726
|
-
# """
|
727
|
-
# A task that can be conditionally executed based on the output of another task.
|
728
|
-
# When the `condition` return True, execute the task, else skipped with `skipped task output`.
|
729
|
-
# """
|
730
|
-
|
731
|
-
# condition: Callable[[TaskOutput], bool] = Field(
|
732
|
-
# default=None,
|
733
|
-
# description="max. number of retries for an agent to execute a task when an error occurs",
|
734
|
-
# )
|
735
|
-
|
736
|
-
|
737
|
-
# def __init__(self, condition: Callable[[Any], bool], **kwargs):
|
738
|
-
# super().__init__(**kwargs)
|
739
|
-
# self.condition = condition
|
740
|
-
|
741
|
-
|
742
|
-
# def should_execute(self, context: TaskOutput) -> bool:
|
743
|
-
# """
|
744
|
-
# Decide whether the conditional task should be executed based on the provided context.
|
745
|
-
# Return `True` if it should be executed.
|
746
|
-
# """
|
747
|
-
# return self.condition(context)
|
748
|
-
|
749
|
-
|
750
|
-
# def get_skipped_task_output(self):
|
751
|
-
# return TaskOutput(task_id=self.id, raw="", pydantic=None, json_dict={})
|
752
|
-
|
753
|
-
|
754
|
-
# def _handle_conditional_task(self, task_outputs: List[TaskOutput], task_index: int, was_replayed: bool) -> Optional[TaskOutput]:
|
755
|
-
# """
|
756
|
-
# When the conditional task should be skipped, return `skipped_task_output` as task_output else return None
|
757
|
-
# """
|
758
|
-
|
759
|
-
# previous_output = task_outputs[task_index - 1] if task_outputs and len(task_outputs) > 1 else None
|
760
|
-
|
761
|
-
# if previous_output and not self.should_execute(previous_output):
|
762
|
-
# self._logger.log(level="warning", message=f"Skipping conditional task: {self.description}", color="yellow")
|
763
|
-
# skipped_task_output = self.get_skipped_task_output()
|
764
|
-
# self.output = skipped_task_output
|
765
|
-
|
766
|
-
# if not was_replayed:
|
767
|
-
# self._store_execution_log(self, task_index=task_index, was_replayed=was_replayed, inputs={})
|
768
|
-
# return skipped_task_output
|
769
|
-
|
770
|
-
# return None
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.2
|
2
2
|
Name: versionhq
|
3
|
-
Version: 1.2.1.
|
3
|
+
Version: 1.2.1.3
|
4
4
|
Summary: An agentic orchestration framework for building agent networks that handle task automation.
|
5
5
|
Author-email: Kuriko Iwai <kuriko@versi0n.io>
|
6
6
|
License: MIT License
|
@@ -89,7 +89,7 @@ Requires-Dist: pygraphviz>=1.14; extra == "pygraphviz"
|
|
89
89
|

|
90
90
|
|
91
91
|
|
92
|
-
|
92
|
+
Agentic orchestration framework for multi-agent networks and task graphs for complex task automation.
|
93
93
|
|
94
94
|
**Visit:**
|
95
95
|
|
@@ -188,7 +188,14 @@ task_graph.add_dependency(
|
|
188
188
|
type=vhq.DependencyType.FINISH_TO_FINISH, lag=1, required=False, weight=3
|
189
189
|
)
|
190
190
|
|
191
|
+
# To visualize the graph:
|
191
192
|
task_graph.visualize()
|
193
|
+
|
194
|
+
# To start executing nodes:
|
195
|
+
latest_output, outputs = task_graph.activate()
|
196
|
+
|
197
|
+
assert isinstance(latest_output, vhq.TaskOutput)
|
198
|
+
assert [k in task_graph.nodes.keys() and v and isinstance(v, vhq.TaskOutput) for k, v in outputs.items()]
|
192
199
|
```
|
193
200
|
|
194
201
|
<hr />
|
@@ -346,34 +353,34 @@ Tasks can be delegated to a manager, peers within the agent network, or a comple
|
|
346
353
|
|
347
354
|
## Technologies Used
|
348
355
|
|
349
|
-
**Task Graph**
|
350
|
-
|
351
|
-
* [NetworkX](https://networkx.org/documentation/stable/reference/introduction.html): A Python package to analyze, create, and manipulate complex graph networks.
|
352
|
-
* [Matplotlib](https://matplotlib.org/stable/index.html): Visualization library
|
353
|
-
* [Graphviz](https://graphviz.org/about/): Graph visualization software
|
354
|
-
|
355
|
-
|
356
356
|
**Schema, Data Validation**
|
357
357
|
|
358
358
|
* [Pydantic](https://docs.pydantic.dev/latest/): Data validation and serialization library for Python.
|
359
359
|
* [Upstage](https://console.upstage.ai/docs/getting-started/overview): Document processer for ML tasks. (Use `Document Parser API` to extract data from documents)
|
360
360
|
* [Docling](https://ds4sd.github.io/docling/): Document parsing
|
361
361
|
|
362
|
+
**Workflow, Task Graph**
|
362
363
|
|
363
|
-
|
364
|
-
|
365
|
-
* [
|
366
|
-
* [Chroma DB](https://docs.trychroma.com/): Vector database for storing and querying usage data.
|
367
|
-
* [SQLite](https://www.sqlite.org/docs.html): C-language library to implements a small SQL database engine.
|
364
|
+
* [NetworkX](https://networkx.org/documentation/stable/reference/introduction.html): A Python package to analyze, create, and manipulate complex graph networks.
|
365
|
+
* [Matplotlib](https://matplotlib.org/stable/index.html): For graph visualization.
|
366
|
+
* [Graphviz](https://graphviz.org/about/): For graph visualization.
|
368
367
|
|
369
|
-
**LLM
|
368
|
+
**LLM Curation**
|
370
369
|
|
371
|
-
* [LiteLLM](https://docs.litellm.ai/docs/providers):
|
370
|
+
* [LiteLLM](https://docs.litellm.ai/docs/providers): LLM orchestration platform
|
372
371
|
|
373
372
|
**Tools**
|
374
373
|
|
375
374
|
* [Composio](https://composio.dev/): Connect RAG agents with external tools, Apps, and APIs to perform actions and receive triggers. We use [tools](https://composio.dev/tools) and [RAG tools](https://app.composio.dev/app/ragtool) from the Composio toolset.
|
376
375
|
|
376
|
+
|
377
|
+
**Storage**
|
378
|
+
|
379
|
+
* [mem0ai](https://docs.mem0.ai/quickstart#install-package): Agents' memory storage and management.
|
380
|
+
* [Chroma DB](https://docs.trychroma.com/): Vector database for storing and querying usage data.
|
381
|
+
* [SQLite](https://www.sqlite.org/docs.html): C-language library that implements a small SQL database engine.
|
382
|
+
|
383
|
+
|
377
384
|
**Deployment**
|
378
385
|
|
379
386
|
* **Python**: Primary programming language. v3.12.x is recommended
|
@@ -1,4 +1,4 @@
|
|
1
|
-
versionhq/__init__.py,sha256=
|
1
|
+
versionhq/__init__.py,sha256=WBEWODzBMhY4LEtXSvUkIixwfk0-xvGLJSID6O_xVZY,2817
|
2
2
|
versionhq/_utils/__init__.py,sha256=dzoZr4cBlh-2QZuPzTdehPUCe9lP1dmRtauD7qTjUaA,158
|
3
3
|
versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
|
4
4
|
versionhq/_utils/logger.py,sha256=IxSlr2Vi7AXaxj5Fuy8LRzEovaIFVwcbWTgJnASsHN8,3155
|
@@ -7,7 +7,7 @@ versionhq/_utils/usage_metrics.py,sha256=NXF18dn5NNvGK7EsQ4AAghpR8ppYOjMx6ABenLL
|
|
7
7
|
versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
|
8
8
|
versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
9
9
|
versionhq/agent/inhouse_agents.py,sha256=snDtgDmvZB2bZKH_RTcz5uFOMl3MTjLJwTQBebFt8hk,2532
|
10
|
-
versionhq/agent/model.py,sha256=
|
10
|
+
versionhq/agent/model.py,sha256=L21COegL50x1sl-1gZqYnICx8Gm276J0GnQx6IthXf8,25343
|
11
11
|
versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
|
12
12
|
versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
|
13
13
|
versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
|
@@ -33,7 +33,7 @@ versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
33
33
|
versionhq/llm/llm_vars.py,sha256=wjQK20cKvph6Vq1v71o4d16zBGcHlwq0bzOT_zWno7w,7041
|
34
34
|
versionhq/llm/model.py,sha256=wlzDUMEyIOm808d1vzqu9gmbB4ch-s_EUvwFR60gR80,17177
|
35
35
|
versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
36
|
-
versionhq/memory/contextual_memory.py,sha256=
|
36
|
+
versionhq/memory/contextual_memory.py,sha256=WeDujcEp4oud30OusXSPPNrMEQP-vGrt1mcfYopQruU,3483
|
37
37
|
versionhq/memory/model.py,sha256=MPO8dDP5eAuk9td6bMOq5j2huLzCADFJDrTujHhcWQY,8150
|
38
38
|
versionhq/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
39
39
|
versionhq/storage/base.py,sha256=p-Jas0fXQan_qotnRD6seQxrT2lj-uw9-SmHQhdppcs,355
|
@@ -44,10 +44,10 @@ versionhq/storage/task_output_storage.py,sha256=E1t_Fkt78dPYIOl3MP7LfQ8oGtjlzxBu
|
|
44
44
|
versionhq/storage/utils.py,sha256=ByYXPoEIGJYLUqz-DWjbCAnneNrH1otiYbp12SCILpM,747
|
45
45
|
versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
46
46
|
versionhq/task/evaluate.py,sha256=WdUgjbZL62XrxyWe5MTz29scfzwmuAHGxJ7GvAB8Fmk,3954
|
47
|
-
versionhq/task/formation.py,sha256=
|
47
|
+
versionhq/task/formation.py,sha256=qkAJ1ToeOHQFR3hqC8nJyU6msftPgm8yEPTrFfz77OA,6906
|
48
48
|
versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
|
49
49
|
versionhq/task/log_handler.py,sha256=LT7YnO7gcPR9IZS7eRvMjnHh8crMBFtqduxd8dxIbkk,1680
|
50
|
-
versionhq/task/model.py,sha256=
|
50
|
+
versionhq/task/model.py,sha256=i-3ABpuE2a5Gp6-jalU0BOWrbolP90EnACDgdIcNAvQ,28722
|
51
51
|
versionhq/task/structured_response.py,sha256=4q-hQPu7oMMHHXEzh9YW4SJ7N5eCZ7OfZ65juyl_jCI,5000
|
52
52
|
versionhq/task/TEMPLATES/Description.py,sha256=V-4kh8xpQTKOcDMi2xnuP-fcNk6kuoz1_5tYBlDLQWQ,420
|
53
53
|
versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
@@ -59,8 +59,8 @@ versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtg
|
|
59
59
|
versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
|
60
60
|
versionhq/tool/model.py,sha256=PO4zNWBZcJhYVur381YL1dy6zqurio2jWjtbxOxZMGI,12194
|
61
61
|
versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
|
62
|
-
versionhq-1.2.1.
|
63
|
-
versionhq-1.2.1.
|
64
|
-
versionhq-1.2.1.
|
65
|
-
versionhq-1.2.1.
|
66
|
-
versionhq-1.2.1.
|
62
|
+
versionhq-1.2.1.3.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
|
63
|
+
versionhq-1.2.1.3.dist-info/METADATA,sha256=_Ybf1tHBU0lp8W77c7aswPZvr5DCPgx13vqoLmftGBc,22374
|
64
|
+
versionhq-1.2.1.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
|
65
|
+
versionhq-1.2.1.3.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
|
66
|
+
versionhq-1.2.1.3.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|