versionhq 1.1.10.7__py3-none-any.whl → 1.1.10.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. versionhq/__init__.py +1 -1
  2. versionhq/_utils/vars.py +2 -0
  3. versionhq/agent/TEMPLATES/Backstory.py +2 -2
  4. versionhq/agent/default_agents.py +10 -0
  5. versionhq/agent/model.py +127 -39
  6. versionhq/agent/parser.py +3 -20
  7. versionhq/{_utils → agent}/rpm_controller.py +22 -15
  8. versionhq/knowledge/__init__.py +0 -0
  9. versionhq/knowledge/_utils.py +11 -0
  10. versionhq/knowledge/embedding.py +192 -0
  11. versionhq/knowledge/model.py +54 -0
  12. versionhq/knowledge/source.py +413 -0
  13. versionhq/knowledge/source_docling.py +129 -0
  14. versionhq/knowledge/storage.py +177 -0
  15. versionhq/llm/model.py +76 -62
  16. versionhq/memory/__init__.py +0 -0
  17. versionhq/memory/contextual_memory.py +96 -0
  18. versionhq/memory/model.py +174 -0
  19. versionhq/storage/base.py +14 -0
  20. versionhq/storage/ltm_sqlite_storage.py +131 -0
  21. versionhq/storage/mem0_storage.py +109 -0
  22. versionhq/storage/rag_storage.py +231 -0
  23. versionhq/storage/task_output_storage.py +18 -29
  24. versionhq/storage/utils.py +26 -0
  25. versionhq/task/TEMPLATES/Description.py +5 -0
  26. versionhq/task/evaluate.py +122 -0
  27. versionhq/task/model.py +134 -43
  28. versionhq/team/team_planner.py +1 -1
  29. versionhq/tool/model.py +44 -46
  30. {versionhq-1.1.10.7.dist-info → versionhq-1.1.10.9.dist-info}/METADATA +48 -39
  31. versionhq-1.1.10.9.dist-info/RECORD +64 -0
  32. versionhq-1.1.10.7.dist-info/RECORD +0 -45
  33. {versionhq-1.1.10.7.dist-info → versionhq-1.1.10.9.dist-info}/LICENSE +0 -0
  34. {versionhq-1.1.10.7.dist-info → versionhq-1.1.10.9.dist-info}/WHEEL +0 -0
  35. {versionhq-1.1.10.7.dist-info → versionhq-1.1.10.9.dist-info}/top_level.txt +0 -0
versionhq/task/model.py CHANGED
@@ -2,6 +2,7 @@ import json
  import threading
  import datetime
  import uuid
+ import inspect
  from concurrent.futures import Future
  from hashlib import md5
  from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type, TypeVar
@@ -13,6 +14,7 @@ from pydantic_core import PydanticCustomError
  from versionhq._utils.process_config import process_config
  from versionhq.task import TaskOutputFormat
  from versionhq.task.log_handler import TaskOutputStorageHandler
+ from versionhq.task.evaluate import Evaluation, EvaluationItem
  from versionhq.tool.model import Tool, ToolSet
  from versionhq._utils.logger import Logger

@@ -170,9 +172,7 @@ class TaskOutput(BaseModel):
  pydantic: Optional[Any] = Field(default=None)
  tool_output: Optional[Any] = Field(default=None, description="store tool result when the task takes tool output as its final output")
  callback_output: Optional[Any] = Field(default=None, description="store task or agent callback outcome")
-
- def __str__(self) -> str:
- return str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw
+ evaluation: Optional[InstanceOf[Evaluation]] = Field(default=None, description="store overall evaluation of the task output. passed to ltm")


  def to_dict(self) -> Dict[str, Any] | None:
@@ -189,6 +189,39 @@ class TaskOutput(BaseModel):
  return json.dumps(self.json_dict) if self.json_dict else self.raw[0: 127]


+ def evaluate(self, task, latency: int | float = None, tokens: int = None) -> Evaluation:
+ """
+ Evaluate the output based on the criteria, score each from 0 to 1 scale, and raise suggestions for future improvement.
+ """
+ from versionhq.task.TEMPLATES.Description import EVALUATE
+
+ if not self.evaluation:
+ self.evaluation = Evaluation()
+
+ self.evaluation.latency = latency if latency is not None else task.latency
+ self.evaluation.tokens = tokens if tokens is not None else task.tokens
+
+ eval_criteria = task.eval_criteria if task.eval_criteria else ["Overall competitiveness", ]
+
+ for item in eval_criteria:
+ task_1 = Task(
+ description=EVALUATE.format(task_description=task.description, task_output=self.raw, eval_criteria=str(item)),
+ pydantic_output=EvaluationItem
+ )
+ res_a = task_1.execute_sync(agent=self.evaluation.responsible_agent)
+ self.evaluation.items.append(EvaluationItem(**res_a.json_dict))
+
+ return self.evaluation
+
+
+ @property
+ def aggregate_score(self) -> float | int:
+ if self.evaluation is None:
+ return 0
+ else:
+ self.evaluation.aggregate_score
+
+
  @property
  def json(self) -> Optional[str]:
  if self.output_format != TaskOutputFormat.JSON:
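The new `evaluate()` method spawns one sub-task per criterion (using the `EVALUATE` template with `EvaluationItem` as the response format) and appends each result to `evaluation.items`. A minimal sketch of driving it by hand, assuming `Evaluation.responsible_agent` defaults to a built-in evaluator and that `Agent` accepts `role` and `goal` (both assumptions, not confirmed by this diff):

```
from versionhq.agent.model import Agent
from versionhq.task.model import Task

agent = Agent(role="Researcher", goal="Summarize market trends")   # hypothetical agent for illustration
task = Task(description="Summarize Q1 market trends in three bullet points.")

output = task.execute_sync(agent=agent)
evaluation = output.evaluate(task=task)   # falls back to the default criterion "Overall competitiveness"
print(evaluation.aggregate_score, evaluation.suggestion_summary)
```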
@@ -202,6 +235,10 @@ class TaskOutput(BaseModel):
  return json.dumps(self.json_dict)


+ def __str__(self) -> str:
+ return str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw
+
+

  class Task(BaseModel):
  """
@@ -222,14 +259,8 @@ class Task(BaseModel):
  description: str = Field(description="Description of the actual task")

  # output
- pydantic_custom_output: Optional[Any] = Field(
- default=None,
- description="store a custom Pydantic class that will be passed to the model as a response format."
- )
- response_fields: List[ResponseField] = Field(
- default_factory=list,
- description="store the list of ResponseFields to create the response format"
- )
+ pydantic_output: Optional[Any] = Field(default=None, description="store a custom Pydantic class as response format")
+ response_fields: List[ResponseField] = Field(default_factory=list, description="store the list of ResponseFields to create the response format")
  output: Optional[TaskOutput] = Field(default=None, description="store the final task output in TaskOutput class")

  # task setup
@@ -247,11 +278,16 @@ class Task(BaseModel):
  callback: Optional[Callable] = Field(default=None, description="callback to be executed after the task is completed.")
  callback_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict, description="kwargs for the callback when the callback is callable")

+ # evaluation
+ should_evaluate: bool = Field(default=False, description="True to run the evaluation flow")
+ eval_criteria: Optional[List[str]] = Field(default_factory=list, description="criteria to evaluate the outcome. i.e., fit to the brand tone")
+
  # recording
  processed_by_agents: Set[str] = Field(default_factory=set, description="store responsible agents' roles")
  tools_errors: int = 0
  delegations: int = 0
- execution_span_in_sec: int = 0
+ latency: int | float = 0 # execution latency in sec
+ tokens: int = 0 # tokens consumed


  @model_validator(mode="before")
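With `should_evaluate` and `eval_criteria` on `Task`, the evaluation runs automatically at the end of `execute_sync()` (see the execution hunk further below), and `latency` / `tokens` replace the old `execution_span_in_sec`. A sketch of setting the new fields; the criteria strings are purely illustrative:

```
from versionhq.task.model import Task

task = Task(
    description="Draft a short launch announcement.",
    should_evaluate=True,                                # triggers task_output.evaluate() after execution
    eval_criteria=["fit to the brand tone", "clarity"],  # illustrative criteria
)
# after task.execute_sync(agent=...), task.latency holds the execution time in seconds
# and task.output.evaluation stores the per-criterion EvaluationItem results
```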
@@ -276,18 +312,6 @@ class Task(BaseModel):
  return self


- # @model_validator(mode="after")
- # def set_attributes_based_on_config(self) -> Self:
- # """
- # Set attributes based on the task configuration.
- # """
-
- # if self.config:
- # for key, value in self.config.items():
- # setattr(self, key, value)
- # return self
-
-
  @model_validator(mode="after")
  def set_up_tools(self) -> Self:
  if not self.tools:
@@ -319,10 +343,10 @@ class Task(BaseModel):

  output_prompt = ""

- if self.pydantic_custom_output:
+ if self.pydantic_output:
  output_prompt = f"""
  Your response MUST STRICTLY follow the given repsonse format:
- JSON schema: {str(self.pydantic_custom_output)}
+ JSON schema: {str(self.pydantic_output)}
  """

  elif self.response_fields:
@@ -339,7 +363,7 @@ Ref. Output image: {output_formats_to_follow}
  """

  else:
- output_prompt = "Return your response as a valid JSON string, enclosed in double quotes. Do not use single quotes, trailing commas, or other non-standard JSON syntax."
+ output_prompt = "Return your response as a valid JSON serializable string, enclosed in double quotes. Do not use single quotes, trailing commas, or other non-standard JSON syntax."

  return output_prompt

@@ -380,7 +404,7 @@ Ref. Output image: {output_formats_to_follow}

  def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
  """
- Structure a response format either from`response_fields` or `pydantic_custom_output`.
+ Structure a response format either from`response_fields` or `pydantic_output`.
  1 nested item is accepted.
  """

@@ -412,8 +436,8 @@ Ref. Output image: {output_formats_to_follow}
  }


- elif self.pydantic_custom_output:
- response_format = StructuredOutput(response_format=self.pydantic_custom_output)._format()
+ elif self.pydantic_output:
+ response_format = StructuredOutput(response_format=self.pydantic_output)._format()

  return response_format

@@ -424,7 +448,7 @@ Ref. Output image: {output_formats_to_follow}
  """

  if raw is None or raw == "":
- self._logger.log(level="error", message="The model returned an empty response. Returning an empty dict.", color="yellow")
+ self._logger.log(level="warning", message="The model returned an empty response. Returning an empty dict.", color="yellow")
  output = { "output": "n.a." }
  return output

@@ -453,13 +477,12 @@ Ref. Output image: {output_formats_to_follow}
  return output


-
  def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel]:
  """
  Create pydantic output from raw or json_dict output.
  """

- output_pydantic = self.pydantic_custom_output
+ output_pydantic = self.pydantic_output

  try:
  json_dict = json_dict if json_dict else self._create_json_output(raw=raw)
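`pydantic_custom_output` is renamed to `pydantic_output` throughout this file. A sketch of passing a custom response schema, assuming simple scalar and list fields are supported by the structured-output path (the schema class below is hypothetical):

```
from typing import List
from pydantic import BaseModel
from versionhq.task.model import Task

class Summary(BaseModel):            # hypothetical response schema for illustration
    title: str
    bullet_points: List[str]

task = Task(
    description="Summarize the attached meeting notes.",
    pydantic_output=Summary,         # formerly `pydantic_custom_output`
)
# after task.execute_sync(agent=...), task.output.pydantic would be a Summary instance
```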
@@ -481,6 +504,64 @@ Ref. Output image: {output_formats_to_follow}
  self.description = self._original_description.format(**inputs)


+ def _create_short_term_memory(self, agent, task_output: TaskOutput) -> None:
+ """
+ After the task execution, create and save short-term memory of the responsible agent.
+ """
+
+ from versionhq.agent.model import Agent
+ from versionhq.memory.model import ShortTermMemory
+
+ try:
+ if isinstance(agent, Agent) and agent.use_memory == True:
+ if hasattr(agent, "short_term_memory"):
+ agent.short_term_memory.save(value=task_output.raw, metadata={ "observation": self.description, }, agent=agent.role)
+ else:
+ agent.short_term_memory = ShortTermMemory(agent=agent, embedder_config=agent.embedder_config)
+ agent.short_term_memory.save(value=task_output.raw, metadata={ "observation": self.description, }, agent=agent.role)
+
+ except Exception as e:
+ self._logger.log(level="error", message=f"Failed to add to short term memory: {str(e)}", color="red")
+ pass
+
+
+ def _create_long_term_memory(self, agent, task_output: TaskOutput) -> None:
+ """
+ Create and save long-term and entity memory items based on evaluation.
+ """
+ from versionhq.agent.model import Agent
+ from versionhq.memory.model import LongTermMemory, LongTermMemoryItem
+
+ try:
+ if isinstance(agent, Agent) and agent.use_memory == True:
+ evaluation = task_output.evaluation if task_output.evaluation else task_output.evaluate(task=self)
+
+ long_term_memory_item = LongTermMemoryItem(
+ agent=str(agent.role),
+ task=str(self.description),
+ datetime=str(datetime.datetime.now()),
+ quality=evaluation.aggregate_score,
+ metadata={
+ "suggestions": evaluation.suggestion_summary,
+ "quality": evaluation.aggregate_score,
+ },
+ )
+
+ if hasattr(agent, "long_term_memory"):
+ agent.long_term_memory.save(item=long_term_memory_item)
+ else:
+ agent.long_term_memory = LongTermMemory(agent=agent)
+ agent.long_term_memory.save(item=long_term_memory_item)
+
+ except AttributeError as e:
+ self._logger.log(level="error", message=f"Missing attributes for long term memory: {str(e)}", color="red")
+ pass
+
+ except Exception as e:
+ self._logger.log(level="error", message=f"Failed to add to long term memory: {str(e)}", color="red")
+ pass
+
+
  # task execution
  def execute_sync(self, agent, context: Optional[str] = None) -> TaskOutput:
  """
@@ -527,7 +608,7 @@ Ref. Output image: {output_formats_to_follow}
  self.prompt_context = context
  task_output: InstanceOf[TaskOutput] = None
  tool_output: str | list = None
- task_tools: List[InstanceOf[Tool] | InstanceOf[ToolSet] | Type[Tool]] = []
+ task_tools: List[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = []
  started_at = datetime.datetime.now()

  if self.tools:
@@ -563,20 +644,34 @@ Ref. Output image: {output_formats_to_follow}
  if "outcome" in json_dict_output:
  json_dict_output = self._create_json_output(raw=str(json_dict_output["outcome"]))

- pydantic_output = self._create_pydantic_output(raw=raw_output, json_dict=json_dict_output) if self.pydantic_custom_output else None
+ pydantic_output = self._create_pydantic_output(raw=raw_output, json_dict=json_dict_output) if self.pydantic_output else None

  task_output = TaskOutput(
  task_id=self.id,
- raw=raw_output,
+ raw=raw_output if raw_output is not None else "",
  pydantic=pydantic_output,
  json_dict=json_dict_output
  )

+ ended_at = datetime.datetime.now()
+ self.latency = (ended_at - started_at).total_seconds()
+
  self.output = task_output
  self.processed_by_agents.add(agent.role)

+ if self.should_evaluate:
+ task_output.evaluate(task=self, latency=self.latency, tokens=self.tokens)
+
+ self._create_short_term_memory(agent=agent, task_output=task_output)
+ self._create_long_term_memory(agent=agent, task_output=task_output)
+
+
  if self.callback and isinstance(self.callback, Callable):
- callback_res = self.callback(**self.callback_kwargs, **task_output.json_dict)
+ kwargs = { **self.callback_kwargs, **task_output.json_dict }
+ sig = inspect.signature(self.callback)
+ valid_keys = [param.name for param in sig.parameters.values() if param.kind == param.POSITIONAL_OR_KEYWORD]
+ valid_kwargs = { k: kwargs[k] for k in valid_keys }
+ callback_res = self.callback(**valid_kwargs)
  task_output.callback_output = callback_res

  # if self.output_file: ## disabled for now
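The callback is now invoked with only the keyword arguments its signature declares, so extra keys coming from `callback_kwargs` or the JSON output no longer raise `TypeError`. The same `inspect.signature` filtering in isolation (a standalone sketch, not the library code):

```
import inspect

def callback(summary: str, score: float = 0.0):
    return f"{summary} ({score})"

kwargs = {"summary": "done", "score": 0.9, "extra_key": "ignored"}

sig = inspect.signature(callback)
valid_keys = [p.name for p in sig.parameters.values() if p.kind == p.POSITIONAL_OR_KEYWORD]
valid_kwargs = {k: kwargs[k] for k in valid_keys if k in kwargs}  # drop keys the callback does not accept

print(callback(**valid_kwargs))   # -> "done (0.9)"
```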
@@ -586,10 +681,6 @@ Ref. Output image: {output_formats_to_follow}
  # else pydantic_output.model_dump_json() if pydantic_output else result
  # )
  # self._save_file(content)
-
- ended_at = datetime.datetime.now()
- self.execution_span_in_sec = (ended_at - started_at).total_seconds()
-
  return task_output


@@ -603,7 +694,7 @@ Ref. Output image: {output_formats_to_follow}

  @property
  def key(self) -> str:
- output_format = TaskOutputFormat.JSON if self.response_fields else TaskOutputFormat.PYDANTIC if self.pydantic_custom_output is not None else TaskOutputFormat.RAW
+ output_format = TaskOutputFormat.JSON if self.response_fields else TaskOutputFormat.PYDANTIC if self.pydantic_output is not None else TaskOutputFormat.RAW
  source = [self.description, output_format]
  return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

@@ -656,7 +747,7 @@ class ConditionalTask(Task):
  previous_output = task_outputs[task_index - 1] if task_outputs and len(task_outputs) > 1 else None

  if previous_output and not self.should_execute(previous_output):
- self._logger.log(level="debug", message=f"Skipping conditional task: {self.description}", color="yellow")
+ self._logger.log(level="warning", message=f"Skipping conditional task: {self.description}", color="yellow")
  skipped_task_output = self.get_skipped_task_output()
  self.output = skipped_task_output

versionhq/team/team_planner.py CHANGED
@@ -86,7 +86,7 @@ class TeamPlanner:

  Task summaries: {" ".join(task_summary_list)}
  """,
- pydantic_custom_output=TeamPlanIdea
+ pydantic_output=TeamPlanIdea
  )
  output = task.execute_sync(agent=team_planner, context=context, tools=tools)
  return output
versionhq/tool/model.py CHANGED
@@ -58,7 +58,7 @@ class BaseTool(ABC, BaseModel):
  "type": cls.object_type,
  "function": {
  "name": cls.name.replace(" ", "_"),
- "description": cls.description,
+ "description": cls.description if cls.description else "",
  "parameters": {
  "type": "object",
  "properties": p,
@@ -174,7 +174,7 @@ class Tool(BaseTool):
  @model_validator(mode="after")
  def set_up_name(self) -> Self:
  if not self.name:
- self.name = self.func.__name__ if self.func else ""
+ self.name = self.func.__name__ if self.func.__name__ != "<lambda>" else "random_func"

  return self

@@ -187,7 +187,7 @@ class Tool(BaseTool):

  args_schema = {
  name: {
- "description": field.description,
+ "description": field.description if field.description else "",
  "type": self._get_arg_annotations(field.annotation),
  }
  for name, field in self.args_schema.model_fields.items()
@@ -214,34 +214,36 @@ class Tool(BaseTool):
  """

  p, r = dict(), list()
- if self.args_schema:
- for name, field in self.args_schema.model_fields.items():
- if name != "kwargs" and name != "args":
- p.update(
- {
- name: {
- "description": field.description if field.description else "",
- "type": SchemaType(self._get_arg_annotations(field.annotation)).convert(),
- }
+ if not self.args_schema:
+ self.args_schema = self.set_up_args_schema()
+
+ for name, field in self.args_schema.model_fields.items():
+ if name != "kwargs" and name != "args":
+ p.update(
+ {
+ name: {
+ "description": field.description if field.description else "",
+ "type": SchemaType(self._get_arg_annotations(field.annotation)).convert(),
  }
- )
- r.append(name)
-
- properties = {
- "type": self.object_type,
- "function": {
- "name": self.name.replace(" ", "_"),
- "description": self.description if self.description else "a tool function to execute",
- "parameters": {
- "type": "object",
- "properties": p,
- "required": r,
- "additionalProperties": False
- },
- "strict": True,
+ }
+ )
+ r.append(name)
+
+ properties = {
+ "type": self.object_type,
+ "function": {
+ "name": self.name.replace(" ", "_"),
+ "description": self.description if self.description else "a tool function to execute",
+ "parameters": {
+ "type": "object",
+ "properties": p,
+ "required": r,
+ "additionalProperties": False
  },
- }
- self.properties = properties
+ "strict": True,
+ },
+ }
+ self.properties = properties
  return self


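The `properties` payload is now always built from `args_schema`, falling back to `set_up_args_schema()` when none is supplied. A sketch of the function-calling payload a simple tool would end up with, assuming `Tool(func=...)` can infer the argument schema from the function signature (an assumption, not confirmed by this diff):

```
from versionhq.tool.model import Tool

def add(a: int, b: int) -> int:
    return a + b

tool = Tool(func=add)   # name defaults to the function name ("add")

# tool.properties would resemble the structure assembled above, roughly:
# {"type": ..., "function": {"name": "add",
#                            "description": "a tool function to execute",
#                            "parameters": {"type": "object",
#                                           "properties": {"a": {...}, "b": {...}},
#                                           "required": ["a", "b"],
#                                           "additionalProperties": False},
#                            "strict": True}}
```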
@@ -294,7 +296,7 @@ class Tool(BaseTool):

  def _handle_toolset(self, params: Dict[str, Any] = None) -> Any:
  """
- Read the cache from the ToolHandler instance or execute _run() method.
+ Return cached results or run the function and record the results.
  """

  from versionhq.tool.tool_handler import ToolHandler
@@ -308,28 +310,24 @@ class Tool(BaseTool):
  parsed_kwargs = self._parse_args(raw_args=acceptable_kwargs)
  tool_set = ToolSet(tool=self, kwargs=acceptable_kwargs)

- if self.tool_handler and isinstance(self.tool_handler, ToolHandler):
- if self.tool_handler.has_called_before(tool_set):
- self.tool_handler.error = "Agent execution error"
+ if not self.tool_handler or not isinstance(self.tool_handler, ToolHandler):
+ self.tool_handler = ToolHandler(last_used_tool=tool_set, cache_handler=self.cache_handler, should_cache=self.should_cache)

- elif self.tool_handler.cache:
+ try:
+ if self.tool_handler.has_called_before(tool_set) or self.tool_handler.cache:
  result = self.tool_handler.cache.read(tool_name=tool_set.tool.name, input=str(tool_set.kwargs))
- if not result:
- result = self.func(**parsed_kwargs)

- else:
+ if not result:
  result = self.func(**parsed_kwargs)

- else:
- tool_handler = ToolHandler(last_used_tool=tool_set, cache_handler=self.cache_handler, should_cache=self.should_cache)
- self.tool_handler = tool_handler
- result = self.func(**parsed_kwargs)
-
+ if self.should_cache is True:
+ self.tool_handler.record_last_tool_used(last_used_tool=tool_set, output=result, should_cache=self.should_cache)

- if self.should_cache is True:
- self.tool_handler.record_last_tool_used(last_used_tool=tool_set, output=result, should_cache=self.should_cache)
+ return result

- return result
+ except:
+ self.tool_handler.error = "Agent error"
+ return result


  def run(self, params: Dict[str, Any] = None) -> Any:
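The reworked `_handle_toolset()` creates a `ToolHandler` up front, serves repeated calls from the cache when possible, and records the result when `should_cache` is on. A sketch of the caller-side behaviour, assuming `should_cache` can be set on the tool at construction time (an assumption):

```
from versionhq.tool.model import Tool

def add(a: int, b: int) -> int:
    return a + b

tool = Tool(func=add, should_cache=True)    # assumption: should_cache is a model field
first = tool.run(params={"a": 1, "b": 2})   # executes add() and records the call
second = tool.run(params={"a": 1, "b": 2})  # same kwargs: may be answered from the cache
```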
@@ -344,5 +342,5 @@ class ToolSet(BaseModel):
  """
  Store the tool called and any kwargs used. (The tool name and kwargs will be stored in the cache.)
  """
- tool: InstanceOf[Tool] | Type[Tool] = Field(..., description="store the tool instance to be called.")
+ tool: InstanceOf[Tool]| Type[Tool] = Field(..., description="store the tool instance to be called.")
  kwargs: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")
{versionhq-1.1.10.7.dist-info → versionhq-1.1.10.9.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.1.10.7
+ Version: 1.1.10.9
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
@@ -56,24 +56,29 @@ Requires-Dist: appdirs>=1.4.4
  Requires-Dist: langchain>=0.3.14
  Requires-Dist: langchain-openai>=0.2.14
  Requires-Dist: composio-langchain>=0.6.12
+ Requires-Dist: chromadb>=0.6.3
+ Requires-Dist: docling>=2.16.0
+ Requires-Dist: json-repair>=0.35.0
+ Requires-Dist: wheel>=0.45.1
+ Requires-Dist: pdfplumber>=0.11.5
+ Requires-Dist: mem0ai>=0.1.48

  # Overview

  ![MIT license](https://img.shields.io/badge/License-MIT-green)
  [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
  ![PyPI](https://img.shields.io/badge/PyPI->=v1.1.10-blue)
- ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple)
+ ![python ver](https://img.shields.io/badge/Python->=3.12-purple)
  ![pyenv ver](https://img.shields.io/badge/pyenv-2.5.0-orange)


- LLM orchestration frameworks to deploy multi-agent systems focusing on complex outbound tasks.
+ LLM orchestration frameworks to deploy multi-agent systems with task-based formation.

  **Visit:**

  - [PyPI](https://pypi.org/project/versionhq/)
  - [Github (LLM orchestration framework)](https://github.com/versionHQ/multi-agent-system)
- - [Github (Test client app)](https://github.com/versionHQ/test-client-app)
- - [Use case](https://versi0n.io/playground) / [Quick demo](https://res.cloudinary.com/dfeirxlea/video/upload/v1737732977/pj_m_home/pnsyh5mfvmilwgt0eusa.mov)
+ - [Use case](https://versi0n.io/) / [Quick demo](https://res.cloudinary.com/dfeirxlea/video/upload/v1737732977/pj_m_home/pnsyh5mfvmilwgt0eusa.mov)
  - [Documentation](https://chief-oxygen-8a2.notion.site/Documentation-17e923685cf98001a5fad5c4b2acd79b?pvs=4) *Some components are under review.


@@ -86,9 +91,9 @@ LLM orchestration frameworks to deploy multi-agent systems focusing on complex o
  - [Key Features](#key-features)
  - [Agent formation](#agent-formation)
  - [Quick Start](#quick-start)
- - [Case 1. Single agent network:](#case-1-single-agent-network)
+ - [Case 1. Solo Agent:](#case-1-solo-agent)
  - [Return a structured output with a summary in string.](#return-a-structured-output-with-a-summary-in-string)
- - [Case 2. Form a team to handle multiple tasks:](#case-2-form-a-team-to-handle-multiple-tasks)
+ - [Case 2. Supervising:](#case-2-supervising)
  - [Technologies Used](#technologies-used)
  - [Project Structure](#project-structure)
  - [Setup](#setup)
@@ -133,10 +138,10 @@ You can specify which formation you want them to generate, or let the agent deci
  pip install versionhq
  ```

- (Python >= 3.13)
+ (Python >= 3.12)


- ### Case 1. Single agent network:
+ ### Case 1. Solo Agent:

  #### Return a structured output with a summary in string.

@@ -177,7 +182,7 @@ This will return `TaskOutput` that stores a response in string, JSON dict, and P
  )
  ```

- ### Case 2. Form a team to handle multiple tasks:
+ ### Case 2. Supervising:

  ```
  from versionhq.agent.model import Agent
@@ -212,19 +217,21 @@ This will return a list with dictionaries with keys defined in the `ResponseFiel

  Tasks can be delegated to a team manager, peers in the team, or completely new agent.

-
  <hr />

  ## Technologies Used
- **Schema, Database, Data Validation**
- - [Pydantic](https://docs.pydantic.dev/latest/): Data validation and serialization library for Python
- - [Pydantic_core](https://pypi.org/project/pydantic-core/): Core func packages for Pydantic
- - [Chroma DB](https://docs.trychroma.com/): Vector database for storing and querying usage data
- - [SQLite](https://www.sqlite.org/docs.html): C-language library to implements a small SQL database engine
+ **Schema, Data Validation**
+ - [Pydantic](https://docs.pydantic.dev/latest/): Data validation and serialization library for Python.
+ - [Pydantic_core](https://pypi.org/project/pydantic-core/): Core func packages for Pydantic.
  - [Upstage](https://console.upstage.ai/docs/getting-started/overview): Document processer for ML tasks. (Use `Document Parser API` to extract data from documents)
+ - [Docling](https://ds4sd.github.io/docling/): Document parsing
+
+ **Storage**
+ - [mem0ai](https://docs.mem0.ai/quickstart#install-package): Agents' memory storage and management.
+ - [Chroma DB](https://docs.trychroma.com/): Vector database for storing and querying usage data.
+ - [SQLite](https://www.sqlite.org/docs.html): C-language library to implements a small SQL database engine.

  **LLM-curation**
- - OpenAI GPT-4: Advanced language model for analysis and recommendations
  - [LiteLLM](https://docs.litellm.ai/docs/providers): Curation platform to access LLMs

  **Tools**
@@ -242,27 +249,29 @@ Tasks can be delegated to a team manager, peers in the team, or completely new a

  ```
  .
+ .github
+ └── workflows/ # Github actions
+
  src/
- └── versionHQ/ # Orchestration frameworks on Pydantic
- ├── agent/
- └── llm/
- └── task/
- └── team/
- └── tool/
- └── clients/ # Classes to store the client related information
- └── cli/ # CLI commands
- └── ...
-
- ├── db/ # Database files
- ├── chroma.sqlite3
- └── ...
+ └── versionhq/ # Orchestration frameworks
+ ├── agent/ # Components
+ └── llm/
+ └── task/
+ └── team/
+ └── tool/
+ └── cli/
+ └── ...
+
+ ├── db/ # Storage
+ ├── chroma.sqlite3
+ └── ...
+
+ └──tests/ # Pytest
+ │ └── agent/
+ │ └── llm/
+ │ └── ...

- └──tests/
- └── cli/
- └── team/
- └── ...
-
- └── uploads/ # Uploaded files for the project
+ └── uploads/ # Local repo to store the uploaded files

  ```

@@ -284,9 +293,9 @@ src/

  * In case of AssertionError/module mismatch, run Python version control using `.pyenv`
  ```
- pyenv install 3.13.1
- pyenv global 3.13.1 (optional: `pyenv global system` to get back to the system default ver.)
- uv python pin 3.13.1
+ pyenv install 3.12.8
+ pyenv global 3.12.8 (optional: `pyenv global system` to get back to the system default ver.)
+ uv python pin 3.12.8
  ```

  3. Set up environment variables: