versionhq 1.2.1.7__tar.gz → 1.2.1.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (138)
  1. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/PKG-INFO +5 -12
  2. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/README.md +4 -11
  3. versionhq-1.2.1.7/docs/core/Agent.md → versionhq-1.2.1.9/docs/core/agent/index.md +116 -88
  4. versionhq-1.2.1.7/docs/core/task.md → versionhq-1.2.1.9/docs/core/task/index.md +145 -23
  5. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/core/task/response-field.md +5 -5
  6. versionhq-1.2.1.7/docs/core/task-graph.md → versionhq-1.2.1.9/docs/core/task-graph/index.md +2 -2
  7. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/index.md +3 -3
  8. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/mkdocs.yml +23 -7
  9. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/pyproject.toml +1 -1
  10. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/__init__.py +1 -1
  11. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task/model.py +1 -1
  12. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task_graph/draft.py +15 -14
  13. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task_graph/model.py +6 -2
  14. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq.egg-info/PKG-INFO +5 -12
  15. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq.egg-info/SOURCES.txt +3 -3
  16. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/formation_test.py +0 -1
  17. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/task/doc_test.py +62 -3
  18. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/task/task_test.py +27 -27
  19. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/task_graph/doc_test.py +2 -6
  20. versionhq-1.2.1.9/tests/task_graph/task_graph_test.py +26 -0
  21. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/uv.lock +34 -34
  22. versionhq-1.2.1.7/tests/task_graph/task_graph_test.py +0 -18
  23. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/.env.sample +0 -0
  24. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/.github/workflows/deploy_docs.yml +0 -0
  25. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/.github/workflows/publish.yml +0 -0
  26. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/.github/workflows/publish_testpypi.yml +0 -0
  27. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/.github/workflows/run_tests.yml +0 -0
  28. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/.github/workflows/security_check.yml +0 -0
  29. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/.gitignore +0 -0
  30. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/.pre-commit-config.yaml +0 -0
  31. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/.python-version +0 -0
  32. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/LICENSE +0 -0
  33. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/SECURITY.md +0 -0
  34. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/db/preprocess.py +0 -0
  35. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/CNAME +0 -0
  36. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/_logos/favicon.ico +0 -0
  37. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/_logos/logo192.png +0 -0
  38. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/core/task/evaluation.md +0 -0
  39. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/core/task/task-output.md +0 -0
  40. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/core/tool.md +0 -0
  41. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/quickstart.md +0 -0
  42. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/stylesheets/main.css +0 -0
  43. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/tags.md +0 -0
  44. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/requirements-dev.txt +0 -0
  45. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/requirements.txt +0 -0
  46. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/runtime.txt +0 -0
  47. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/setup.cfg +0 -0
  48. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/_utils/__init__.py +0 -0
  49. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/_utils/i18n.py +0 -0
  50. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/_utils/logger.py +0 -0
  51. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/_utils/process_config.py +0 -0
  52. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/_utils/usage_metrics.py +0 -0
  53. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/_utils/vars.py +0 -0
  54. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  55. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  56. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/agent/__init__.py +0 -0
  57. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/agent/inhouse_agents.py +0 -0
  58. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/agent/model.py +0 -0
  59. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/agent/parser.py +0 -0
  60. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/agent/rpm_controller.py +0 -0
  61. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/agent_network/__init__.py +0 -0
  62. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/agent_network/model.py +0 -0
  63. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/cli/__init__.py +0 -0
  64. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/clients/__init__.py +0 -0
  65. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/clients/customer/__init__.py +0 -0
  66. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/clients/customer/model.py +0 -0
  67. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/clients/product/__init__.py +0 -0
  68. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/clients/product/model.py +0 -0
  69. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/clients/workflow/__init__.py +0 -0
  70. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/clients/workflow/model.py +0 -0
  71. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/knowledge/__init__.py +0 -0
  72. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/knowledge/_utils.py +0 -0
  73. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/knowledge/embedding.py +0 -0
  74. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/knowledge/model.py +0 -0
  75. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/knowledge/source.py +0 -0
  76. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/knowledge/source_docling.py +0 -0
  77. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/knowledge/storage.py +0 -0
  78. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/llm/__init__.py +0 -0
  79. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/llm/llm_vars.py +0 -0
  80. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/llm/model.py +0 -0
  81. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/memory/__init__.py +0 -0
  82. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/memory/contextual_memory.py +0 -0
  83. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/memory/model.py +0 -0
  84. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/storage/__init__.py +0 -0
  85. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/storage/base.py +0 -0
  86. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
  87. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/storage/mem0_storage.py +0 -0
  88. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/storage/rag_storage.py +0 -0
  89. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/storage/task_output_storage.py +0 -0
  90. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/storage/utils.py +0 -0
  91. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task/TEMPLATES/Description.py +0 -0
  92. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task/__init__.py +0 -0
  93. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task/evaluate.py +0 -0
  94. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task/formation.py +0 -0
  95. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task/formatter.py +0 -0
  96. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task/log_handler.py +0 -0
  97. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task/structured_response.py +0 -0
  98. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task_graph/__init__.py +0 -0
  99. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/task_graph/colors.py +0 -0
  100. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/tool/__init__.py +0 -0
  101. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/tool/cache_handler.py +0 -0
  102. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/tool/composio_tool.py +0 -0
  103. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/tool/composio_tool_vars.py +0 -0
  104. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/tool/decorator.py +0 -0
  105. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/tool/model.py +0 -0
  106. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq/tool/tool_handler.py +0 -0
  107. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq.egg-info/dependency_links.txt +0 -0
  108. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq.egg-info/requires.txt +0 -0
  109. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/src/versionhq.egg-info/top_level.txt +0 -0
  110. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/__init__.py +0 -0
  111. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/agent/__init__.py +0 -0
  112. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/agent/agent_test.py +0 -0
  113. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/agent/doc_test.py +0 -0
  114. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/agent_network/Prompts/Demo_test.py +0 -0
  115. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/agent_network/__init__.py +0 -0
  116. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/agent_network/agent_network_test.py +0 -0
  117. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/cli/__init__.py +0 -0
  118. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/clients/customer_test.py +0 -0
  119. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/clients/product_test.py +0 -0
  120. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/clients/workflow_test.py +0 -0
  121. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/conftest.py +0 -0
  122. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/doc_test.py +0 -0
  123. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/knowledge/__init__.py +0 -0
  124. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/knowledge/knowledge_test.py +0 -0
  125. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/knowledge/mock_report_compressed.pdf +0 -0
  126. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/llm/__init__.py +0 -0
  127. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/llm/llm_test.py +0 -0
  128. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/memory/__init__.py +0 -0
  129. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/memory/memory_test.py +0 -0
  130. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/task/__init__.py +0 -0
  131. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/task/doc_taskoutput_test.py +0 -0
  132. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/task/llm_connection_test.py +0 -0
  133. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/task_graph/__init__.py +0 -0
  134. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/tool/__init__.py +0 -0
  135. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/tool/composio_test.py +0 -0
  136. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/tool/doc_test.py +0 -0
  137. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/tool/tool_test.py +0 -0
  138. {versionhq-1.2.1.7 → versionhq-1.2.1.9}/tests/usecase_test.py +0 -0
{versionhq-1.2.1.7 → versionhq-1.2.1.9}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.2.1.7
+ Version: 1.2.1.9
  Summary: An agentic orchestration framework for building agent networks that handle task automation.
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
@@ -81,7 +81,7 @@ Requires-Dist: pygraphviz>=1.14; extra == "pygraphviz"

  # Overview

- [![DL](https://img.shields.io/badge/Download-17K+-red)](https://clickpy.clickhouse.com/dashboard/versionhq)
+ [![DL](https://img.shields.io/badge/Download-20K+-red)](https://clickpy.clickhouse.com/dashboard/versionhq)
  ![MIT license](https://img.shields.io/badge/License-MIT-green)
  [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
  ![PyPI](https://img.shields.io/badge/PyPI-v1.2.1+-blue)
@@ -93,11 +93,10 @@ Agentic orchestration framework for multi-agent networks and task graphs for com

  **Visit:**

- - [PyPI](https://pypi.org/project/versionhq/)
+ - [Playground](https://versi0n.io/playground)
  - [Docs](https://docs.versi0n.io)
  - [Github Repository](https://github.com/versionHQ/multi-agent-system)
- - [Playground](https://versi0n.io/)
-
+ - [PyPI](https://pypi.org/project/versionhq/)

  <hr />

@@ -594,10 +593,4 @@ Common issues and solutions:
  ## Frequently Asked Questions (FAQ)
  **Q. Where can I see if the agent is working?**

- A. Visit [playground](https://versi0n.io).
-
-
- **Q. How do you analyze the customer?**
-
- A. We employ soft clustering for each customer.
- <img width="200" src="https://res.cloudinary.com/dfeirxlea/image/upload/v1732732628/pj_m_agents/ito937s5d5x0so8isvw6.png">
+ A. Visit [playground](https://versi0n.io/playground).
{versionhq-1.2.1.7 → versionhq-1.2.1.9}/README.md
@@ -1,6 +1,6 @@
  # Overview

- [![DL](https://img.shields.io/badge/Download-17K+-red)](https://clickpy.clickhouse.com/dashboard/versionhq)
+ [![DL](https://img.shields.io/badge/Download-20K+-red)](https://clickpy.clickhouse.com/dashboard/versionhq)
  ![MIT license](https://img.shields.io/badge/License-MIT-green)
  [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
  ![PyPI](https://img.shields.io/badge/PyPI-v1.2.1+-blue)
@@ -12,11 +12,10 @@ Agentic orchestration framework for multi-agent networks and task graphs for com

  **Visit:**

- - [PyPI](https://pypi.org/project/versionhq/)
+ - [Playground](https://versi0n.io/playground)
  - [Docs](https://docs.versi0n.io)
  - [Github Repository](https://github.com/versionHQ/multi-agent-system)
- - [Playground](https://versi0n.io/)
-
+ - [PyPI](https://pypi.org/project/versionhq/)

  <hr />

@@ -513,10 +512,4 @@ Common issues and solutions:
  ## Frequently Asked Questions (FAQ)
  **Q. Where can I see if the agent is working?**

- A. Visit [playground](https://versi0n.io).
-
-
- **Q. How do you analyze the customer?**
-
- A. We employ soft clustering for each customer.
- <img width="200" src="https://res.cloudinary.com/dfeirxlea/image/upload/v1732732628/pj_m_agents/ito937s5d5x0so8isvw6.png">
+ A. Visit [playground](https://versi0n.io/playground).
versionhq-1.2.1.7/docs/core/Agent.md → versionhq-1.2.1.9/docs/core/agent/index.md
@@ -27,9 +27,9 @@ agent = vhq.Agent(

  <hr />

- ## Customization
+ ## Customizing

- ### Model optimization
+ **Model Optimization**

  `[var]`<bold>`llm: Optional[str | LLM | Dict[str, Any]] = "gpt-4o"`</bold>

@@ -47,7 +47,9 @@ agent = vhq.Agent(
  )
  ```

- ### Switching models
+ <hr/>
+
+ **Switching Models**

  `[class method]`<bold>`update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self`<bold>

@@ -69,12 +71,14 @@ assert agent.llm.max_tokens == 3000

  <hr/>

- ### Developer Prompt (System Prompt)
+ **Developer Prompt (System Prompt)**

  `[var]`<bold>`backstory: Optional[str] = TEMPLATE_BACKSTORY`<bold>

  Backstory will be drafted automatically using the given role, goal and other values in the Agent model, and converted into the **developer prompt** when the agent executes the task.

+ <hr/>
+
  **Backstory template (full) for auto drafting:**

  ```python
@@ -127,9 +131,11 @@ agent = vhq.Agent(
  )
  ```

- ## Task Execution Rules
+ <hr />
+
+ ## Executing Tasks

- ### Delegation
+ **Delegation**

  `[var]`<bold>`allow_delegation: [bool] = False`</bold>

@@ -145,7 +151,9 @@ agent = vhq.Agent(
  )
  ```

- ### Max Retry Limit
+ <hr />
+
+ **Max Retry Limit**

  `[var]`<bold>`max_retry_limit: Optional[int] = 2`</bold>

@@ -161,7 +169,9 @@ agent = vhq.Agent(
  )
  ```

- ### Maximum Number of Iterations (MaxIt)
+ <hr />
+
+ **Maximum Number of Iterations (maxit)**

  `[var]`<bold>`maxit: Optional[int] = 25`</bold>

@@ -179,76 +189,9 @@ agent = vhq.Agent(
  )
  ```

- ### Callbacks
-
- `[var]`<bold>`callbacks: Optional[List[Callable]] = None`</bold>
-
- You can add callback functions that the agent will run after executing any task.
-
- By default, raw response from the agent will be added to the arguments of the callback function.
-
- e.g. Format a response after executing the task:
-
- ```python
- import json
- import versionhq as vhq
- from typing import Dict, Any
-
-
- def format_response(res: str = None) -> str | Dict[str, Any]:
-     try:
-         r = json.dumps(eval(res))
-         formatted_res = json.loads(r)
-         return formatted_res
-     except:
-         return res
-
- agent = vhq.Agent(
-     role="Marketing Analyst",
-     goal="Coping with increased price competition in saturated markets.",
-     callbacks=[format_response]
- )
- ```
-
- **Multiple callbacks to call**
-
- The callback functions are called in order of the list index referring to the task response and response from the previous callback functions by default.
-
- e.g. Validate an initial response from the assigned agent, and format the response.
-
- ```python
- import json
- from typing import Dict, Any
- import versionhq as vhq
-
- def assessment(res: str) -> str:
-     try:
-         sub_agent = vhq.Agent(role="Validator", goal="Validate the given solutions.")
-         task = vhq.Task(
-             description=f"Assess the given solution based on feasibilities and fits to client's strategies, then refine the solution if necessary.\nSolution: {res}"
-         )
-         r = task.sync_execute(agent=sub_agent)
-         return r.raw
-
-     except:
-         return res
-
- def format_response(res: str = None) -> str | Dict[str, Any]:
-     try:
-         r = json.dumps(eval(res))
-         formatted_res = json.loads(r)
-         return formatted_res
-     except:
-         return res
-
- agent = vhq.Agent(
-     role="Marketing Analyst",
-     goal="Build solutions to address increased price competition in saturated markets",
-     callbacks=[assessment, format_response] # add multiple funcs as callbacks - executed in order of index
- )
- ```
+ <hr />

- ### Context Window
+ **Context Window**

  `[var]`<bold>`respect_context_window: [bool] = True`</bold>

@@ -260,7 +203,10 @@ By default, the agent will follow **the 80% rule** - where they only use 80% of

  You can turn off this rule by setting `respect_context_window` False to have larger context window.

- ### Max Tokens
+
+ <hr />
+
+ **Max Tokens**

  `[var]`<bold>`max_tokens: Optional[int] = None`</bold>

@@ -268,7 +214,10 @@ Max tokens defines the maximum number of tokens in the generated response. Token

  By default, the agent will follow the default max_tokens of the model, but you can specify the max token to limit the length of the generated output.

- ### Maximum Execution Time
+
+ <hr />
+
+ **Maximum Execution Time**

  `[var]`<bold>`max_execution_times: Optional[int] = None`</bold>

@@ -276,7 +225,10 @@ The maximum amount of wall clock time to spend in the execution loop.

  By default, the agent will follow the default setting of the model.

- ### Maximum RPM (Requests Per Minute)
+
+ <hr />
+
+ **Maximum RPM (Requests Per Minute)**

  `[var]`<bold>`max_rpm: Optional[int] = None`</bold>

@@ -284,7 +236,9 @@ The maximum number of requests that the agent can send to the LLM.

  By default, the agent will follow the default setting of the model. When the value is given, we let the model sleep for 60 seconds when the number of executions exceeds the maximum requests per minute.

- ### Other LLM Configuration
+ <hr />
+
+ **Other LLM Configuration**

  `[var]`<bold>`llm_config: Optional[Dict[str, Any]] = None`</bold>

@@ -326,12 +280,84 @@ print(agent.llm)
  # max_completion_tokens=10000,
  # )
  ```
+ <hr >
+
+ ## Callbacks
+
+ `[var]`<bold>`callbacks: Optional[List[Callable]] = None`</bold>
+
+ You can add callback functions that the agent will run after executing any task.
+
+ By default, raw response from the agent will be added to the arguments of the callback function.
+
+ e.g. Format a response after executing the task:
+
+ ```python
+ import json
+ import versionhq as vhq
+ from typing import Dict, Any
+
+
+ def format_response(res: str = None) -> str | Dict[str, Any]:
+     try:
+         r = json.dumps(eval(res))
+         formatted_res = json.loads(r)
+         return formatted_res
+     except:
+         return res
+
+ agent = vhq.Agent(
+     role="Marketing Analyst",
+     goal="Coping with increased price competition in saturated markets.",
+     callbacks=[format_response]
+ )
+ ```
+
+ <hr />
+
+ **Multiple callbacks to call**
+
+ The callback functions are called in order of the list index referring to the task response and response from the previous callback functions by default.
+
+ e.g. Validate an initial response from the assigned agent, and format the response.
+
+ ```python
+ import json
+ from typing import Dict, Any
+ import versionhq as vhq
+
+ def assessment(res: str) -> str:
+     try:
+         sub_agent = vhq.Agent(role="Validator", goal="Validate the given solutions.")
+         task = vhq.Task(
+             description=f"Assess the given solution based on feasibilities and fits to client's strategies, then refine the solution if necessary.\nSolution: {res}"
+         )
+         r = task.sync_execute(agent=sub_agent)
+         return r.raw
+
+     except:
+         return res
+
+ def format_response(res: str = None) -> str | Dict[str, Any]:
+     try:
+         r = json.dumps(eval(res))
+         formatted_res = json.loads(r)
+         return formatted_res
+     except:
+         return res
+
+ agent = vhq.Agent(
+     role="Marketing Analyst",
+     goal="Build solutions to address increased price competition in saturated markets",
+     callbacks=[assessment, format_response] # add multiple funcs as callbacks - executed in order of index
+ )
+ ```

  <hr />

- ## Knowledge
+ ## Building Knowledge

- ### Knowledge Sources
+ **Knowlege Source**

  `[var]`<bold>`knowledge_sources: Optional[List[KnowledgeSource]] = None`</bold>

@@ -373,9 +399,9 @@ assert "gold" in res.raw == True

  <hr />

- ## Memory
+ ## Accessing Memories

- ### Store task execution results in memory
+ Store task execution results in memory

  `[var]`<bold>`use_memory: bool = False`</bold>

@@ -397,7 +423,9 @@ print(agent.long_term_memory)
  # returns LongTermMemory object.
  ```

- ### RAG Storage
+ <hr />
+
+ **RAG Storage**

  When the agent is not given any `memory_config` values, they will create `RAGStorage` to store memory:

@@ -418,7 +446,7 @@ MEM0 Storage

  ## Utilities

- ### Model configuration
+ **Model configuration**

  `[var]`<bold>`config: Optional[Dict[str, Any]] = None`</bold>

@@ -450,7 +478,7 @@ agent = vhq.Agent(

  <hr />

- ### Updating model values
+ **Updating existing agents**

  `[class method]`<bold>`update(self, **kwargs) -> Self`</bold>

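The hunks above only retitle the agent-configuration docs (model optimization, max tokens, max RPM, other LLM configuration, updating agents); the underlying fields are unchanged. For orientation, a minimal sketch that combines the fields and methods named in those hunks — the parameter values here are illustrative, not taken from the package:

```python
# Illustrative only -- not part of the diff. Field and method names come from the
# documentation hunks above (llm, respect_context_window, max_tokens, max_rpm,
# llm_config, update_llm, update); the values are made up for the example.
import versionhq as vhq

agent = vhq.Agent(
    role="Marketing Analyst",
    goal="Analyze price competition in saturated markets",
    llm="gpt-4o",                    # documented default model
    respect_context_window=True,     # keep the documented 80% context-window rule
    max_tokens=3000,                 # cap the length of the generated response
    max_rpm=10,                      # model sleeps 60s when requests/minute exceed this
    llm_config=dict(temperature=1, top_p=0.1),  # extra LLM parameters passed through
)

# switch models after creation, per the documented update_llm() signature
agent = agent.update_llm(llm="gemini-2.0", llm_config=dict(max_tokens=3000))

# update other fields in place, per the documented update() class method
agent = agent.update(goal="Draft a pricing strategy")
```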
versionhq-1.2.1.7/docs/core/task.md → versionhq-1.2.1.9/docs/core/task/index.md
@@ -9,7 +9,7 @@ tags:

  A class to store and manage information for individual tasks, including their assignment to agents or agent networks, and dependencies via a node-based system that tracks conditions and status.

- Ref. Node / Edge / TaskGraph class
+ Ref. Node / Edge / <a href="/core/task-graph">TaskGraph</a> class

  <hr />

@@ -284,6 +284,7 @@ Context can consist of `Task` objects, `TaskOutput` objects, plain text `strings

  In this scenario, `sub_task_2` executes before the main task. Its string output is then incorporated into the main task's context prompt on top of other context before the main task is executed.

+ <hr>

  ## Executing

@@ -298,7 +299,6 @@ import versionhq as vhq

  task = vhq.Task(
      description="return the output following the given prompt.",
-     response_fields=[vhq.ResponseField(title="test1", data_type=str, required=True)],
      allow_delegation=True
  )
  task.execute()
@@ -308,47 +308,169 @@ assert "vhq-Delegated-Agent" in task.processed_agents # delegated agent
  assert task.delegations ==1
  ```

+ <hr>

- <!--
+ **SYNC - ASYNC**

- ## Callbacks
- callback: Optional[Callable] = Field(default=None, description="callback to be executed after the task is completed.")
- callback_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict, description="kwargs for the callback when the callback is callable")
+ `[var]`<bold>`type: bool = False`</bold>

+ You can specify whether the task will be executed asynchronously.

- ### tools
- tools: Optional[List[ToolSet | Tool | Any]] = Field(default_factory=list, description="tools that the agent can use aside from their tools")
- can_use_agent_tools: bool = Field(default=False, description="whether the agent can use their own tools when executing the task")
- tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")
+ ```python
+ import versionhq as vhq

+ task = vhq.Task(
+     description="Return a word: 'test'",
+     type=vhq.TaskExecutionType.ASYNC # default: vhq.TaskExecutionType.SYNC
+ )

+ from unittest.mock import patch
+ with patch.object(vhq.Agent, "execute_task", return_value="test") as execute:
+     res = task.execute()
+     assert res.raw == "test"
+     execute.assert_called_once_with(task=task, context=None, task_tools=list())
+ ```

+ <hr>

- ## Executing tasks
- EXECUTION type
+ **Using tools**

- ### Sync
+ `[var]`<bold>`tools: Optional[List[ToolSet | Tool | Any]] = None`</bold>

- <hr />
+ `[var]`<bold>`tool_res_as_final: bool = False`</bold>

- ### Async

- <hr />
+ Tasks can directly store tools explicitly called by the agent.

- ### Assigning agents
+ If the results from the tool should be the final results, set `tool_res_as_final` True.

- <hr />
+ This will allow the agent to store the tool results in the `tool_output` field of `TaskOutput` object.

- ### Context

+ ```python
+ import versionhq as vhq
+ from typing import Callable
+
+ def random_func(message: str) -> str:
+     return message + "_demo"
+
+ tool = vhq.Tool(name="tool", func=random_func)
+ tool_set = vhq.ToolSet(tool=tool, kwargs=dict(message="empty func"))
+ task = vhq.Task(
+     description="execute the given tools",
+     tools=[tool_set,], # stores tools
+     tool_res_as_final=True, # stores tool results in TaskOutput object
+ )
+
+ res = task.execute()
+ assert res.tool_output == "empty func_demo"
+ ```
+
+ Ref. <a href="/core/tool">Tool</a> class / <a href="/core/task/task-output">TaskOutput</a> class
+
+ <hr>
+
+ **Using agents' tools**
+
+ `[var]`<bold>`can_use_agent_tools: bool = True`</bold>
+
+ Tasks can explicitly stop/start using agent tools on top of the tools stored in the task object.
+
+ ```python
+ import versionhq as vhq
+
+ simple_tool = vhq.Tool(name="simple tool", func=lambda x: "simple func")
+ agent = vhq.Agent(role="demo", goal="execute tools", tools=[simple_tool,])
+ task = vhq.Task(
+     description="execute tools",
+     can_use_agent_tools=True, # Flagged
+     tool_res_as_final=True
+ )
+ res = task.execute(agent=agent)
+ assert res.tool_output == "simple func"
+ ```

+ <hr>
+
+ ## Callback
+
+ `[var]`<bold>`callback: Optional[Callable] = None`</bold>
+
+ `[var]`<bold>`callback_kwargs: Optional[Dict[str, Any]] = dict()`</bold>
+
+ After executing the task, you can run a `callback` function with `callback_kwargs` and task output as parameters.
+
+ Callback results will be stored in `callback_output` filed of the `TaskOutput` object.
+
+ ```python
+ import versionhq as vhq
+
+ def callback_func(condition: str, test1: str):
+     return f"Result: {test1}, condition added: {condition}"
+
+ task = vhq.Task(
+     description="return the output following the given prompt.",
+     callback=callback_func,
+     callback_kwargs=dict(condition="demo for pytest")
+ )
+ res = task.execute()
+
+ assert res and isinstance(res, vhq.TaskOutput)
+ assert res.task_id is task.id
+ assert "demo for pytest" in res.callback_output
+ ```
+
+ <hr>

  ## Evaluating

- should_evaluate: bool = Field(default=False, description="True to run the evaluation flow")
- eval_criteria: Optional[List[str]] = Field(default_factory=list, description="criteria to evaluate the outcome. i.e., fit to the brand tone")
+ `[var]`<bold>`should_evaluate: bool = False`</bold>
+
+ `[var]`<bold>`eval_criteria: Optional[List[str]] = list()`</bold>
+
+ You can turn on customized evaluations using the given criteria.
+
+ Refer <a href="/core/task/task-output">TaskOutput</a> class for details.
+
+ <hr>
+
+
+ ## Reference
+
+ ### Variables
+
+ | <div style="width:160px">**Variable**</div> | **Data Type** | **Default** | **Nullable** | **Description** |
+ | :--- | :--- | :--- | :--- | :--- |
+ | **`id`** | UUID | uuid.uuid4() | False | Stores task `id` as an identifier. |
+ | **`name`** | Optional[str] | None | True | Stores a task name (Inherited as `node` identifier if the task is dependent) |
+ | **`description`** | str | None | False | Required field to store a concise task description |
+ | **`pydantic_output`** | Optional[Type[BaseModel]] | None | True | Stores pydantic custom output class for structured response |
+ | **`response_fields`** | Optional[List[ResponseField]] | list() | True | Stores JSON formats for stuructured response |
+ | **`tools`** | Optional[List[ToolSet | Tool | Any]] | None | True | Stores tools to be called when the agent executes the task. |
+ | **`can_use_agent_tools`** | bool | True | - | Whether to use the agent tools |
+ | **`tool_res_as_final`** | bool | False | - | Whether to make the tool response a final response from the agent |
+ | **`execution_type`** | TaskExecutionType | TaskExecutionType.SYNC | - | Sync or async execution |
+ | **`allow_delegation`** | bool | False | - | Whether to allow the agent to delegate the task to another agent |
+ | **`callback`** | Optional[Callable] | None | True | Callback function to be executed after LLM calling |
+ | **`callback_kwargs`** | Optional[Dict[str, Any]] | dict() | True | Args for the callback function (if any)|
+ | **`should_evaluate`** | bool | False | - | Whether to evaluate the task output using eval criteria |
+ | **`eval_criteria`** | Optional[List[str]] | list() | True | Evaluation criteria given by the human client |
+ | **`processed_agents`** | Set[str] | set() | True | [Ops] Stores roles of the agents executed the task |
+ | **`tool_errors`** | int | 0 | True | [Ops] Stores number of tool errors |
+ | **`delegation`** | int | 0 | True | [Ops] Stores number of agent delegations |
+ | **`output`** | Optional[TaskOutput] | None | True | [Ops] Stores `TaskOutput` object after the execution |
+
+
+ ### Class Methods
+
+ | <div style="width:120px">**Method**</div> | <div style="width:300px">**Params**</div> | **Returns** | **Description** |
+ | :--- | :--- | :--- | :--- |
+ | **`execute`** | <p>type: TaskExecutionType = None<br>agent: Optional["vhq.Agent"] = None<br>context: Optional[Any] = None</p> | InstanceOf[`TaskOutput`] or None (error) | A main method to handle task execution. Auto-build an agent when the agent is not given. |


- ## Recording
+ ### Properties

- output: Optional[TaskOutput] = Field(default=None, description="store the final task output in TaskOutput class") -->
+ | <div style="width:120px">**Property**</div> | **Returns** | **Description** |
+ | :--- | :--- | :--- |
+ | **`key`** | str | Returns task key based on its description and output format. |
+ | **`summary`** | str | Returns a summary of the task based on its id, description and tools. |
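The rewritten task docs above document `should_evaluate` and `eval_criteria` without an inline example. A minimal sketch using only the fields named in that hunk; where the evaluation results land on the returned object is left to the `TaskOutput` docs referenced above:

```python
# Illustrative only -- not part of the diff. Uses the should_evaluate /
# eval_criteria fields documented in the hunk above; the evaluation results
# are attached to the returned TaskOutput (see the TaskOutput docs).
import versionhq as vhq

task = vhq.Task(
    description="Draft a short positioning statement for a mid-market SaaS product.",
    should_evaluate=True,                      # turn on the evaluation flow
    eval_criteria=["fit to the brand tone"],   # criteria given by the human client
)

res = task.execute()
assert res and isinstance(res, vhq.TaskOutput)
```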
{versionhq-1.2.1.7 → versionhq-1.2.1.9}/docs/core/task/response-field.md
@@ -7,7 +7,7 @@ tags:

  <class>`class` versionhq.task.model.<bold>ResponseField<bold></class>

- A Pydantic class to store response formats to create JSON response schema.
+ A Pydantic class to store response formats to generate a structured response in JSON.

  <hr/>

@@ -105,7 +105,7 @@ Agents can handle **one layer** of nested items usign `properties` and `items` f

  We highly recommend to use `gemini-x` or `gpt-x` to get stable results.

- ### List with Object
+ ### Object in List

  ```python
  import versionhq as vhq
@@ -129,7 +129,7 @@ list_with_objects = vhq.ResponseField(

  <hr />

- ### List with List
+ ### List in List

  ```python
  import versionhq as vhq
@@ -150,7 +150,7 @@ list_with_list = vhq.ResponseField(

  <hr />

- ### Object with List
+ ### List in Object

  ```python
  import versionhq as vhq
@@ -173,7 +173,7 @@ dict_with_list = vhq.ResponseField(

  <hr />

- ### Object with Object
+ ### Object in Object

  ```python
  import versionhq as vhq
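The hunks above only rename the nesting headings ("Object in List", "List in List", and so on); the example code bodies are unchanged and mostly outside the shown context. For orientation, a rough sketch of the "Object in List" pattern — the use of `items` and `properties` here is this editor's reading of those docs, not code from the package, so check the full file for the exact shape:

```python
# Illustrative only -- not part of the diff. title/data_type/required are confirmed
# by the task docs; treating `items` as the element type and `properties` as nested
# ResponseField definitions is an assumption based on the renamed sections above.
import versionhq as vhq

list_with_objects = vhq.ResponseField(
    title="offers",
    data_type=list,
    items=dict,          # each list element is an object (one layer of nesting)
    properties=[
        vhq.ResponseField(title="name", data_type=str, required=True),
        vhq.ResponseField(title="price", data_type=float, required=True),
    ],
)

task = vhq.Task(
    description="List three product offers with a name and a price.",
    response_fields=[list_with_objects],
)
```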
versionhq-1.2.1.7/docs/core/task-graph.md → versionhq-1.2.1.9/docs/core/task-graph/index.md
@@ -23,7 +23,7 @@ The following example demonstrates a simple concept of a `supervising` agent net
  You can define nodes and edges mannually by creating nodes from tasks, and defining edges.


- ### Generating TaskGraph
+ ### Generating

  ```python
  import versionhq as vhq
@@ -57,7 +57,7 @@ assert critical_path and duration and paths
  ```


- ### Activating TaskGraph
+ ### Activating

  Calling `.activate()` begins execution of the graph's nodes, respecting dependencies [`dependency-met`] and prioritizing the critical path.
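The remaining entries in the file listing (pyproject.toml, src/versionhq/__init__.py, and the egg-info metadata) each carry the same one-line version bump shown in the PKG-INFO hunk. A quick way to confirm which side of this diff is installed — assuming the package exposes `__version__` from `__init__.py`, which that one-line change suggests:

```python
# Illustrative only -- assumes versionhq exposes __version__ (the one-line change
# to src/versionhq/__init__.py in this diff suggests the version string lives there).
# pip install versionhq==1.2.1.9
import versionhq as vhq

print(vhq.__version__)  # expected "1.2.1.9" after upgrading, "1.2.1.7" before
```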