versionhq 1.2.4.14__tar.gz → 1.2.4.16__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/.github/workflows/publish.yml +8 -8
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/.gitignore +1 -0
- versionhq-1.2.4.16/.python-version +1 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/PKG-INFO +2 -2
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/llm/index.md +2 -2
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/task/reference.md +2 -1
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/pyproject.toml +2 -2
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/requirements.txt +0 -1
- versionhq-1.2.4.16/requirements_tool.txt +4 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/__init__.py +3 -3
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_prompt/model.py +27 -26
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent/model.py +6 -7
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/llm/llm_vars.py +14 -3
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/llm/model.py +50 -18
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task/model.py +1 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/gpt/cua.py +14 -21
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq.egg-info/PKG-INFO +2 -2
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq.egg-info/SOURCES.txt +3 -2
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq.egg-info/requires.txt +1 -1
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/llm/llm_connection_test.py +3 -2
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/llm/llm_test.py +3 -7
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/task/task_test.py +9 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/tool/gpt_test.py +10 -6
- versionhq-1.2.4.16/tests/tool/playwright_test.py +13 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/uv.lock +360 -324
- versionhq-1.2.4.14/.python-version +0 -1
- versionhq-1.2.4.14/runtime.txt +0 -2
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/.env.sample +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/.github/workflows/deploy_docs.yml +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/.github/workflows/run_tests.yml +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/.pre-commit-config.yaml +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/LICENSE +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/README.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/SECURITY.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/db/preprocess.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/CNAME +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/_logos/favicon.ico +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/_logos/logo192.png +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/agent/config.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/agent/index.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/agent/task-handling.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/agent-network/config.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/agent-network/form.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/agent-network/index.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/agent-network/ref.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/knowledge.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/memory.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/rag-tool.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/task/evaluation.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/task/index.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/task/response-field.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/task/task-execution.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/task/task-output.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/task/task-strc-response.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/task-graph/index.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/core/tool.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/index.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/quickstart.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/stylesheets/main.css +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/docs/tags.md +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/mkdocs.yml +0 -0
- /versionhq-1.2.4.14/requirements-dev.txt → /versionhq-1.2.4.16/requirements_dev.txt +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/setup.cfg +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_prompt/auto_feedback.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_prompt/constants.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/convert_img_url.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/handle_directory.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/is_valid_enum.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/is_valid_url.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/llm_as_a_judge.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/_utils/vars.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent/inhouse_agents.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent/rpm_controller.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent_network/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent_network/formation.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/agent_network/model.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/knowledge/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/knowledge/_utils.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/knowledge/embedding.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/knowledge/model.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/knowledge/source.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/knowledge/source_docling.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/knowledge/storage.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/memory/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/memory/contextual_memory.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/memory/model.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/storage/base.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/storage/mem0_storage.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/storage/rag_storage.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/storage/utils.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task/TEMPLATES/Description.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task/evaluation.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task/structured_response.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task_graph/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task_graph/colors.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task_graph/draft.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/task_graph/model.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/composio/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/composio/model.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/composio/params.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/gpt/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/gpt/_enum.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/gpt/file_search.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/gpt/web_search.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/rag_tool.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/_prompt/auto_feedback_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/_prompt/prompt_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/_sample/sample.csv +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/_sample/sample.json +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/_sample/sample.mp3 +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/_sample/screenshot.png +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/agent/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/agent/agent_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/agent/doc_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/agent_network/Prompts/Demo_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/agent_network/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/agent_network/agent_network_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/agent_network/doc_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/cli/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/clients/customer_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/clients/product_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/conftest.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/doc_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/formation_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/knowledge/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/knowledge/knowledge_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/knowledge/mock_report_compressed.pdf +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/llm/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/memory/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/memory/memory_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/task/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/task/doc_eval_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/task/doc_taskoutput_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/task/doc_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/task/eval_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/task_graph/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/task_graph/doc_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/task_graph/task_graph_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/tool/__init__.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/tool/composio_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/tool/doc_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/tool/rag_tool_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/tool/tool_test.py +0 -0
- {versionhq-1.2.4.14 → versionhq-1.2.4.16}/tests/usecase_test.py +0 -0
--- versionhq-1.2.4.14/.github/workflows/publish.yml
+++ versionhq-1.2.4.16/.github/workflows/publish.yml
@@ -14,17 +14,20 @@ jobs:
 
     steps:
       - uses: actions/checkout@v4
-
-
-
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.13
+      - uses: astral-sh/setup-uv@v5
         with:
-          version: "
+          version: "latest"
+          python-version: 3.13.2
 
       - name: Build release distributions
         run: |
+          uv pip install --python=3.13.2 pip
           uv venv
           source .venv/bin/activate
-          uv pip install --upgrade pip
+          uv pip install --upgrade pip
           uv pip install -r requirements.txt
           uv build --sdist --wheel
 
@@ -59,6 +62,3 @@ jobs:
           packages-dir: dist/
           repository-url: https://upload.pypi.org/legacy/
           verbose: true
-          # user: krik8235
-          # user: __token__
-          # password: ${{ secrets.PYPI_API_TOKEN }}
--- /dev/null
+++ versionhq-1.2.4.16/.python-version
@@ -0,0 +1 @@
+3.13.2
--- versionhq-1.2.4.14/PKG-INFO
+++ versionhq-1.2.4.16/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: versionhq
-Version: 1.2.4.14
+Version: 1.2.4.16
 Summary: Autonomous agent networks for task automation with multi-step reasoning.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -77,7 +77,7 @@ Provides-Extra: tools
 Requires-Dist: html2text>=2024.2.26; extra == "tools"
 Requires-Dist: sec-api>=1.0.28; extra == "tools"
 Requires-Dist: pytest-playwright>=0.7.0; extra == "tools"
-Requires-Dist: selenium>=4.
+Requires-Dist: selenium>=4.30.0; extra == "tools"
 Provides-Extra: torch
 Requires-Dist: torch>=2.6.0; extra == "torch"
 Requires-Dist: torchvision>=0.21.0; extra == "torch"
--- versionhq-1.2.4.14/docs/core/llm/index.md
+++ versionhq-1.2.4.16/docs/core/llm/index.md
@@ -11,7 +11,6 @@ A Pydantic class to store LLM objects and its task handling rules.
 
 You can specify a model and integration platform from the list. Else, we'll use `gemini` or `gpt` via `LiteLLM` by default.
 
-
 **List of available models**
 
 ```python
@@ -26,10 +25,11 @@ You can specify a model and integration platform from the list. Else, we'll use
         "o1-preview",
     ],
     "gemini": [
-        "gemini/gemini-2.
+        "gemini/gemini-2.5-pro-exp-03-25",
         "gemini/gemini-2.0-flash",
         "gemini/gemini-2.0-flash-thinking-exp",
         "gemini/gemini-2.0-flash-lite-preview-02-05",
+        "gemini/gemini-2.0-flash-exp",
     ],
     "anthropic": [
         "claude-3-7-sonnet-latest",
--- versionhq-1.2.4.14/docs/core/task/reference.md
+++ versionhq-1.2.4.16/docs/core/task/reference.md
@@ -11,7 +11,8 @@
 | **`tools`** | Optional[List[Any]] | None | Tools, tool sets, or RAG tools |
 | **`can_use_agent_tools`** | bool | True | Whether to use the agent tools |
 | **`tool_res_as_final`** | bool | False | Whether to make a tool output as a final response from the agent |
-| **`
+| **`is_multimodal`** | bool | False | Whether to handle multimodals as main task - audio/image/video |
+| **`image`** | Optional[str] | None | Absolute file path or URL to the image file (either for prompt context or main task) |
 | **`file`** | Optional[str] | None | Absolute file path or URL to the file |
 | **`audio`** | Optional[str] | None | Absolute file path or URL to the audio file |
 | **`should_test_run`** | bool | False | Whether to turn on auto-feedback learning |
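The two new rows document the multimodal entry points on `Task`. A minimal sketch of how they might be combined, assuming `Task` is exported at the package root as in the quickstart docs (the description and file path are placeholders):

```python
# Hypothetical usage of the new fields documented above.
import versionhq as vhq

task = vhq.Task(
    description="Transcribe the recording and summarize the key points.",
    is_multimodal=True,                # treat the attachment as the main task input
    audio="/abs/path/to/meeting.mp3",  # absolute path or URL, per the table
)
```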
--- versionhq-1.2.4.14/pyproject.toml
+++ versionhq-1.2.4.16/pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
 
 [project]
 name = "versionhq"
-version = "1.2.4.14"
+version = "1.2.4.16"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "Autonomous agent networks for task automation with multi-step reasoning."
 readme = "README.md"
@@ -82,7 +82,7 @@ tools = [
     "html2text>=2024.2.26",
     "sec-api>=1.0.28",
     "pytest-playwright>=0.7.0",
-    "selenium>=4.
+    "selenium>=4.30.0",
 ]
 torch = [
     "torch>=2.6.0",
--- versionhq-1.2.4.14/src/versionhq/__init__.py
+++ versionhq-1.2.4.16/src/versionhq/__init__.py
@@ -10,7 +10,7 @@ load_dotenv(override=True)
 from versionhq.agent.model import Agent
 from versionhq.agent_network.model import AgentNetwork, Formation, Member, TaskHandlingProcess
 from versionhq.llm.model import LLM
-from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODEL_PARAMS, PROVIDERS,
+from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODEL_PARAMS, PROVIDERS, MODELS
 from versionhq.clients.customer.model import Customer
 from versionhq.clients.product.model import Product, ProductProvider
 from versionhq.clients.workflow.model import MessagingWorkflow, MessagingComponent
@@ -35,7 +35,7 @@ from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow
 
 
-__version__ = "1.2.4.14"
+__version__ = "1.2.4.16"
 __all__ = [
     "Agent",
 
@@ -48,7 +48,7 @@ __all__ = [
     "LLM_CONTEXT_WINDOW_SIZES",
     "MODEL_PARAMS",
     "PROVIDERS",
-    "
+    "MODELS",
 
     "Customer",
     "Product",
--- versionhq-1.2.4.14/src/versionhq/_prompt/model.py
+++ versionhq-1.2.4.16/src/versionhq/_prompt/model.py
@@ -14,7 +14,6 @@ class Prompt:
     agent: Any = None
     context: Any = None
 
-
     def __init__(self, task, agent, context):
         from versionhq.agent.model import Agent
         from versionhq.task.model import Task
@@ -32,22 +31,24 @@ class Prompt:
         output_prompt = ""
         output_formats_to_follow = dict()
 
-        if self.task.
-            if
-
-
-
+        if self.task.is_multimodal == False:
+            if self.task.response_schema:
+                if isinstance(self.task.response_schema, list):
+                    for item in self.task.response_schema:
+                        if isinstance(item, ResponseField):
+                            output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
 
-
-
-
+                elif issubclass(self.task.response_schema, BaseModel):
+                    for k, v in self.task.response_schema.model_fields.items():
+                        output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"
 
-
-Ref. Output image: {output_formats_to_follow}
-
-
-                output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."
+                output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+Ref. Output image: {output_formats_to_follow}"""
+            else:
+                output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."
 
+        else:
+            output_prompt = "Return your response in concise manner."
         return dedent(output_prompt)
 
 
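For a `BaseModel` response schema, the rewritten branch emits one `<Return your answer in ...>` hint per field. A standalone sketch of the same loop (the `Summary` model is illustrative, not from the package):

```python
from pydantic import BaseModel

class Summary(BaseModel):  # illustrative schema
    title: str
    score: int

# Mirrors the elif branch above: one type hint per model field.
output_formats_to_follow = {
    k: f"<Return your answer in {v.annotation}>"
    for k, v in Summary.model_fields.items()
}
# {'title': "<Return your answer in <class 'str'>>",
#  'score': "<Return your answer in <class 'int'>>"}
```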
@@ -98,19 +99,20 @@ Ref. Output image: {output_formats_to_follow}
 
         content_messages = {}
 
-        if self.task.
-
-
-
+        if self.task.is_multimodal == False:
+            if self.task.image:
+                img_url = convert_img_url(self.task.image)
+                if img_url:
+                    content_messages.update({ "type": "image_url", "image_url": { "url": img_url }})
 
-
-
-
+            if self.task.file:
+                if is_valid_url(self.task.file):
+                    content_messages.update({ "type": "image_url", "image_url": self.file })
 
-
-
-
-
+            if self.task.audio and self.agent.llm.provider == "gemini":
+                audio_bytes = Path(self.task.audio).read_bytes()
+                encoded_data = base64.b64encode(audio_bytes).decode("utf-8")
+                content_messages.update({ "type": "image_url", "image_url": "data:audio/mp3;base64,{}".format(encoded_data)})
 
         return content_messages
 
@@ -188,7 +190,6 @@ Ref. Output image: {output_formats_to_follow}
         # else:
         #     user_prompt = self.agent._use_trained_data(user_prompt=user_prompt)
 
-
         content_prompt = self._format_content_prompt()
 
         messages = []
--- versionhq-1.2.4.14/src/versionhq/agent/model.py
+++ versionhq-1.2.4.16/src/versionhq/agent/model.py
@@ -356,12 +356,9 @@ class Agent(BaseModel):
         response_format: Optional[Dict[str, Any]] = None,
         tools: Optional[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = None,
         tool_res_as_final: bool = False,
+        file: str = None, # absolute path to the content file (for multimodal use)
     ) -> Tuple[str, UsageMetrics]:
-        """
-        Create formatted prompts using the developer prompt and the agent's backstory, then call the base model.
-        - Execute the task up to `self.max_retry_limit` times in case of receiving an error or empty response.
-        - Pass the task_tools to the model to let them execute.
-        """
+        """Calls LLM."""
 
         task_execution_counter = 0
         iterations = 0
@@ -375,10 +372,10 @@ class Agent(BaseModel):
         Logger(**self._logger_config, filename=self.key).log(level="info", message=f"Messages sent to the model: {messages}", color="blue")
 
         if tool_res_as_final:
-            raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
+            raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True, file=file)
             usage.record_token_usage(*self.func_calling_llm._usages)
         else:
-            raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
+            raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools, file=file)
             usage.record_token_usage(*self.llm._usages)
 
         task_execution_counter += 1
@@ -582,6 +579,7 @@ class Agent(BaseModel):
         raw_response = ""
         user_prompt, dev_prompt = "", ""
         usage = UsageMetrics(id=task.id)
+        file = task.audio if task.is_multimodal and task.audio else task.image if task.is_multimodal and task.image else task.file if task.is_multimodal and task.file else None
 
         if self.max_rpm and self._rpm_controller:
             self._rpm_controller._reset_request_count()
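The added chained conditional selects the first available multimodal attachment. An equivalent, easier-to-read formulation (a sketch, not the package's code):

```python
def pick_multimodal_file(task):
    """Prefer audio, then image, then file; None when the task is not multimodal."""
    if not task.is_multimodal:
        return None
    return task.audio or task.image or task.file or None
```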
@@ -600,6 +598,7 @@ class Agent(BaseModel):
                 response_format=task._structure_response_format(model_provider=self.llm.provider),
                 tools=tools,
                 tool_res_as_final=task.tool_res_as_final,
+                file=file,
             )
 
         except Exception as e:
--- versionhq-1.2.4.14/src/versionhq/llm/llm_vars.py
+++ versionhq-1.2.4.16/src/versionhq/llm/llm_vars.py
@@ -28,14 +28,13 @@ PROVIDERS = {
         "HF_ENDPOINT": "HF_ENDPOINT",
     },
     "azure": {
-        "api_base": "
+        "api_base": "AZURE_OPENAI_ENDPOINT_MODEL_NAME",
         "api_key": "AZURE_OPENAI_API_KEY",
         "api_version": "AZURE_OPENAI_API_VERSION",
     },
     "azure_ai": {
         "api_key": "AZURE_AI_API_KEY",
         "base_url": "AZURE_AI_API_BASE",
-
     }
 }
@@ -47,7 +46,7 @@ ENDPOINTS = [
 
 
 # Resaoning and text generation models
-TEXT_MODELS = {
+MODELS = {
     "openai": [
         "gpt-4.5-preview-2025-02-27",
         "gpt-4",
@@ -59,6 +58,7 @@ TEXT_MODELS = {
         "o1-preview",
     ],
     "gemini": [
+        "gemini/gemini-2.5-pro-exp-03-25",
         "gemini/gemini-2.0-flash",
         "gemini/gemini-2.0-flash-thinking-exp",
         "gemini/gemini-2.0-flash-lite-preview-02-05",
@@ -96,6 +96,10 @@ TEXT_MODELS = {
         "bedrock/cohere.command-light-text-v14",
     ],
     "azure": [
+        "azure/whisper",
+        "azure/whisper-2",
+        "azure/gpt-4o-mini-audio-preview",
+
         "azure/DeepSeek-V3",
         "azure/DeepSeek-R1",
         "azure/Llama-3.3-70B-Instruct",
@@ -163,6 +167,13 @@ TEXT_MODELS = {
 }
 
 
+AUDIO_TO_TEXT_MODELS = [
+    "azure/whisper",
+    "azure/whisper-2",
+    "azure/gpt-4o-mini-audio-preview",
+]
+
+
 """
 Max input token size by the model.
 """
--- versionhq-1.2.4.14/src/versionhq/llm/model.py
+++ versionhq-1.2.4.16/src/versionhq/llm/model.py
@@ -12,9 +12,9 @@ import litellm
 from litellm import JSONSchemaValidationError, get_supported_openai_params, supports_response_schema
 from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict
 
-from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES,
+from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, AUDIO_TO_TEXT_MODELS, MODEL_PARAMS, PROVIDERS, ENDPOINTS
 from versionhq.tool.model import Tool, ToolSet
-from versionhq._utils import Logger
+from versionhq._utils import Logger, UsageMetrics, ErrorType
 
 
 load_dotenv(override=True)
@@ -115,7 +115,7 @@ class LLM(BaseModel):
             self.provider = DEFAULT_MODEL_PROVIDER_NAME
 
         else:
-            provider_model_list =
+            provider_model_list = MODELS.get(self.provider)
             if provider_model_list:
                 self.model = provider_model_list[0]
                 self.provider = self.provider
@@ -127,29 +127,29 @@ class LLM(BaseModel):
         elif self.model and self.provider is None:
             model_match = [
                 item for item in [
-                    [val for val in v if val == self.model][0] for k, v in
+                    [val for val in v if val == self.model][0] for k, v in MODELS.items() if [val for val in v if val == self.model]
                 ] if item
             ]
             model_partial_match = [
                 item for item in [
-                    [val for val in v if val.find(self.model) != -1][0] for k, v in
+                    [val for val in v if val.find(self.model) != -1][0] for k, v in MODELS.items() if [val for val in v if val.find(self.model) != -1]
                 ] if item
             ]
-            provider_match = [k for k, v in
+            provider_match = [k for k, v in MODELS.items() if k == self.model]
 
             if model_match:
                 self.model = model_match[0]
-                self.provider = [k for k, v in
+                self.provider = [k for k, v in MODELS.items() if self.model in v][0]
 
             elif model_partial_match:
                 self.model = model_partial_match[0]
-                self.provider = [k for k, v in
+                self.provider = [k for k, v in MODELS.items() if [item for item in v if item.find(self.model) != -1]][0]
 
             elif provider_match:
                 provider = provider_match[0]
-                if self.
+                if self.MODELS.get(provider):
                     self.provider = provider
-                    self.model = self.
+                    self.model = self.MODELS.get(provider)[0]
                 else:
                     self.provider = DEFAULT_MODEL_PROVIDER_NAME
                     self.model = DEFAULT_MODEL_NAME
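The restored lookups let `LLM` infer a provider from a bare model name. A sketch of the same idea against the `MODELS` table (a standalone helper, not the package's method):

```python
from versionhq.llm.llm_vars import MODELS

def infer_provider(model_name: str) -> str | None:
    # Exact match first, then substring match, mirroring the branches above.
    for provider, models in MODELS.items():
        if model_name in models:
            return provider
    for provider, models in MODELS.items():
        if any(model_name in m for m in models):
            return provider
    return None

assert infer_provider("gemini/gemini-2.0-flash") == "gemini"
```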
@@ -159,7 +159,7 @@ class LLM(BaseModel):
             self.provider = DEFAULT_MODEL_PROVIDER_NAME
 
         else:
-            provider_model_list =
+            provider_model_list = MODELS.get(self.provider)
             if self.model not in provider_model_list:
                 self._logger.log(level="warning", message=f"The provided model: {self._init_model_name} is not in the list. We will assign a default model.", color="yellow")
                 self.model = DEFAULT_MODEL_NAME
@@ -232,7 +232,16 @@ class LLM(BaseModel):
 
         valid_cred = {}
         for k, v in cred.items():
-            val =
+            val = None
+            if '_MODEL_NAME' in v:
+                model_name = self.model.split('/')[-1] if self.model.split('/') else self.model
+                key = v.replace('_MODEL_NAME', f'_{model_name.replace("-", '_').replace(' ', '_').upper()}')
+                val = os.environ.get(key, None)
+                if not val:
+                    val = os.environ.get(v.replace('_MODEL_NAME', ''), None)
+            else:
+                val = os.environ.get(v, None)
+
             if val:
                 valid_cred[str(k)] = val
 
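The `_MODEL_NAME` placeholder lets the Azure `api_base` vary per deployed model. For the template `AZURE_OPENAI_ENDPOINT_MODEL_NAME` and a model such as `azure/whisper-2`, the lookup resolves roughly as below (variable names follow from the code above; the environment values themselves are illustrative):

```python
import os

v = "AZURE_OPENAI_ENDPOINT_MODEL_NAME"  # template from PROVIDERS["azure"]["api_base"]
model = "azure/whisper-2"

model_name = model.split("/")[-1]                                # "whisper-2"
suffix = model_name.replace("-", "_").replace(" ", "_").upper()  # "WHISPER_2"
key = v.replace("_MODEL_NAME", f"_{suffix}")                     # "AZURE_OPENAI_ENDPOINT_WHISPER_2"

# Model-specific variable first, then the generic endpoint as a fallback.
val = os.environ.get(key) or os.environ.get(v.replace("_MODEL_NAME", ""))
```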
@@ -288,12 +297,12 @@ class LLM(BaseModel):
         messages: List[Dict[str, str]],
         response_format: Optional[Dict[str, Any]] = None,
         tools: Optional[List[Tool | ToolSet | Any ]] = None,
-        config: Optional[Dict[str, Any]] =
-        tool_res_as_final: bool = False
+        config: Optional[Dict[str, Any]] = dict(),
+        tool_res_as_final: bool = False,
+        file: str = None
     ) -> str:
-        """
-
-        """
+        """Configures and calls the LLM (chat, text generation, reasoning models)."""
+
         litellm.drop_params = True
         litellm.set_verbose = True
 
@@ -302,9 +311,32 @@ class LLM(BaseModel):
         self._set_callbacks(self.callbacks)
 
         try:
-            res
+            res = None
+            tool_res = ""
             cred = self._set_credentials()
 
+            if file and self.model in AUDIO_TO_TEXT_MODELS:
+                params = self._create_valid_params(config=config)
+                audio_file = open(file, 'rb')
+                res = litellm.transcription(
+                    model=self.model,
+                    file=audio_file,
+                    prompt=messages,
+                    custom_llm_provider=self.endpoint_provider,
+                    response_format="json",
+                    **cred
+                )
+                usage = UsageMetrics()
+                if res:
+                    usage.latency = res._response_ms if hasattr(res, '_response_ms') else 0
+                    self._usages.append(usage)
+                    return res.text
+                else:
+                    usage.record_errors(type=ErrorType.API)
+                    self._usages.append(usage)
+                    return None
+
+
             if self.provider == "gemini":
                 self.response_format = { "type": "json_object" } if not tools and self.model != "gemini/gemini-2.0-flash-thinking-exp" else None
             elif response_format and "json_schema" in response_format:
--- versionhq-1.2.4.14/src/versionhq/task/model.py
+++ versionhq-1.2.4.16/src/versionhq/task/model.py
@@ -314,6 +314,7 @@ class Task(BaseModel):
     name: Optional[str] = Field(default=None)
     description: str = Field(description="Description of the actual task")
     response_schema: Optional[Type[BaseModel] | List[ResponseField]] = Field(default=None, description="stores response format")
+    is_multimodal: bool = False
 
     # tool usage
     tools: Optional[List[Any]] = Field(default_factory=list, description="tools that the agent can use aside from their tools")
--- versionhq-1.2.4.14/src/versionhq/tool/gpt/cua.py
+++ versionhq-1.2.4.16/src/versionhq/tool/gpt/cua.py
@@ -2,37 +2,32 @@ import base64
 import datetime
 import time
 import platform
-from typing import List, Dict, Any, Tuple
+from typing import List, Dict, Any, Tuple, Literal, get_args
 
 from versionhq._utils import convert_img_url
 from versionhq.tool.gpt import openai_client
-from versionhq.tool.gpt._enum import
+from versionhq.tool.gpt._enum import GPTSizeEnum
 from versionhq._utils import is_valid_enum, UsageMetrics, ErrorType, Logger, is_valid_url, handle_directory
 
-
-
+BROWSER = Literal['chromium', 'firefox']
+TYPE = Literal["computer_call_output", "computer_use_preview"]
+ENV = Literal["browser", "mac", "windows", "ubuntu"]
 
 class CUAToolSchema:
-    type:
+    type: TYPE = "computer_use_preview"
+    environment: ENV = "browser"
     display_width: int = 1024
     display_height: int = 768
-    environment: str = GPTCUABrowserEnum.BROWSER.value
 
-    def __init__(
-        self,
-        type: str | GPTCUATypeEnum = None,
-        display_width: int = None,
-        display_height: int = None,
-        environment: str | GPTCUABrowserEnum = None
-    ):
+    def __init__(self, type: str = None, display_width: int = None, display_height: int = None, environment: str = None):
         self.display_height = display_height if display_height else self.display_height
         self.display_width = display_width if display_width else self.display_width
 
-        if type and
-            self.type = type
+        if type and type in get_args(TYPE):
+            self.type = type
 
-        if environment and
-            self.environment = environment
+        if environment and environment in get_args(ENV):
+            self.environment = environment
 
     @property
     def schema(self) -> Dict[str, Any]:
@@ -50,7 +45,7 @@ class GPTToolCUA:
     user_prompt: str = None
     img_url: str = None
     web_url: str = "https://www.google.com"
-    browser:
+    browser: BROWSER = "firefox"
     reasoning_effort: str = GPTSizeEnum.MEDIUM.value
     truncation: str = "auto"
 
@@ -75,7 +70,7 @@ class GPTToolCUA:
     ):
         self.user_prompt = user_prompt
         self.web_url = web_url if is_valid_url(web_url) else None
-        self.browser = browser if browser in
+        self.browser = browser if browser in get_args(BROWSER) else 'chromium'
         self.truncation = truncation if truncation else self.truncation
         self._usage = _usage
         self._response_ids = list()
@@ -421,8 +416,6 @@ class GPTToolCUA:
         else:
             res, _, usage = self._run()
 
-        print("res", res)
-
         self._usage.aggregate(metrics=usage)
         if not res:
             usage.record_errors(type=ErrorType.API)
--- versionhq-1.2.4.14/src/versionhq.egg-info/PKG-INFO
+++ versionhq-1.2.4.16/src/versionhq.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: versionhq
-Version: 1.2.4.14
+Version: 1.2.4.16
 Summary: Autonomous agent networks for task automation with multi-step reasoning.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -77,7 +77,7 @@ Provides-Extra: tools
 Requires-Dist: html2text>=2024.2.26; extra == "tools"
 Requires-Dist: sec-api>=1.0.28; extra == "tools"
 Requires-Dist: pytest-playwright>=0.7.0; extra == "tools"
-Requires-Dist: selenium>=4.
+Requires-Dist: selenium>=4.30.0; extra == "tools"
 Provides-Extra: torch
 Requires-Dist: torch>=2.6.0; extra == "torch"
 Requires-Dist: torchvision>=0.21.0; extra == "torch"
--- versionhq-1.2.4.14/src/versionhq.egg-info/SOURCES.txt
+++ versionhq-1.2.4.16/src/versionhq.egg-info/SOURCES.txt
@@ -7,9 +7,9 @@ README.md
 SECURITY.md
 mkdocs.yml
 pyproject.toml
-requirements-dev.txt
 requirements.txt
-
+requirements_dev.txt
+requirements_tool.txt
 uv.lock
 .github/workflows/deploy_docs.yml
 .github/workflows/publish.yml
@@ -169,5 +169,6 @@ tests/tool/__init__.py
 tests/tool/composio_test.py
 tests/tool/doc_test.py
 tests/tool/gpt_test.py
+tests/tool/playwright_test.py
 tests/tool/rag_tool_test.py
 tests/tool/tool_test.py
--- versionhq-1.2.4.14/tests/llm/llm_connection_test.py
+++ versionhq-1.2.4.16/tests/llm/llm_connection_test.py
@@ -99,6 +99,7 @@ def _test_con_openai(simple_task, tool_task, schema_task, res_field_task):
 
 def _test_con_gemini(simple_task, tool_task, schema_task, res_field_task):
     llms_to_test = [
+        "gemini/gemini-2.5-pro-exp-03-25"
         "gemini/gemini-2.0-flash",
         "gemini/gemini-2.0-flash-thinking-exp",
         "gemini/gemini-2.0-flash-lite-preview-02-05",
@@ -133,13 +134,13 @@ def _test_con_azure(simple_task, tool_task, schema_task, res_field_task):
     llms_to_test = [
         "azure/Phi-4-mini-instruct",
         "azure_ai/Phi-4-mini-instruct",
-        "azure_ai/DeepSeek-V3"
+        "azure_ai/DeepSeek-V3",
     ]
     agents = [set_agent(llm=llm) for llm in llms_to_test]
 
     for agent in agents:
         assert isinstance(agent.llm, LLM)
-        assert agent.llm.provider
+        assert "azure" in agent.llm.provider
         assert agent.llm._init_model_name and agent.llm.provider and agent.llm.llm_config["max_tokens"] == agent.llm_config["max_tokens"]
 
     res_1 = simple_task.execute(agent=agent, context="running a test")