versionhq 1.2.4.7__tar.gz → 1.2.4.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/.github/workflows/run_tests.yml +1 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/PKG-INFO +7 -3
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/README.md +5 -2
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/task/task-execution.md +1 -1
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/task/task-output.md +9 -4
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/task/task-strc-response.md +5 -2
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/quickstart.md +5 -3
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/pyproject.toml +2 -1
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/__init__.py +4 -4
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/logger.py +1 -1
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent/model.py +4 -4
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task/model.py +5 -3
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/gpt/_enum.py +2 -2
- versionhq-1.2.4.8/src/versionhq/tool/gpt/cua.py +295 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/gpt/file_search.py +10 -3
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/gpt/web_search.py +13 -6
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq.egg-info/PKG-INFO +7 -3
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq.egg-info/SOURCES.txt +1 -1
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq.egg-info/requires.txt +1 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/agent_network/agent_network_test.py +1 -1
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/doc_test.py +6 -4
- versionhq-1.2.4.8/tests/task/doc_taskoutput_test.py +39 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/task/doc_test.py +7 -4
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/tool/gpt_test.py +26 -12
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/uv.lock +475 -321
- versionhq-1.2.4.7/src/versionhq/tool/gpt/cup.py +0 -145
- versionhq-1.2.4.7/tests/task/doc_taskoutput_test.py +0 -36
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/.env.sample +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/.github/workflows/deploy_docs.yml +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/.github/workflows/publish.yml +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/.gitignore +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/.pre-commit-config.yaml +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/.python-version +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/LICENSE +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/SECURITY.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/db/preprocess.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/CNAME +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/_logos/favicon.ico +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/_logos/logo192.png +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/agent/config.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/agent/index.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/agent/task-handling.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/agent-network/config.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/agent-network/form.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/agent-network/index.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/agent-network/ref.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/knowledge.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/llm/index.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/memory.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/rag-tool.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/task/evaluation.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/task/index.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/task/reference.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/task/response-field.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/task-graph/index.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/core/tool.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/index.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/stylesheets/main.css +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/docs/tags.md +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/mkdocs.yml +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/requirements-dev.txt +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/requirements.txt +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/runtime.txt +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/setup.cfg +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_prompt/auto_feedback.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_prompt/constants.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_prompt/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/convert_img_url.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/is_valid_enum.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/is_valid_url.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/llm_as_a_judge.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/_utils/vars.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent/inhouse_agents.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent/rpm_controller.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent_network/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent_network/formation.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/agent_network/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/knowledge/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/knowledge/_utils.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/knowledge/embedding.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/knowledge/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/knowledge/source.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/knowledge/source_docling.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/knowledge/storage.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/llm/llm_vars.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/llm/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/memory/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/memory/contextual_memory.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/memory/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/storage/base.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/storage/mem0_storage.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/storage/rag_storage.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/storage/utils.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task/TEMPLATES/Description.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task/evaluation.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task/structured_response.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task_graph/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task_graph/colors.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task_graph/draft.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/task_graph/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/composio/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/composio/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/composio/params.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/gpt/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/rag_tool.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/_prompt/auto_feedback_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/_prompt/prompt_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/_sample/sample.csv +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/_sample/sample.json +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/_sample/sample.mp3 +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/_sample/screenshot.png +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/agent/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/agent/agent_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/agent/doc_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/agent_network/Prompts/Demo_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/agent_network/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/agent_network/doc_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/cli/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/clients/customer_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/clients/product_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/conftest.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/formation_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/knowledge/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/knowledge/knowledge_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/knowledge/mock_report_compressed.pdf +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/llm/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/llm/llm_connection_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/llm/llm_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/memory/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/memory/memory_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/task/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/task/doc_eval_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/task/eval_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/task/task_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/task_graph/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/task_graph/doc_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/task_graph/task_graph_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/tool/__init__.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/tool/composio_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/tool/doc_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/tool/rag_tool_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/tool/tool_test.py +0 -0
- {versionhq-1.2.4.7 → versionhq-1.2.4.8}/tests/usecase_test.py +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.2
|
2
2
|
Name: versionhq
|
3
|
-
Version: 1.2.4.
|
3
|
+
Version: 1.2.4.8
|
4
4
|
Summary: Autonomous agent networks for task automation with multi-step reasoning.
|
5
5
|
Author-email: Kuriko Iwai <kuriko@versi0n.io>
|
6
6
|
License: MIT License
|
@@ -77,6 +77,7 @@ Requires-Dist: pygraphviz>=1.14; extra == "pygraphviz"
|
|
77
77
|
Provides-Extra: tools
|
78
78
|
Requires-Dist: html2text>=2024.2.26; extra == "tools"
|
79
79
|
Requires-Dist: sec-api>=1.0.28; extra == "tools"
|
80
|
+
Requires-Dist: pytest-playwright>=0.7.0; extra == "tools"
|
80
81
|
Provides-Extra: torch
|
81
82
|
Requires-Dist: torch>=2.6.0; extra == "torch"
|
82
83
|
Requires-Dist: torchvision>=0.21.0; extra == "torch"
|
@@ -290,8 +291,11 @@ class CustomOutput(BaseModel):
|
|
290
291
|
test1: str
|
291
292
|
test2: list[str]
|
292
293
|
|
293
|
-
def dummy_func(message: str,
|
294
|
-
|
294
|
+
def dummy_func(message: str, **kwargs) -> str:
|
295
|
+
test1 = kwargs["test1"] if kwargs and "test1" in kwargs else ""
|
296
|
+
test2 = kwargs["test2"] if kwargs and "test2" in kwargs else ""
|
297
|
+
if test1 and test2:
|
298
|
+
return f"""{message}: {test1}, {", ".join(test2)}"""
|
295
299
|
|
296
300
|
task = vhq.Task(
|
297
301
|
description="Amazing task",
|
@@ -207,8 +207,11 @@ class CustomOutput(BaseModel):
|
|
207
207
|
test1: str
|
208
208
|
test2: list[str]
|
209
209
|
|
210
|
-
def dummy_func(message: str,
|
211
|
-
|
210
|
+
def dummy_func(message: str, **kwargs) -> str:
|
211
|
+
test1 = kwargs["test1"] if kwargs and "test1" in kwargs else ""
|
212
|
+
test2 = kwargs["test2"] if kwargs and "test2" in kwargs else ""
|
213
|
+
if test1 and test2:
|
214
|
+
return f"""{message}: {test1}, {", ".join(test2)}"""
|
212
215
|
|
213
216
|
task = vhq.Task(
|
214
217
|
description="Amazing task",
|
@@ -192,7 +192,7 @@ Callback results will be stored in `callback_output` filed of the `TaskOutput` o
|
|
192
192
|
```python
|
193
193
|
import versionhq as vhq
|
194
194
|
|
195
|
-
def callback_func(condition: str, test1: str):
|
195
|
+
def callback_func(condition: str, test1: str, **kwargs):
|
196
196
|
return f"Result: {test1}, condition added: {condition}"
|
197
197
|
|
198
198
|
task = vhq.Task(
|
@@ -22,8 +22,13 @@ class CustomOutput(BaseModel):
|
|
22
22
|
def dummy_tool():
|
23
23
|
return "dummy"
|
24
24
|
|
25
|
-
def summarize_response(message: str,
|
26
|
-
|
25
|
+
def summarize_response(message: str, **kwargs) -> str | None:
|
26
|
+
test1 = kwargs["test1"] if kwargs and "test1" in kwargs else None
|
27
|
+
test2 = kwargs["test2"] if kwargs and "test2" in kwargs else None
|
28
|
+
if test1 and test2:
|
29
|
+
return f"""{message}: {test1}, {", ".join(str(test2))}"""
|
30
|
+
else:
|
31
|
+
return None
|
27
32
|
|
28
33
|
task = vhq.Task(
|
29
34
|
description="Research a topic to teach a kid aged 6 about math.",
|
@@ -41,8 +46,8 @@ assert res.task_id == task.id
|
|
41
46
|
assert res.raw
|
42
47
|
assert res.json_dict
|
43
48
|
assert res.pydantic.test1 and res.pydantic.test2
|
44
|
-
|
45
|
-
assert
|
49
|
+
if res.callback_output:
|
50
|
+
assert "Hi! Here is the result: " in res.callback_output
|
46
51
|
assert res.tool_output is None
|
47
52
|
assert res.evaluation and isinstance(res.evaluation, vhq.Evaluation)
|
48
53
|
```
|
@@ -177,10 +177,13 @@ class Main(BaseModel):
|
|
177
177
|
main1: list[Any] # <= assume expecting to store Sub object.
|
178
178
|
main2: dict[str, Any]
|
179
179
|
|
180
|
-
|
180
|
+
|
181
|
+
def format_response(sub, **kwargs) -> Main:
|
182
|
+
main1 = kwargs["main1"] if kwargs and "main1" in kwargs else None
|
181
183
|
if main1:
|
182
184
|
main1.append(sub)
|
183
|
-
|
185
|
+
main2 = kwargs["main2"] if kwargs and "main2" in kwargs else None
|
186
|
+
main = Main(main1=main1, main2=str(main2))
|
184
187
|
return main
|
185
188
|
|
186
189
|
# 3. Executes
|
@@ -44,9 +44,11 @@ class CustomOutput(BaseModel):
|
|
44
44
|
test1: str
|
45
45
|
test2: list[str]
|
46
46
|
|
47
|
-
def dummy_func(message: str,
|
48
|
-
|
49
|
-
|
47
|
+
def dummy_func(message: str, **kwargs) -> str:
|
48
|
+
test1 = kwargs["test1"] if kwargs and "test1" in kwargs else ""
|
49
|
+
test2 = kwargs["test2"] if kwargs and "test2" in kwargs else ""
|
50
|
+
if test1 and test2:
|
51
|
+
return f"""{message}: {test1}, {", ".join(test2)}"""
|
50
52
|
|
51
53
|
agent = vhq.Agent(role="demo manager")
|
52
54
|
|
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
|
|
15
15
|
|
16
16
|
[project]
|
17
17
|
name = "versionhq"
|
18
|
-
version = "1.2.4.
|
18
|
+
version = "1.2.4.8"
|
19
19
|
authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
|
20
20
|
description = "Autonomous agent networks for task automation with multi-step reasoning."
|
21
21
|
readme = "README.md"
|
@@ -82,6 +82,7 @@ pygraphviz = [
|
|
82
82
|
tools = [
|
83
83
|
"html2text>=2024.2.26",
|
84
84
|
"sec-api>=1.0.28",
|
85
|
+
"pytest-playwright>=0.7.0",
|
85
86
|
]
|
86
87
|
torch = [
|
87
88
|
"torch>=2.6.0",
|
@@ -25,7 +25,7 @@ from versionhq.tool.rag_tool import RagTool
|
|
25
25
|
from versionhq.tool.cache_handler import CacheHandler
|
26
26
|
from versionhq.tool.tool_handler import ToolHandler
|
27
27
|
from versionhq.tool.composio.model import ComposioBaseTool
|
28
|
-
from versionhq.tool.gpt.
|
28
|
+
from versionhq.tool.gpt.cua import GPTToolCUA, CUAToolSchema
|
29
29
|
from versionhq.tool.gpt.file_search import GPTToolFileSearch, FilterSchema
|
30
30
|
from versionhq.tool.gpt.web_search import GPTToolWebSearch
|
31
31
|
from versionhq.memory.contextual_memory import ContextualMemory
|
@@ -35,7 +35,7 @@ from versionhq.agent_network.formation import form_agent_network
|
|
35
35
|
from versionhq.task_graph.draft import workflow
|
36
36
|
|
37
37
|
|
38
|
-
__version__ = "1.2.4.
|
38
|
+
__version__ = "1.2.4.8"
|
39
39
|
__all__ = [
|
40
40
|
"Agent",
|
41
41
|
|
@@ -90,8 +90,8 @@ __all__ = [
|
|
90
90
|
"ToolHandler",
|
91
91
|
"ComposioBaseTool",
|
92
92
|
|
93
|
-
"
|
94
|
-
"
|
93
|
+
"GPTToolCUA",
|
94
|
+
"CUAToolSchema",
|
95
95
|
"GPTToolFileSearch",
|
96
96
|
"FilterSchema",
|
97
97
|
"GPTToolWebSearch",
|
@@ -51,7 +51,7 @@ class Logger(BaseModel):
|
|
51
51
|
def log(self, level: str, message: str, color="yellow"):
|
52
52
|
if self.verbose:
|
53
53
|
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
54
|
-
self._printer.print(f"\n{timestamp} -
|
54
|
+
self._printer.print(f"\n{timestamp} - vhq [{level.upper()}]: {message}", color=color)
|
55
55
|
|
56
56
|
self._save(level=level, message=message, filename=self.filename)
|
57
57
|
|
@@ -126,7 +126,7 @@ class Agent(BaseModel):
|
|
126
126
|
from versionhq.tool.rag_tool import RagTool
|
127
127
|
from versionhq.tool.gpt.web_search import GPTToolWebSearch
|
128
128
|
from versionhq.tool.gpt.file_search import GPTToolFileSearch
|
129
|
-
from versionhq.tool.gpt.
|
129
|
+
from versionhq.tool.gpt.cua import GPTToolCUA
|
130
130
|
|
131
131
|
if not self.tools:
|
132
132
|
return self
|
@@ -134,7 +134,7 @@ class Agent(BaseModel):
|
|
134
134
|
tool_list = []
|
135
135
|
for item in self.tools:
|
136
136
|
match item:
|
137
|
-
case RagTool() | BaseTool() |
|
137
|
+
case RagTool() | BaseTool() | GPTToolCUA() | GPTToolFileSearch() | GPTToolWebSearch():
|
138
138
|
tool_list.append(item)
|
139
139
|
|
140
140
|
case Tool():
|
@@ -433,7 +433,7 @@ class Agent(BaseModel):
|
|
433
433
|
from versionhq.tool.rag_tool import RagTool
|
434
434
|
from versionhq.tool.gpt.web_search import GPTToolWebSearch
|
435
435
|
from versionhq.tool.gpt.file_search import GPTToolFileSearch
|
436
|
-
from versionhq.tool.gpt.
|
436
|
+
from versionhq.tool.gpt.cua import GPTToolCUA
|
437
437
|
|
438
438
|
all_tools = []
|
439
439
|
if task: all_tools = task.tools + self.tools if task.can_use_agent_tools else task.tools
|
@@ -446,7 +446,7 @@ class Agent(BaseModel):
|
|
446
446
|
case RagTool():
|
447
447
|
rag_tools.append(item)
|
448
448
|
|
449
|
-
case
|
449
|
+
case GPTToolCUA() | GPTToolFileSearch() | GPTToolWebSearch():
|
450
450
|
gpt_tools.append(item)
|
451
451
|
|
452
452
|
case Tool() | BaseTool() | ToolSet():
|
@@ -18,7 +18,7 @@ from versionhq.tool.model import Tool, ToolSet, BaseTool
|
|
18
18
|
from versionhq.tool.rag_tool import RagTool
|
19
19
|
from versionhq.tool.gpt.web_search import GPTToolWebSearch
|
20
20
|
from versionhq.tool.gpt.file_search import GPTToolFileSearch
|
21
|
-
from versionhq.tool.gpt.
|
21
|
+
from versionhq.tool.gpt.cua import GPTToolCUA
|
22
22
|
from versionhq._utils import process_config, Logger, UsageMetrics, ErrorType
|
23
23
|
|
24
24
|
|
@@ -374,7 +374,7 @@ class Task(BaseModel):
|
|
374
374
|
tool_list = []
|
375
375
|
for item in self.tools:
|
376
376
|
match item:
|
377
|
-
case Tool() | ToolSet() | BaseTool() | RagTool() |
|
377
|
+
case Tool() | ToolSet() | BaseTool() | RagTool() | GPTToolCUA() | GPTToolFileSearch() | GPTToolWebSearch():
|
378
378
|
tool_list.append(item)
|
379
379
|
case type(item, callable):
|
380
380
|
tool_list.append(Tool(func=item))
|
@@ -387,6 +387,8 @@ class Task(BaseModel):
|
|
387
387
|
tool = RagTool(**item)
|
388
388
|
except:
|
389
389
|
pass
|
390
|
+
if tool:
|
391
|
+
tool_list.append(tool)
|
390
392
|
case _:
|
391
393
|
pass
|
392
394
|
self.tools = tool_list
|
@@ -713,7 +715,7 @@ class Task(BaseModel):
|
|
713
715
|
sig = inspect.signature(self.callback)
|
714
716
|
valid_keys = [param.name for param in sig.parameters.values() if param.kind == param.POSITIONAL_OR_KEYWORD]
|
715
717
|
valid_kwargs = { k: kwargs[k] if k in kwargs else None for k in valid_keys }
|
716
|
-
callback_res = self.callback(**valid_kwargs)
|
718
|
+
callback_res = self.callback(**valid_kwargs, **task_output.json_dict )
|
717
719
|
task_output.callback_output = callback_res
|
718
720
|
|
719
721
|
end_dt = datetime.datetime.now()
|
@@ -7,14 +7,14 @@ class GPTSizeEnum(str, Enum):
|
|
7
7
|
HIGH = "high"
|
8
8
|
|
9
9
|
|
10
|
-
class
|
10
|
+
class GPTCUAEnvironmentEnum(str, Enum):
|
11
11
|
BROWSER = "browser"
|
12
12
|
MAC = "mac"
|
13
13
|
WINDOWS = "windows"
|
14
14
|
UNBUNTU = "ubuntu"
|
15
15
|
|
16
16
|
|
17
|
-
class
|
17
|
+
class GPTCUATypeEnum(str, Enum):
|
18
18
|
COMPUTER_CALL_OUTPUT = "computer_call_output"
|
19
19
|
COMPUTER_USE_PREVIEW = "computer_use_preview"
|
20
20
|
|
@@ -0,0 +1,295 @@
|
|
1
|
+
import datetime
|
2
|
+
import time
|
3
|
+
from typing import List, Dict, Any, Tuple
|
4
|
+
|
5
|
+
from versionhq._utils import convert_img_url
|
6
|
+
from versionhq.tool.gpt import openai_client
|
7
|
+
from versionhq.tool.gpt._enum import GPTCUAEnvironmentEnum, GPTCUATypeEnum, GPTSizeEnum
|
8
|
+
from versionhq._utils import is_valid_enum, UsageMetrics, ErrorType, Logger, is_valid_url
|
9
|
+
|
10
|
+
|
11
|
+
allowed_browsers = ['webkit', 'chromium', 'firefox']
|
12
|
+
|
13
|
+
|
14
|
+
class CUAToolSchema:
|
15
|
+
type: str = GPTCUATypeEnum.COMPUTER_USE_PREVIEW.value
|
16
|
+
display_width: int = 1024
|
17
|
+
display_height: int = 768
|
18
|
+
environment: str = GPTCUAEnvironmentEnum.BROWSER.value
|
19
|
+
|
20
|
+
def __init__(
|
21
|
+
self,
|
22
|
+
type: str | GPTCUATypeEnum = None,
|
23
|
+
display_width: int = None,
|
24
|
+
display_height: int = None,
|
25
|
+
environment: str | GPTCUAEnvironmentEnum = None
|
26
|
+
):
|
27
|
+
self.display_height = display_height if display_height else self.display_height
|
28
|
+
self.display_width = display_width if display_width else self.display_width
|
29
|
+
|
30
|
+
if type and is_valid_enum(enum=GPTCUATypeEnum, val=type):
|
31
|
+
self.type = type.value if isinstance(type, GPTCUATypeEnum) else type
|
32
|
+
|
33
|
+
if environment and is_valid_enum(enum=GPTCUAEnvironmentEnum, val=environment):
|
34
|
+
self.environment = environment.value if isinstance(environment, GPTCUAEnvironmentEnum) else environment
|
35
|
+
|
36
|
+
self.environment = environment if environment else self.environment
|
37
|
+
|
38
|
+
|
39
|
+
@property
|
40
|
+
def schema(self) -> Dict[str, Any]:
|
41
|
+
return {
|
42
|
+
"type": self.type if isinstance(self.type, str) else self.type.value,
|
43
|
+
"display_width": self.display_width,
|
44
|
+
"display_height": self.display_height,
|
45
|
+
"environment": self.environment if isinstance(self.environment, str) else self.environment.value,
|
46
|
+
}
|
47
|
+
|
48
|
+
|
49
|
+
class GPTToolCUA:
|
50
|
+
model: str = "computer-use-preview"
|
51
|
+
tools: List[CUAToolSchema] = list()
|
52
|
+
user_prompt: str = None
|
53
|
+
img_url: str = None
|
54
|
+
web_url: str = "https://www.google.com"
|
55
|
+
browser: str = "firefox"
|
56
|
+
reasoning_effort: str = GPTSizeEnum.MEDIUM.value
|
57
|
+
truncation: str = "auto"
|
58
|
+
|
59
|
+
_response_ids: List[str] = list()
|
60
|
+
_call_ids: List[str] = list()
|
61
|
+
_usage: UsageMetrics = UsageMetrics()
|
62
|
+
_logger: Logger = Logger(info_file_save=True, filename="cua-task-{}".format(str(datetime.datetime.now().timestamp())) + ".png")
|
63
|
+
|
64
|
+
|
65
|
+
def __init__(
|
66
|
+
self,
|
67
|
+
user_prompt: str,
|
68
|
+
tools: List[CUAToolSchema] | CUAToolSchema = None,
|
69
|
+
img_url: str = None,
|
70
|
+
web_url: str = "https://www.google.com",
|
71
|
+
browser: str = "chromium",
|
72
|
+
reasoning_effort: GPTSizeEnum | str = None,
|
73
|
+
truncation: str = None,
|
74
|
+
_usage: UsageMetrics = UsageMetrics()
|
75
|
+
):
|
76
|
+
self.user_prompt = user_prompt
|
77
|
+
self.web_url = web_url if is_valid_url(web_url) else "https://www.google.com"
|
78
|
+
self.browser = browser if browser in allowed_browsers else 'chromium'
|
79
|
+
self.truncation = truncation if truncation else self.truncation
|
80
|
+
self._usage = _usage
|
81
|
+
self._response_ids = list()
|
82
|
+
self._call_ids = list()
|
83
|
+
|
84
|
+
if img_url:
|
85
|
+
img_url = convert_img_url(img_url)
|
86
|
+
self.img_url = img_url
|
87
|
+
|
88
|
+
if reasoning_effort and is_valid_enum(enum=GPTSizeEnum, val=reasoning_effort):
|
89
|
+
self.reasoning_effort = reasoning_effort.value if isinstance(reasoning_effort, GPTSizeEnum) else reasoning_effort
|
90
|
+
|
91
|
+
if tools:
|
92
|
+
match tools:
|
93
|
+
case list():
|
94
|
+
if self.tools:
|
95
|
+
self.tools.extend(tools)
|
96
|
+
else:
|
97
|
+
self.tools = tools
|
98
|
+
case CUAToolSchema():
|
99
|
+
if self.tools:
|
100
|
+
self.tools.append(tools)
|
101
|
+
else:
|
102
|
+
self.tools = [tools]
|
103
|
+
case _:
|
104
|
+
pass
|
105
|
+
|
106
|
+
|
107
|
+
def _take_screenshot(self, page: Any = None, path: str = None) -> Tuple[str | None, str | None]:
|
108
|
+
import base64
|
109
|
+
if not page:
|
110
|
+
return None, None
|
111
|
+
|
112
|
+
path = path if path else "screenshot.png"
|
113
|
+
screenshot_bytes = page.screenshot()
|
114
|
+
screenshot_base64 = base64.b64encode(screenshot_bytes).decode("utf-8")
|
115
|
+
self._logger.log(message=f"Action: screenshot", level="info", color="blue")
|
116
|
+
return screenshot_bytes, screenshot_base64
|
117
|
+
|
118
|
+
|
119
|
+
def _handle_model_action(self, page: Any, action: Any, action_type: str = None) -> bool:
|
120
|
+
"""Creates a page object and performs actions."""
|
121
|
+
|
122
|
+
action_type = action_type if action_type else action.type
|
123
|
+
start_dt = datetime.datetime.now()
|
124
|
+
|
125
|
+
try:
|
126
|
+
match action_type:
|
127
|
+
case "click":
|
128
|
+
x, y = action.x, action.y
|
129
|
+
button = action.button
|
130
|
+
self._logger.log(message=f"Action: click at ({x}, {y}) with button '{button}'", level="info", color="blue")
|
131
|
+
if button != "left" and button != "right":
|
132
|
+
button = "left"
|
133
|
+
page.mouse.click(x, y, button=button)
|
134
|
+
|
135
|
+
case "scroll":
|
136
|
+
x, y = action.x, action.y
|
137
|
+
scroll_x, scroll_y = action.scroll_x, action.scroll_y
|
138
|
+
self._logger.log(message=f"Action: scroll at ({x}, {y}) with offsets (scroll_x={scroll_x}, scroll_y={scroll_y})", level="info", color="blue")
|
139
|
+
page.mouse.move(x, y)
|
140
|
+
page.evaluate(f"window.scrollBy({scroll_x}, {scroll_y})")
|
141
|
+
|
142
|
+
case "keypress":
|
143
|
+
keys = action.keys
|
144
|
+
for k in keys:
|
145
|
+
self._logger.log(message=f"Action: keypress '{k}'", level="info", color="blue")
|
146
|
+
if k.lower() == "enter":
|
147
|
+
page.keyboard.press("Enter")
|
148
|
+
elif k.lower() == "space":
|
149
|
+
page.keyboard.press(" ")
|
150
|
+
else:
|
151
|
+
page.keyboard.press(k)
|
152
|
+
|
153
|
+
case "type":
|
154
|
+
text = action.text
|
155
|
+
self._logger.log(message=f"Action: type text: {text}", level="info", color="blue")
|
156
|
+
page.keyboard.type(text)
|
157
|
+
|
158
|
+
case "wait":
|
159
|
+
self._logger.log(message=f"Action: wait", level="info", color="blue")
|
160
|
+
time.sleep(2)
|
161
|
+
|
162
|
+
case "screenshot":
|
163
|
+
pass
|
164
|
+
|
165
|
+
case _:
|
166
|
+
self._logger.log(message=f"Unrecognized action: {action}", level="warning", color="yellow")
|
167
|
+
|
168
|
+
except Exception as e:
|
169
|
+
self._usage.record_errors(type=ErrorType.API)
|
170
|
+
self._logger.log(message=f"Error handling action {action}: {e}", level="error", color="red")
|
171
|
+
|
172
|
+
end_dt = datetime.datetime.now()
|
173
|
+
self._usage.record_latency(start_dt=start_dt, end_dt=end_dt)
|
174
|
+
return bool(self._usage.total_errors)
|
175
|
+
|
176
|
+
|
177
|
+
def run(self, screenshot: str = None) -> Tuple[Dict[str, Any], None, UsageMetrics]:
|
178
|
+
raw_res = dict()
|
179
|
+
usage = self._usage if self._usage else UsageMetrics()
|
180
|
+
start_dt = datetime.datetime.now()
|
181
|
+
|
182
|
+
try:
|
183
|
+
schema = self.schema
|
184
|
+
if screenshot and "output" in schema["input"][0]:
|
185
|
+
output_image_url = schema["input"][0]["output"]["image_url"].replace("SCREENSHOT", str(screenshot))
|
186
|
+
schema["input"][0]["output"]["image_url"] = output_image_url
|
187
|
+
|
188
|
+
res = openai_client.responses.create(**schema)
|
189
|
+
if not res:
|
190
|
+
usage.record_errors(ErrorType.TOOL)
|
191
|
+
else:
|
192
|
+
for item in res.output:
|
193
|
+
match item.type:
|
194
|
+
case "reasoning":
|
195
|
+
raw_res.update(dict(reasoning=item.summary[0].text))
|
196
|
+
if item.id and item.id.startwith('rs'):
|
197
|
+
self._response_ids.append(item.id)
|
198
|
+
case "computer_call":
|
199
|
+
raw_res.update(dict(action=item.action))
|
200
|
+
# self._response_ids.append(item.id)
|
201
|
+
self._call_ids.append(item.call_id)
|
202
|
+
case _:
|
203
|
+
pass
|
204
|
+
usage.record_token_usage(**res.usage.__dict__)
|
205
|
+
|
206
|
+
except Exception as e:
|
207
|
+
self._logger.log(message=f"Failed to run: {str(e)}", color="red", level="error")
|
208
|
+
usage.record_errors(ErrorType.TOOL)
|
209
|
+
|
210
|
+
end_dt = datetime.datetime.now()
|
211
|
+
usage.record_latency(start_dt=start_dt, end_dt=end_dt)
|
212
|
+
return raw_res, None, usage
|
213
|
+
|
214
|
+
|
215
|
+
def invoke_playwright(self) -> Tuple[Dict[str, Any], None, UsageMetrics]:
    """Handles computer use loop. Ref. OpenAI official website.

    Drives a headless browser with Playwright: runs the model, applies the
    returned action to the page, screenshots, and feeds the screenshot back
    until no further action is returned.

    Returns:
        A tuple of (last raw result dict, None, accumulated usage metrics).
    """

    from playwright.sync_api import sync_playwright

    self._logger.log(message="Start the operation.", level="info", color="blue")

    # FIX: initialize before the try block. Previously `res`, `_` and `start_dt`
    # were first assigned inside the try, so an early failure (e.g. browser
    # launch error) raised NameError at record_latency / the final return.
    res, _ = None, None
    start_dt = datetime.datetime.now()

    try:
        with sync_playwright() as p:
            b = p.firefox if self.browser == "firefox" else p.webkit if self.browser == "webkit" else p.chromium
            browser = b.launch(headless=True)
            page = browser.new_page()
            if not browser or not page:
                return None, None, None

            page.goto(self.web_url)
            res, _, usage = self.run()
            self._usage = usage
            actions = [v for k, v in res.items() if k =="action"] if res else []
            action = actions[0] if actions else None
            start_dt = datetime.datetime.now()

            if action:
                while True:
                    self._handle_model_action(page=page, action=action)
                    _, screenshot_base64 = self._take_screenshot(page=page)
                    res, _, usage = self.run(screenshot=screenshot_base64)
                    # NOTE(review): 'agggregate' looks misspelled — confirm it matches
                    # the UsageMetrics API name before renaming.
                    self._usage.agggregate(metrics=usage)
                    if not res:
                        usage.record_errors(type=ErrorType.API)
                        break

                    actions = [v for k, v in res.items() if k =="action"] if res else []
                    action = actions[0] if actions else None
                    if not action:
                        break
            else:
                self._usage.record_errors(type=ErrorType.TOOL)

    except Exception as e:
        self._logger.log(message=f"Failed to execute. {str(e)}", color="red", level="error")

    end_dt = datetime.datetime.now()
    self._usage.record_latency(start_dt=start_dt, end_dt=end_dt)
    # browser.close()
    return res, _, self._usage
+
@property
def schema(self) -> Dict[str, Any]:
    """Builds the argument dict for the CUA call to the Responses API.

    Returns a follow-up payload (a computer_call_output referencing the most
    recent call id, with a screenshot placeholder) when a call is pending;
    otherwise the initial payload built from the user prompt and an optional
    input image.
    """
    tools = [tool.schema for tool in self.tools]

    last_response_id = None
    if self._response_ids and self._response_ids[-1].startswith("rs"):
        last_response_id = self._response_ids[-1]

    if self._call_ids:
        # Follow-up turn: echo the pending call id with a screenshot placeholder
        # ("SCREENSHOT" is substituted with real image data at run time).
        follow_up = [{
            "call_id": self._call_ids[-1],
            "type": "computer_call_output",
            "output": {"type": "input_image", "image_url": "data:image/png;base64,SCREENSHOT"},
        }]
        return dict(
            model=self.model,
            previous_response_id=last_response_id,
            tools=tools,
            input=follow_up,
            truncation=self.truncation,
        )

    # Initial turn: user prompt, plus the encoded image when one was given.
    messages = [{"role": "user", "content": self.user_prompt}]
    encoded_img = convert_img_url(self.img_url) if self.img_url else None
    if encoded_img:
        messages.append({"type": "input_image", "image_url": f"data:image/png;base64,{encoded_img}"})
    return dict(
        model=self.model,
        tools=tools,
        input=messages,
        reasoning={"effort": self.reasoning_effort},
        truncation=self.truncation,
    )
@@ -1,3 +1,4 @@
|
|
1
|
+
import datetime
|
1
2
|
from typing import List, Dict, Any, Optional, Tuple
|
2
3
|
|
3
4
|
from versionhq.tool.gpt import openai_client
|
@@ -87,6 +88,7 @@ class GPTToolFileSearch:
|
|
87
88
|
max_num_results: int = 2
|
88
89
|
include: List[str] = ["output[*].file_search_call.search_results"]
|
89
90
|
filters: Optional[FilterSchema] = None
|
91
|
+
_usage: UsageMetrics = UsageMetrics()
|
90
92
|
|
91
93
|
def __init__(
|
92
94
|
self,
|
@@ -119,7 +121,8 @@ class GPTToolFileSearch:
|
|
119
121
|
def run(self) -> Tuple[str, List[Dict[str, Any]], UsageMetrics] | None:
|
120
122
|
raw_res = ""
|
121
123
|
annotations = list()
|
122
|
-
usage = UsageMetrics()
|
124
|
+
usage = self._usage if self._usage else UsageMetrics()
|
125
|
+
start_dt = datetime.datetime.now()
|
123
126
|
|
124
127
|
try:
|
125
128
|
res = openai_client.responses.create(**self.schema)
|
@@ -130,10 +133,14 @@ class GPTToolFileSearch:
|
|
130
133
|
annotations = [{ "index": item.index, "file_id": item.file_id, "filename": item.filename }
|
131
134
|
for item in res.output[1].content[0].annotations]
|
132
135
|
usage.record_token_usage(**res.usage.__dict__)
|
133
|
-
|
136
|
+
|
134
137
|
except:
|
135
138
|
usage.record_errors(ErrorType.TOOL)
|
136
|
-
|
139
|
+
|
140
|
+
end_dt = datetime.datetime.now()
|
141
|
+
usage.record_latency(start_dt=start_dt, end_dt=end_dt)
|
142
|
+
self._usage = usage
|
143
|
+
return raw_res, annotations, usage
|
137
144
|
|
138
145
|
|
139
146
|
@property
|
@@ -1,3 +1,4 @@
|
|
1
|
+
import datetime
|
1
2
|
from typing import Dict, Any, Optional, Tuple, List
|
2
3
|
|
3
4
|
from versionhq.tool.gpt import openai_client
|
@@ -16,6 +17,7 @@ class GPTToolWebSearch:
|
|
16
17
|
region: str = None # "London"
|
17
18
|
search_content_size: str = GPTSizeEnum.MEDIUM.value
|
18
19
|
_user_location: Optional[Dict[str, str]] = None
|
20
|
+
_usage: UsageMetrics = UsageMetrics()
|
19
21
|
|
20
22
|
|
21
23
|
def __init__(
|
@@ -52,20 +54,25 @@ class GPTToolWebSearch:
|
|
52
54
|
|
53
55
|
raw_res = ""
|
54
56
|
annotations = list()
|
55
|
-
usage = UsageMetrics()
|
57
|
+
usage = self._usage if self._usage else UsageMetrics()
|
58
|
+
start_dt = datetime.datetime.now()
|
56
59
|
|
57
60
|
try:
|
58
61
|
res = openai_client.responses.create(**self.schema)
|
59
62
|
if not res:
|
60
63
|
usage.record_errors(ErrorType.TOOL)
|
61
64
|
else:
|
62
|
-
|
63
|
-
|
64
|
-
|
65
|
-
|
65
|
+
content = res.output[0].content[0] if len(res.output) == 1 else res.output[1].content[0]
|
66
|
+
if content:
|
67
|
+
raw_res = content.text
|
68
|
+
annotations = [{ "title": item.title, "url": item.url } for item in content.annotations] if content.annotations else []
|
69
|
+
usage.record_token_usage(**res.usage.__dict__)
|
66
70
|
except:
|
67
71
|
usage.record_errors(ErrorType.TOOL)
|
68
|
-
|
72
|
+
end_dt = datetime.datetime.now()
|
73
|
+
usage.record_latency(start_dt=start_dt, end_dt=end_dt)
|
74
|
+
self._usage = usage
|
75
|
+
return raw_res, annotations, usage
|
69
76
|
|
70
77
|
|
71
78
|
@property
|