payi 0.1.0a86__tar.gz → 0.1.0a87__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of payi might be problematic.
- payi-0.1.0a87/.release-please-manifest.json +3 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/CHANGELOG.md +8 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/PKG-INFO +1 -1
- {payi-0.1.0a86 → payi-0.1.0a87}/pyproject.toml +1 -1
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_version.py +1 -1
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/AnthropicInstrumentor.py +41 -1
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/BedrockInstrumentor.py +90 -16
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/GoogleGenAiInstrumentor.py +11 -62
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/OpenAIInstrumentor.py +56 -2
- payi-0.1.0a87/src/payi/lib/VertexInstrumentor.py +211 -0
- payi-0.1.0a87/src/payi/lib/VertexRequest.py +237 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/instrument.py +51 -221
- payi-0.1.0a86/.release-please-manifest.json +0 -3
- payi-0.1.0a86/src/payi/lib/VertexInstrumentor.py +0 -397
- {payi-0.1.0a86 → payi-0.1.0a87}/.gitignore +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/CONTRIBUTING.md +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/LICENSE +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/README.md +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/SECURITY.md +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/api.md +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/bin/check-release-environment +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/bin/publish-pypi +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/examples/.keep +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/mypy.ini +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/noxfile.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/release-please-config.json +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/requirements-dev.lock +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/requirements.lock +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_base_client.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_client.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_compat.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_constants.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_exceptions.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_files.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_models.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_qs.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_resource.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_streaming.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_types.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/_logs.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/_proxy.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/_reflection.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/_resources_proxy.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/_streams.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/_sync.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/_transform.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/_typing.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/_utils/_utils.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/.keep +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/Stopwatch.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/helpers.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/pagination.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/py.typed +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/categories/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/categories/categories.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/categories/fixed_cost_resources.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/categories/resources.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/experiences/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/experiences/experiences.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/experiences/properties.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/experiences/types/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/experiences/types/limit_config.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/experiences/types/types.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/ingest.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/limits/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/limits/limits.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/limits/tags.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/requests/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/requests/properties.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/requests/requests.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/requests/result.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/use_cases/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/definitions.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/kpis.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/limit_config.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/version.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/use_cases/kpis.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/use_cases/properties.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/resources/use_cases/use_cases.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/bulk_ingest_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/categories/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/categories/fixed_cost_resource_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/categories/resource_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/categories/resource_list_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/category_delete_resource_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/category_delete_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/category_list_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/category_list_resources_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/category_resource_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/category_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/cost_data.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/cost_details.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/default_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/experience_instance_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/experiences/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/experiences/experience_type.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/experiences/property_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/experiences/type_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/experiences/type_list_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/experiences/type_update_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/experiences/types/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/experiences/types/limit_config_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/ingest_bulk_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/ingest_event_param.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/ingest_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/ingest_units_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limit_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limit_history_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limit_list_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limit_list_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limit_reset_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limit_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limit_update_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/limit_tags.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/tag_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/tag_create_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/tag_delete_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/tag_list_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/tag_remove_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/tag_remove_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/tag_update_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/limits/tag_update_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/pay_i_common_models_api_router_header_info_param.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/requests/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/requests/property_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/requests/request_result.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/requests_data.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared/evaluation_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared/ingest_units.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared/pay_i_common_models_budget_management_cost_details_base.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared/pay_i_common_models_budget_management_create_limit_base.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared/properties_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared/xproxy_error.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared/xproxy_result.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared_params/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared_params/ingest_units.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/shared_params/pay_i_common_models_budget_management_create_limit_base.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/total_cost_data.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_case_instance_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definition_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definition_list_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definition_update_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_create_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_delete_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_list_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_list_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_retrieve_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_update_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_update_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/limit_config_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/kpi_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/kpi_list_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/kpi_list_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/kpi_update_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/property_create_params.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/src/payi/types/use_cases/use_case_definition.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/categories/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/categories/test_fixed_cost_resources.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/categories/test_resources.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/experiences/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/experiences/test_properties.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/experiences/test_types.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/experiences/types/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/experiences/types/test_limit_config.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/limits/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/limits/test_tags.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/requests/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/requests/test_properties.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/requests/test_result.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/test_categories.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/test_experiences.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/test_ingest.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/test_limits.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/test_use_cases.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/use_cases/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/use_cases/definitions/__init__.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/use_cases/definitions/test_kpis.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/use_cases/definitions/test_limit_config.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/use_cases/definitions/test_version.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/use_cases/test_definitions.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/use_cases/test_kpis.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/api_resources/use_cases/test_properties.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/conftest.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/sample_file.txt +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_client.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_deepcopy.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_extract_files.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_files.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_models.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_qs.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_required_args.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_response.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_streaming.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_transform.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_utils/test_proxy.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/test_utils/test_typing.py +0 -0
- {payi-0.1.0a86 → payi-0.1.0a87}/tests/utils.py +0 -0
{payi-0.1.0a86 → payi-0.1.0a87}/CHANGELOG.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## 0.1.0-alpha.87 (2025-06-16)
+
+Full Changelog: [v0.1.0-alpha.86...v0.1.0-alpha.87](https://github.com/Pay-i/pay-i-python/compare/v0.1.0-alpha.86...v0.1.0-alpha.87)
+
+### Features
+
+* capture function calling ([#324](https://github.com/Pay-i/pay-i-python/issues/324)) ([5e89cb8](https://github.com/Pay-i/pay-i-python/commit/5e89cb8f8526cd8c9053683e40a9d6c9a1773742))
+
 ## 0.1.0-alpha.86 (2025-06-13)
 
 Full Changelog: [v0.1.0-alpha.85...v0.1.0-alpha.86](https://github.com/Pay-i/pay-i-python/compare/v0.1.0-alpha.85...v0.1.0-alpha.86)
{payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/AnthropicInstrumentor.py
@@ -227,6 +227,20 @@ def anthropic_process_synchronous_response(request: _ProviderRequest, response:
 
     units["text"] = Units(input=input, output=output)
 
+    content = response.get('content', [])
+    if content:
+        for c in content:
+            if c.get("type", "") != "tool_use":
+                continue
+            name = c.get("name", "")
+            input = c.get("input", "")
+            arguments: Optional[str] = None
+            if input and isinstance(input, dict):
+                arguments = json.dumps(input, ensure_ascii=False)
+
+            if name and arguments:
+                request.add_synchronous_function_call(name=name, arguments=arguments)
+
     if log_prompt_and_response:
         request._ingest["provider_response_json"] = json.dumps(response)
 
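For context on the shape this new loop walks: in an Anthropic Messages response, tool calls arrive as `tool_use` blocks inside the `content` array, with the call arguments as a dict under `input`. A minimal stand-alone sketch, using a made-up response payload (the `get_weather` tool and all values are invented):

```python
import json
from typing import Optional

# Hypothetical Anthropic-style response body with one text block and one tool_use block.
response = {
    "content": [
        {"type": "text", "text": "Let me check the weather."},
        {"type": "tool_use", "name": "get_weather", "input": {"city": "Seattle", "unit": "c"}},
    ],
    "usage": {"input_tokens": 25, "output_tokens": 17},
}

calls: list[tuple[str, Optional[str]]] = []
for c in response.get("content", []):
    if c.get("type", "") != "tool_use":
        continue
    name = c.get("name", "")
    tool_input = c.get("input", {})
    arguments: Optional[str] = None
    if tool_input and isinstance(tool_input, dict):
        # The arguments dict is serialized to a JSON string, mirroring the ingest format above.
        arguments = json.dumps(tool_input, ensure_ascii=False)
    if name and arguments:
        calls.append((name, arguments))

print(calls)  # [('get_weather', '{"city": "Seattle", "unit": "c"}')]
```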
@@ -280,6 +294,32 @@ def anthropic_process_chunk(request: _ProviderRequest, chunk: 'dict[str, Any]',
         request._ingest["units"]["text"]["output"] = usage.get('output_tokens', 0)
 
         request._instrumentor._logger.debug(f"Anthropic streaming finished: output tokens {usage.get('output_tokens', 0)} ")
+
+    elif type == "content_block_start":
+        request._building_function_response = False
+
+        content_block = chunk.get('content_block', {})
+        if content_block and content_block.get('type', "") == "tool_use":
+            index = chunk.get('index', None)
+            name = content_block.get('name', "")
+
+            if index and isinstance(index, int) and name:
+                request._building_function_response = True
+                request.add_streaming_function_call(index=index, name=name, arguments=None)
+
+    elif type == "content_block_delta":
+        if request._building_function_response:
+            delta = chunk.get("delta", {})
+            type = delta.get("type", "")
+            partial_json = delta.get("partial_json", "")
+            index = chunk.get('index', None)
+
+            if index and isinstance(index, int) and type == "input_json_delta" and partial_json:
+                request.add_streaming_function_call(index=index, name=None, arguments=partial_json)
+
+    elif type == "content_block_stop":
+        request._building_function_response = False
+
     else:
         request._instrumentor._logger.debug(f"Anthropic streaming chunk: {type}")
 
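The streaming path above relies on Anthropic's content-block events: a `content_block_start` announces a `tool_use` block and its name, then `content_block_delta` events of type `input_json_delta` carry the arguments as `partial_json` fragments that only form valid JSON once concatenated. A small stand-alone sketch of that accumulation, with invented chunk data:

```python
import json

# Invented stream of Anthropic-style events for a single tool_use content block.
chunks = [
    {"type": "content_block_start", "index": 1,
     "content_block": {"type": "tool_use", "name": "get_weather"}},
    {"type": "content_block_delta", "index": 1,
     "delta": {"type": "input_json_delta", "partial_json": '{"city": "Sea'}},
    {"type": "content_block_delta", "index": 1,
     "delta": {"type": "input_json_delta", "partial_json": 'ttle"}'}},
    {"type": "content_block_stop", "index": 1},
]

calls = {}  # index -> {"name": ..., "arguments": ...}
for chunk in chunks:
    kind = chunk.get("type", "")
    index = chunk.get("index")
    if kind == "content_block_start":
        block = chunk.get("content_block", {})
        if block.get("type") == "tool_use":
            calls[index] = {"name": block.get("name", ""), "arguments": ""}
    elif kind == "content_block_delta" and index in calls:
        delta = chunk.get("delta", {})
        if delta.get("type") == "input_json_delta":
            # Fragments are concatenated until content_block_stop ends the block.
            calls[index]["arguments"] += delta.get("partial_json", "")

print(json.loads(calls[1]["arguments"]))  # {'city': 'Seattle'}
```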
@@ -301,7 +341,7 @@ def anthropic_has_image_and_get_texts(request: _ProviderRequest, messages: Any)
         request._estimated_prompt_tokens = estimated_token_count
 
     except Exception:
-        request._instrumentor._logger.
+        request._instrumentor._logger.info("Anthropic skipping vision token calc, could not load cl100k_base")
 
     def has_image_and_get_texts(encoding: tiktoken.Encoding, content: Union[str, 'list[Any]']) -> 'tuple[bool, int]':
         if isinstance(content, list): # type: ignore
{payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/BedrockInstrumentor.py
@@ -1,6 +1,6 @@
 import os
 import json
-from typing import Any, Sequence
+from typing import Any, Optional, Sequence
 from functools import wraps
 from typing_extensions import override
 
@@ -132,15 +132,32 @@ class InvokeResponseWrapper(ObjectProxy): # type: ignore
                 log_prompt_and_response=False, # will evaluate logging later
                 assign_id=False)
 
-        elif
-            input = response
-            output = response
+        elif self._request._is_meta:
+            input = response.get('prompt_token_count', 0)
+            output = response.get('generation_token_count', 0)
             units["text"] = Units(input=input, output=output)
 
+        elif self._request._is_nova:
+            usage = response.get("usage", {})
+
+            input = usage.get("inputTokens", 0)
+            output = usage.get("outputTokens", 0)
+            units["text"] = Units(input=input, output=output)
+
+            text_cache_read = usage.get("cacheReadInputTokenCount", None)
+            if text_cache_read:
+                units["text_cache_read"] = text_cache_read
+
+            text_cache_write = usage.get("cacheWriteInputTokenCount", None)
+            if text_cache_write:
+                units["text_cache_write"] = text_cache_write
+
+            bedrock_converse_process_synchronous_function_call(self._request, response)
+
         if self._log_prompt_and_response:
             ingest["provider_response_json"] = data.decode('utf-8') # type: ignore
 
-        self._request._instrumentor._ingest_units(
+        self._request._instrumentor._ingest_units(self._request)
 
         return data # type: ignore
 
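The new `_is_meta` and `_is_nova` branches read token usage from two differently shaped InvokeModel response bodies: Llama-style bodies report `prompt_token_count` / `generation_token_count` at the top level, while Nova-style bodies nest `inputTokens` / `outputTokens` (plus optional cache counters) under `usage`. A rough sketch of the normalization, with made-up example bodies and a simplified units dict standing in for the SDK's `Units` type:

```python
# Made-up InvokeModel response bodies illustrating the two shapes handled above.
meta_body = {"generation": "...", "prompt_token_count": 120, "generation_token_count": 48}
nova_body = {
    "output": {"message": {"role": "assistant", "content": [{"text": "..."}]}},
    "usage": {"inputTokens": 96, "outputTokens": 51, "cacheReadInputTokenCount": 32},
}

def extract_text_units(body: dict, is_meta: bool, is_nova: bool) -> dict:
    """Normalize provider-specific token counts into a single units dict."""
    units: dict = {}
    if is_meta:
        units["text"] = {"input": body.get("prompt_token_count", 0),
                         "output": body.get("generation_token_count", 0)}
    elif is_nova:
        usage = body.get("usage", {})
        units["text"] = {"input": usage.get("inputTokens", 0),
                         "output": usage.get("outputTokens", 0)}
        if usage.get("cacheReadInputTokenCount"):
            units["text_cache_read"] = usage["cacheReadInputTokenCount"]
        if usage.get("cacheWriteInputTokenCount"):
            units["text_cache_write"] = usage["cacheWriteInputTokenCount"]
    return units

print(extract_text_units(meta_body, True, False))  # {'text': {'input': 120, 'output': 48}}
print(extract_text_units(nova_body, False, True))  # includes 'text_cache_read': 32
```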
@@ -260,6 +277,8 @@ class _BedrockInvokeProviderRequest(_BedrockProviderRequest):
     def __init__(self, instrumentor: _PayiInstrumentor, model_id: str):
         super().__init__(instrumentor=instrumentor)
         self._is_anthropic: bool = 'anthropic' in model_id
+        self._is_nova: bool = 'nova' in model_id
+        self._is_meta: bool = 'meta' in model_id
 
     @override
     def process_request(self, instance: Any, extra_headers: 'dict[str, str]', args: Sequence[Any], kwargs: Any) -> bool:
@@ -280,24 +299,35 @@ class _BedrockInvokeProviderRequest(_BedrockProviderRequest):
 
     @override
     def process_chunk(self, chunk: Any) -> _ChunkResult:
-
-            return self.process_invoke_streaming_anthropic_chunk(chunk)
-        else:
-            return self.process_invoke_streaming_llama_chunk(chunk)
+        chunk_dict = json.loads(chunk)
 
-
-
+        if self._is_anthropic:
+            from .AnthropicInstrumentor import anthropic_process_chunk
+            return anthropic_process_chunk(self, chunk_dict, assign_id=False)
 
-
+        if self._is_nova:
+            bedrock_converse_process_streaming_for_function_call(self, chunk_dict)
+
+        # meta and nova
+        return self.process_invoke_other_provider_chunk(chunk_dict)
 
-    def
+    def process_invoke_other_provider_chunk(self, chunk_dict: 'dict[str, Any]') -> _ChunkResult:
         ingest = False
-
+
         metrics = chunk_dict.get("amazon-bedrock-invocationMetrics", {})
         if metrics:
             input = metrics.get("inputTokenCount", 0)
             output = metrics.get("outputTokenCount", 0)
             self._ingest["units"]["text"] = Units(input=input, output=output)
+
+            text_cache_read = metrics.get("cacheReadInputTokenCount", None)
+            if text_cache_read:
+                self._ingest["units"]["text_cache_read"] = text_cache_read
+
+            text_cache_write = metrics.get("cacheWriteInputTokenCount", None)
+            if text_cache_write:
+                self._ingest["units"]["text_cache_write"] = text_cache_write
+
             ingest = True
 
         return _ChunkResult(send_chunk_to_caller=True, ingest=ingest)
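For the streaming invoke path, the usage numbers come from the `amazon-bedrock-invocationMetrics` block that Bedrock appends to the final stream chunk. A minimal sketch of the fields read above, with invented values:

```python
# Invented final stream chunk carrying Bedrock invocation metrics.
last_chunk = {
    "amazon-bedrock-invocationMetrics": {
        "inputTokenCount": 96,
        "outputTokenCount": 51,
        "cacheReadInputTokenCount": 32,
    }
}

metrics = last_chunk.get("amazon-bedrock-invocationMetrics", {})
if metrics:
    units = {"text": {"input": metrics.get("inputTokenCount", 0),
                      "output": metrics.get("outputTokenCount", 0)}}
    if metrics.get("cacheReadInputTokenCount"):
        units["text_cache_read"] = metrics["cacheReadInputTokenCount"]
    print(units)  # {'text': {'input': 96, 'output': 51}, 'text_cache_read': 32}
```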
@@ -356,7 +386,9 @@ class _BedrockConverseProviderRequest(_BedrockProviderRequest):
             response_without_metadata.pop("ResponseMetadata", None)
             self._ingest["provider_response_json"] = json.dumps(response_without_metadata)
 
-
+        bedrock_converse_process_synchronous_function_call(self, response)
+
+        return None
 
     @override
     def process_chunk(self, chunk: 'dict[str, Any]') -> _ChunkResult:
@@ -371,4 +403,46 @@ class _BedrockConverseProviderRequest(_BedrockProviderRequest):
 
             ingest = True
 
-
+        bedrock_converse_process_streaming_for_function_call(self, chunk)
+
+        return _ChunkResult(send_chunk_to_caller=True, ingest=ingest)
+
+def bedrock_converse_process_streaming_for_function_call(request: _ProviderRequest, chunk: 'dict[str, Any]') -> None:
+    contentBlockStart = chunk.get("contentBlockStart", {})
+    tool_use = contentBlockStart.get("start", {}).get("toolUse", {})
+    if tool_use:
+        index = contentBlockStart.get("contentBlockIndex", None)
+        name = tool_use.get("name", "")
+
+        if name and index is not None:
+            request.add_streaming_function_call(index=index, name=name, arguments=None)
+
+        return
+
+    contentBlockDelta = chunk.get("contentBlockDelta", {})
+    tool_use = contentBlockDelta.get("delta", {}).get("toolUse", {})
+    if tool_use:
+        index = contentBlockDelta.get("contentBlockIndex", None)
+        input = tool_use.get("input", "")
+
+        if input and index is not None:
+            request.add_streaming_function_call(index=index, name=None, arguments=input)
+
+        return
+
+def bedrock_converse_process_synchronous_function_call(request: _ProviderRequest, response: 'dict[str, Any]') -> None:
+    content = response.get("output", {}).get("message", {}).get("content", [])
+    if content:
+        for item in content:
+            tool_use = item.get("toolUse", {})
+            if tool_use:
+                name = tool_use.get("name", "")
+                input = tool_use.get("input", {})
+                arguments: Optional[str] = None
+
+                if input and isinstance(input, dict):
+                    arguments = json.dumps(input)
+
+                if name:
+                    request.add_synchronous_function_call(name=name, arguments=arguments)
+
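The two new module-level helpers walk the Converse API shapes: a non-streaming response carries tool calls at `output.message.content[].toolUse` with `input` as a dict, while streaming surfaces them through `contentBlockStart` / `contentBlockDelta` events. A stand-alone sketch of the synchronous case, over a made-up response:

```python
import json

# Made-up Converse response with one toolUse content block.
response = {
    "output": {"message": {"role": "assistant", "content": [
        {"toolUse": {"toolUseId": "t-1", "name": "get_weather",
                     "input": {"city": "Seattle"}}},
    ]}},
    "usage": {"inputTokens": 40, "outputTokens": 22},
}

calls = []
for item in response.get("output", {}).get("message", {}).get("content", []):
    tool_use = item.get("toolUse", {})
    if not tool_use:
        continue
    name = tool_use.get("name", "")
    # The tool input dict is serialized to a JSON string before ingestion.
    arguments = json.dumps(tool_use.get("input", {}))
    if name:
        calls.append((name, arguments))

print(calls)  # [('get_weather', '{"city": "Seattle"}')]
```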
{payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/GoogleGenAiInstrumentor.py
@@ -1,12 +1,10 @@
-import
-from typing import Any, List, Union, Optional, Sequence
+from typing import Any, List, Union, Sequence
 from typing_extensions import override
 
 from wrapt import wrap_function_wrapper # type: ignore
 
-from
-
-from .instrument import _ChunkResult, _IsStreaming, _StreamingType, _ProviderRequest, _PayiInstrumentor
+from .instrument import _ChunkResult, _IsStreaming, _PayiInstrumentor
+from .VertexRequest import _VertexRequest
 
 
 class GoogleGenAiInstrumentor:
@@ -113,13 +111,10 @@ async def agenerate_stream_wrapper(
         kwargs,
     )
 
-class _GoogleGenAiRequest(
+class _GoogleGenAiRequest(_VertexRequest):
     def __init__(self, instrumentor: _PayiInstrumentor):
         super().__init__(
             instrumentor=instrumentor,
-            category=PayiCategories.google_vertex,
-            streaming_type=_StreamingType.generator,
-            is_google_vertex_or_genai_client=True,
         )
         self._prompt_character_count = 0
         self._candidates_character_count = 0
@@ -154,8 +149,6 @@ class _GoogleGenAiRequest(_ProviderRequest):
         if isinstance(value, list):
             items = value # type: ignore
 
-            from .VertexInstrumentor import count_chars_skip_spaces
-
             for item in items: # type: ignore
                 text = ""
                 if isinstance(item, Part):
@@ -166,8 +159,8 @@ class _GoogleGenAiRequest(_ProviderRequest):
                     text = item
 
                 if text != "":
-                    self._prompt_character_count += count_chars_skip_spaces(text) # type: ignore
-
+                    self._prompt_character_count += self.count_chars_skip_spaces(text) # type: ignore
+
         return True
 
     @override
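The character-counting helper is now looked up on the request object (`self.count_chars_skip_spaces`) instead of being imported from VertexInstrumentor; its implementation moves into the new VertexRequest.py, which this diff view does not show. A hypothetical stand-in, assuming "skip spaces" means counting non-whitespace characters in line with Vertex's character-based accounting:

```python
# Hypothetical stand-in for the helper now provided by _VertexRequest.
# Assumption: it counts the characters of the prompt text excluding whitespace.
def count_chars_skip_spaces(text: str) -> int:
    return sum(1 for ch in text if not ch.isspace())

print(count_chars_skip_spaces("Hello, Gemini!"))  # 13
```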
@@ -247,65 +240,21 @@ class _GoogleGenAiRequest(_ProviderRequest):
 
     @override
     def process_chunk(self, chunk: Any) -> _ChunkResult:
-        from .VertexInstrumentor import vertex_compute_usage, count_chars_skip_spaces
-
-        ingest = False
         response_dict: dict[str, Any] = chunk.to_json_dict()
-        if "provider_response_id" not in self._ingest:
-            id = response_dict.get("response_id", None)
-            if id:
-                self._ingest["provider_response_id"] = id
 
         model: str = response_dict.get("model_version", "")
-
         self._ingest["resource"] = "google." + model
 
+        return self.process_chunk_dict(response_dict=response_dict)
 
-        for candidate in response_dict.get("candidates", []):
-            parts = candidate.get("content", {}).get("parts", [])
-            for part in parts:
-                self._candidates_character_count += count_chars_skip_spaces(part.get("text", ""))
-
-        usage = response_dict.get("usage_metadata", {})
-        if usage and "prompt_token_count" in usage and "candidates_token_count" in usage:
-            vertex_compute_usage(
-                request=self,
-                model=model,
-                response_dict=response_dict,
-                prompt_character_count=self._prompt_character_count,
-                streaming_candidates_characters=self._candidates_character_count
-            )
-            ingest = True
-
-        return _ChunkResult(send_chunk_to_caller=True, ingest=ingest)
-
     @override
     def process_synchronous_response(
         self,
         response: Any,
         log_prompt_and_response: bool,
         kwargs: Any) -> Any:
-        response_dict = response.to_json_dict()
-
-        from .VertexInstrumentor import vertex_compute_usage
-
-        id: Optional[str] = response_dict.get("response_id", None)
-        if id:
-            self._ingest["provider_response_id"] = id
-
-        model: Optional[str] = response_dict.get("model_version", None)
-        if model:
-            self._ingest["resource"] = "google." + model
-
-        vertex_compute_usage(
-            request=self,
-            model=model,
-            response_dict=response_dict,
-            prompt_character_count=self._prompt_character_count,
-            streaming_candidates_characters=self._candidates_character_count
-        )
-
-        if log_prompt_and_response:
-            self._ingest["provider_response_json"] = [json.dumps(response_dict)]
 
-        return
+        return self.vertex_process_synchronous_response(
+            response_dict=response.to_json_dict(),
+            log_prompt_and_response=log_prompt_and_response,
+        )
{payi-0.1.0a86 → payi-0.1.0a87}/src/payi/lib/OpenAIInstrumentor.py
@@ -349,6 +349,19 @@ class _OpenAiChatProviderRequest(_OpenAiProviderRequest):
 
         send_chunk_to_client = True
 
+        choices = model.get("choices", [])
+        if choices:
+            for choice in choices:
+                function = choice.get("delta", {}).get("function_call", {})
+                index = choice.get("index", None)
+
+                if function and index is not None:
+                    name = function.get("name", None)
+                    arguments = function.get("arguments", None)
+
+                    if name or arguments:
+                        self.add_streaming_function_call(index=index, name=name, arguments=arguments)
+
         usage = model.get("usage")
         if usage:
             self.add_usage_units(usage)
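The streaming branch above picks up legacy `function_call` deltas from Chat Completions chunks: the first delta for a choice typically carries the function name and later deltas carry argument fragments, which the instrumentor records per choice index. A self-contained sketch of that accumulation, with invented chunks:

```python
import json

# Invented Chat Completions stream deltas for a legacy function_call.
chunks = [
    {"choices": [{"index": 0, "delta": {"function_call": {"name": "get_weather", "arguments": ""}}}]},
    {"choices": [{"index": 0, "delta": {"function_call": {"arguments": '{"city": '}}}]},
    {"choices": [{"index": 0, "delta": {"function_call": {"arguments": '"Seattle"}'}}}]},
    {"choices": [{"index": 0, "delta": {}, "finish_reason": "function_call"}]},
]

calls = {}  # choice index -> {"name": ..., "arguments": ...}
for chunk in chunks:
    for choice in chunk.get("choices", []):
        function = choice.get("delta", {}).get("function_call", {})
        index = choice.get("index")
        if not function or index is None:
            continue
        entry = calls.setdefault(index, {"name": "", "arguments": ""})
        # Keep the first name seen, concatenate every argument fragment.
        entry["name"] = entry["name"] or (function.get("name") or "")
        entry["arguments"] += function.get("arguments") or ""

print(calls[0]["name"], json.loads(calls[0]["arguments"]))  # get_weather {'city': 'Seattle'}
```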
@@ -379,7 +392,7 @@ class _OpenAiChatProviderRequest(_OpenAiProviderRequest):
         try:
             enc = tiktoken.get_encoding("o200k_base") # type: ignore
         except Exception:
-            self._instrumentor._logger.
+            self._instrumentor._logger.info("OpenAI skipping vision token calc, could not load o200k_base")
             enc = None
 
         if enc:
@@ -411,6 +424,22 @@ class _OpenAiChatProviderRequest(_OpenAiProviderRequest):
         response: Any,
         log_prompt_and_response: bool,
         kwargs: Any) -> Any:
+
+        response_dict = model_to_dict(response)
+        choices = response_dict.get("choices", [])
+        if choices:
+            for choice in choices:
+                function = choice.get("message", {}).get("function_call", {})
+
+                if not function:
+                    continue
+
+                name = function.get("name", None)
+                arguments = function.get("arguments", None)
+
+                if name:
+                    self.add_synchronous_function_call(name=name, arguments=arguments)
+
         return self.process_synchronous_response_worker(response, log_prompt_and_response)
 
 class _OpenAiResponsesProviderRequest(_OpenAiProviderRequest):
@@ -432,6 +461,16 @@ class _OpenAiResponsesProviderRequest(_OpenAiProviderRequest):
         if response_id:
             self._ingest["provider_response_id"] = response_id
 
+        type = model.get("type", "")
+        if type and type == "response.output_item.done":
+            item = model.get("item", {})
+            if item and item.get("type", "") == "function_call":
+                name = item.get("name", None)
+                arguments = item.get("arguments", None)
+
+                if name:
+                    self.add_synchronous_function_call(name=name, arguments=arguments)
+
         usage = response.get("usage")
         if usage:
             self.add_usage_units(usage)
@@ -459,7 +498,7 @@ class _OpenAiResponsesProviderRequest(_OpenAiProviderRequest):
         try:
             enc = tiktoken.get_encoding("o200k_base") # type: ignore
         except Exception:
-            self._instrumentor._logger.
+            self._instrumentor._logger.info("OpenAI skipping vision token calc, could not load o200k_base")
             enc = None
 
         # find each content..type="input_text" and count tokens
@@ -498,6 +537,21 @@ class _OpenAiResponsesProviderRequest(_OpenAiProviderRequest):
         response: Any,
         log_prompt_and_response: bool,
         kwargs: Any) -> Any:
+
+        response_dict = model_to_dict(response)
+        output = response_dict.get("output", [])
+        if output:
+            for o in output:
+                type = o.get("type", "")
+                if type != "function_call":
+                    continue
+
+                name = o.get("name", None)
+                arguments = o.get("arguments", None)
+
+                if name:
+                    self.add_synchronous_function_call(name=name, arguments=arguments)
+
         return self.process_synchronous_response_worker(response, log_prompt_and_response)
 
 def model_to_dict(model: Any) -> Any:
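Similarly for the Responses API branch: in a non-streaming response, tool calls show up as items of type `"function_call"` in the `output` list, with `arguments` already serialized as a JSON string. A sketch over a made-up response dict:

```python
# Made-up Responses-API-style payload with one message item and one function_call item.
response_dict = {
    "id": "resp_123",
    "output": [
        {"type": "message", "content": [{"type": "output_text", "text": "Checking..."}]},
        {"type": "function_call", "name": "get_weather", "arguments": '{"city": "Seattle"}'},
    ],
    "usage": {"input_tokens": 30, "output_tokens": 12},
}

calls = []
for o in response_dict.get("output", []):
    if o.get("type", "") != "function_call":
        continue
    name = o.get("name")
    arguments = o.get("arguments")  # already a JSON string in this shape
    if name:
        calls.append((name, arguments))

print(calls)  # [('get_weather', '{"city": "Seattle"}')]
```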