payi 0.1.0a85__tar.gz → 0.1.0a87__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of payi might be problematic.

Files changed (210)
  1. payi-0.1.0a87/.release-please-manifest.json +3 -0
  2. {payi-0.1.0a85 → payi-0.1.0a87}/CHANGELOG.md +16 -0
  3. {payi-0.1.0a85 → payi-0.1.0a87}/PKG-INFO +1 -1
  4. {payi-0.1.0a85 → payi-0.1.0a87}/pyproject.toml +1 -1
  5. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_version.py +1 -1
  6. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/AnthropicInstrumentor.py +56 -3
  7. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/BedrockInstrumentor.py +90 -16
  8. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/GoogleGenAiInstrumentor.py +11 -62
  9. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/OpenAIInstrumentor.py +56 -2
  10. payi-0.1.0a87/src/payi/lib/VertexInstrumentor.py +211 -0
  11. payi-0.1.0a87/src/payi/lib/VertexRequest.py +237 -0
  12. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/instrument.py +54 -222
  13. payi-0.1.0a85/.release-please-manifest.json +0 -3
  14. payi-0.1.0a85/src/payi/lib/VertexInstrumentor.py +0 -397
  15. {payi-0.1.0a85 → payi-0.1.0a87}/.gitignore +0 -0
  16. {payi-0.1.0a85 → payi-0.1.0a87}/CONTRIBUTING.md +0 -0
  17. {payi-0.1.0a85 → payi-0.1.0a87}/LICENSE +0 -0
  18. {payi-0.1.0a85 → payi-0.1.0a87}/README.md +0 -0
  19. {payi-0.1.0a85 → payi-0.1.0a87}/SECURITY.md +0 -0
  20. {payi-0.1.0a85 → payi-0.1.0a87}/api.md +0 -0
  21. {payi-0.1.0a85 → payi-0.1.0a87}/bin/check-release-environment +0 -0
  22. {payi-0.1.0a85 → payi-0.1.0a87}/bin/publish-pypi +0 -0
  23. {payi-0.1.0a85 → payi-0.1.0a87}/examples/.keep +0 -0
  24. {payi-0.1.0a85 → payi-0.1.0a87}/mypy.ini +0 -0
  25. {payi-0.1.0a85 → payi-0.1.0a87}/noxfile.py +0 -0
  26. {payi-0.1.0a85 → payi-0.1.0a87}/release-please-config.json +0 -0
  27. {payi-0.1.0a85 → payi-0.1.0a87}/requirements-dev.lock +0 -0
  28. {payi-0.1.0a85 → payi-0.1.0a87}/requirements.lock +0 -0
  29. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/__init__.py +0 -0
  30. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_base_client.py +0 -0
  31. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_client.py +0 -0
  32. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_compat.py +0 -0
  33. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_constants.py +0 -0
  34. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_exceptions.py +0 -0
  35. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_files.py +0 -0
  36. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_models.py +0 -0
  37. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_qs.py +0 -0
  38. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_resource.py +0 -0
  39. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_response.py +0 -0
  40. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_streaming.py +0 -0
  41. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_types.py +0 -0
  42. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/__init__.py +0 -0
  43. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/_logs.py +0 -0
  44. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/_proxy.py +0 -0
  45. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/_reflection.py +0 -0
  46. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/_resources_proxy.py +0 -0
  47. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/_streams.py +0 -0
  48. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/_sync.py +0 -0
  49. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/_transform.py +0 -0
  50. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/_typing.py +0 -0
  51. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_utils/_utils.py +0 -0
  52. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/.keep +0 -0
  53. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/Stopwatch.py +0 -0
  54. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/helpers.py +0 -0
  55. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/pagination.py +0 -0
  56. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/py.typed +0 -0
  57. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/__init__.py +0 -0
  58. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/categories/__init__.py +0 -0
  59. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/categories/categories.py +0 -0
  60. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/categories/fixed_cost_resources.py +0 -0
  61. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/categories/resources.py +0 -0
  62. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/experiences/__init__.py +0 -0
  63. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/experiences/experiences.py +0 -0
  64. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/experiences/properties.py +0 -0
  65. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/experiences/types/__init__.py +0 -0
  66. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/experiences/types/limit_config.py +0 -0
  67. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/experiences/types/types.py +0 -0
  68. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/ingest.py +0 -0
  69. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/limits/__init__.py +0 -0
  70. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/limits/limits.py +0 -0
  71. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/limits/tags.py +0 -0
  72. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/requests/__init__.py +0 -0
  73. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/requests/properties.py +0 -0
  74. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/requests/requests.py +0 -0
  75. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/requests/result.py +0 -0
  76. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/use_cases/__init__.py +0 -0
  77. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/__init__.py +0 -0
  78. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/definitions.py +0 -0
  79. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/kpis.py +0 -0
  80. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/limit_config.py +0 -0
  81. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/use_cases/definitions/version.py +0 -0
  82. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/use_cases/kpis.py +0 -0
  83. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/use_cases/properties.py +0 -0
  84. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/resources/use_cases/use_cases.py +0 -0
  85. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/__init__.py +0 -0
  86. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/bulk_ingest_response.py +0 -0
  87. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/categories/__init__.py +0 -0
  88. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/categories/fixed_cost_resource_create_params.py +0 -0
  89. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/categories/resource_create_params.py +0 -0
  90. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/categories/resource_list_params.py +0 -0
  91. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/category_delete_resource_response.py +0 -0
  92. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/category_delete_response.py +0 -0
  93. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/category_list_params.py +0 -0
  94. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/category_list_resources_params.py +0 -0
  95. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/category_resource_response.py +0 -0
  96. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/category_response.py +0 -0
  97. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/cost_data.py +0 -0
  98. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/cost_details.py +0 -0
  99. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/default_response.py +0 -0
  100. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/experience_instance_response.py +0 -0
  101. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/experiences/__init__.py +0 -0
  102. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/experiences/experience_type.py +0 -0
  103. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/experiences/property_create_params.py +0 -0
  104. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/experiences/type_create_params.py +0 -0
  105. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/experiences/type_list_params.py +0 -0
  106. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/experiences/type_update_params.py +0 -0
  107. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/experiences/types/__init__.py +0 -0
  108. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/experiences/types/limit_config_create_params.py +0 -0
  109. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/ingest_bulk_params.py +0 -0
  110. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/ingest_event_param.py +0 -0
  111. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/ingest_response.py +0 -0
  112. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/ingest_units_params.py +0 -0
  113. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limit_create_params.py +0 -0
  114. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limit_history_response.py +0 -0
  115. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limit_list_params.py +0 -0
  116. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limit_list_response.py +0 -0
  117. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limit_reset_params.py +0 -0
  118. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limit_response.py +0 -0
  119. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limit_update_params.py +0 -0
  120. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/__init__.py +0 -0
  121. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/limit_tags.py +0 -0
  122. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/tag_create_params.py +0 -0
  123. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/tag_create_response.py +0 -0
  124. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/tag_delete_response.py +0 -0
  125. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/tag_list_response.py +0 -0
  126. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/tag_remove_params.py +0 -0
  127. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/tag_remove_response.py +0 -0
  128. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/tag_update_params.py +0 -0
  129. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/limits/tag_update_response.py +0 -0
  130. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/pay_i_common_models_api_router_header_info_param.py +0 -0
  131. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/requests/__init__.py +0 -0
  132. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/requests/property_create_params.py +0 -0
  133. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/requests/request_result.py +0 -0
  134. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/requests_data.py +0 -0
  135. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared/__init__.py +0 -0
  136. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared/evaluation_response.py +0 -0
  137. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared/ingest_units.py +0 -0
  138. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared/pay_i_common_models_budget_management_cost_details_base.py +0 -0
  139. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared/pay_i_common_models_budget_management_create_limit_base.py +0 -0
  140. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared/properties_response.py +0 -0
  141. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared/xproxy_error.py +0 -0
  142. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared/xproxy_result.py +0 -0
  143. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared_params/__init__.py +0 -0
  144. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared_params/ingest_units.py +0 -0
  145. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/shared_params/pay_i_common_models_budget_management_create_limit_base.py +0 -0
  146. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/total_cost_data.py +0 -0
  147. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_case_instance_response.py +0 -0
  148. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/__init__.py +0 -0
  149. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definition_create_params.py +0 -0
  150. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definition_list_params.py +0 -0
  151. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definition_update_params.py +0 -0
  152. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/__init__.py +0 -0
  153. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_create_params.py +0 -0
  154. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_create_response.py +0 -0
  155. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_delete_response.py +0 -0
  156. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_list_params.py +0 -0
  157. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_list_response.py +0 -0
  158. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_retrieve_response.py +0 -0
  159. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_update_params.py +0 -0
  160. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/kpi_update_response.py +0 -0
  161. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/definitions/limit_config_create_params.py +0 -0
  162. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/kpi_create_params.py +0 -0
  163. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/kpi_list_params.py +0 -0
  164. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/kpi_list_response.py +0 -0
  165. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/kpi_update_params.py +0 -0
  166. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/property_create_params.py +0 -0
  167. {payi-0.1.0a85 → payi-0.1.0a87}/src/payi/types/use_cases/use_case_definition.py +0 -0
  168. {payi-0.1.0a85 → payi-0.1.0a87}/tests/__init__.py +0 -0
  169. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/__init__.py +0 -0
  170. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/categories/__init__.py +0 -0
  171. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/categories/test_fixed_cost_resources.py +0 -0
  172. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/categories/test_resources.py +0 -0
  173. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/experiences/__init__.py +0 -0
  174. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/experiences/test_properties.py +0 -0
  175. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/experiences/test_types.py +0 -0
  176. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/experiences/types/__init__.py +0 -0
  177. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/experiences/types/test_limit_config.py +0 -0
  178. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/limits/__init__.py +0 -0
  179. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/limits/test_tags.py +0 -0
  180. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/requests/__init__.py +0 -0
  181. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/requests/test_properties.py +0 -0
  182. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/requests/test_result.py +0 -0
  183. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/test_categories.py +0 -0
  184. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/test_experiences.py +0 -0
  185. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/test_ingest.py +0 -0
  186. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/test_limits.py +0 -0
  187. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/test_use_cases.py +0 -0
  188. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/use_cases/__init__.py +0 -0
  189. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/use_cases/definitions/__init__.py +0 -0
  190. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/use_cases/definitions/test_kpis.py +0 -0
  191. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/use_cases/definitions/test_limit_config.py +0 -0
  192. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/use_cases/definitions/test_version.py +0 -0
  193. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/use_cases/test_definitions.py +0 -0
  194. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/use_cases/test_kpis.py +0 -0
  195. {payi-0.1.0a85 → payi-0.1.0a87}/tests/api_resources/use_cases/test_properties.py +0 -0
  196. {payi-0.1.0a85 → payi-0.1.0a87}/tests/conftest.py +0 -0
  197. {payi-0.1.0a85 → payi-0.1.0a87}/tests/sample_file.txt +0 -0
  198. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_client.py +0 -0
  199. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_deepcopy.py +0 -0
  200. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_extract_files.py +0 -0
  201. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_files.py +0 -0
  202. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_models.py +0 -0
  203. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_qs.py +0 -0
  204. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_required_args.py +0 -0
  205. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_response.py +0 -0
  206. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_streaming.py +0 -0
  207. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_transform.py +0 -0
  208. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_utils/test_proxy.py +0 -0
  209. {payi-0.1.0a85 → payi-0.1.0a87}/tests/test_utils/test_typing.py +0 -0
  210. {payi-0.1.0a85 → payi-0.1.0a87}/tests/utils.py +0 -0

payi-0.1.0a87/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+  ".": "0.1.0-alpha.87"
+}

{payi-0.1.0a85 → payi-0.1.0a87}/CHANGELOG.md
@@ -1,5 +1,21 @@
 # Changelog
 
+## 0.1.0-alpha.87 (2025-06-16)
+
+Full Changelog: [v0.1.0-alpha.86...v0.1.0-alpha.87](https://github.com/Pay-i/pay-i-python/compare/v0.1.0-alpha.86...v0.1.0-alpha.87)
+
+### Features
+
+* capture function calling ([#324](https://github.com/Pay-i/pay-i-python/issues/324)) ([5e89cb8](https://github.com/Pay-i/pay-i-python/commit/5e89cb8f8526cd8c9053683e40a9d6c9a1773742))
+
+## 0.1.0-alpha.86 (2025-06-13)
+
+Full Changelog: [v0.1.0-alpha.85...v0.1.0-alpha.86](https://github.com/Pay-i/pay-i-python/compare/v0.1.0-alpha.85...v0.1.0-alpha.86)
+
+### Bug Fixes
+
+* anthopic logging ([#322](https://github.com/Pay-i/pay-i-python/issues/322)) ([1af515b](https://github.com/Pay-i/pay-i-python/commit/1af515b9603058cf23b75e477fbb67e7acd405ad))
+
 ## 0.1.0-alpha.85 (2025-06-12)
 
 Full Changelog: [v0.1.0-alpha.84...v0.1.0-alpha.85](https://github.com/Pay-i/pay-i-python/compare/v0.1.0-alpha.84...v0.1.0-alpha.85)

{payi-0.1.0a85 → payi-0.1.0a87}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: payi
-Version: 0.1.0a85
+Version: 0.1.0a87
 Summary: The official Python library for the payi API
 Project-URL: Homepage, https://github.com/Pay-i/pay-i-python
 Project-URL: Repository, https://github.com/Pay-i/pay-i-python

{payi-0.1.0a85 → payi-0.1.0a87}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "payi"
-version = "0.1.0-alpha.85"
+version = "0.1.0-alpha.87"
 description = "The official Python library for the payi API"
 dynamic = ["readme"]
 license = "Apache-2.0"

{payi-0.1.0a85 → payi-0.1.0a87}/src/payi/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "payi"
-__version__ = "0.1.0-alpha.85" # x-release-please-version
+__version__ = "0.1.0-alpha.87" # x-release-please-version

{payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/AnthropicInstrumentor.py
@@ -169,6 +169,8 @@ class _AnthropicProviderRequest(_ProviderRequest):
     def process_request(self, instance: Any, extra_headers: 'dict[str, str]', args: Sequence[Any], kwargs: Any) -> bool:
         self._ingest["resource"] = ("anthropic." if self._is_vertex else "") + kwargs.get("model", "")
 
+        self._instrumentor._logger.debug(f"Processing anthropic request: model {self._ingest['resource']}, category {self._category}")
+
         messages = kwargs.get("messages")
         if messages:
             anthropic_has_image_and_get_texts(self, messages)
@@ -225,6 +227,20 @@ def anthropic_process_synchronous_response(request: _ProviderRequest, response:
 
     units["text"] = Units(input=input, output=output)
 
+    content = response.get('content', [])
+    if content:
+        for c in content:
+            if c.get("type", "") != "tool_use":
+                continue
+            name = c.get("name", "")
+            input = c.get("input", "")
+            arguments: Optional[str] = None
+            if input and isinstance(input, dict):
+                arguments = json.dumps(input, ensure_ascii=False)
+
+            if name and arguments:
+                request.add_synchronous_function_call(name=name, arguments=arguments)
+
     if log_prompt_and_response:
         request._ingest["provider_response_json"] = json.dumps(response)
 
@@ -243,6 +259,11 @@ def anthropic_process_chunk(request: _ProviderRequest, chunk: 'dict[str, Any]',
         if assign_id:
             request._ingest["provider_response_id"] = message.get('id', None)
 
+        model = message.get('model', None)
+        if model:
+            request._instrumentor._logger.debug(f"Anthropic streaming, reported model: {model}, instrumented model {request._ingest['resource']}")
+
+
         usage = message['usage']
         units = request._ingest["units"]
 
@@ -258,6 +279,8 @@ def anthropic_process_chunk(request: _ProviderRequest, chunk: 'dict[str, Any]',
         if text_cache_read > 0:
             units["text_cache_read"] = Units(input=text_cache_read, output=0)
 
+        request._instrumentor._logger.debug(f"Anthropic streaming captured {input} input tokens, ")
+
     elif type == "message_delta":
         usage = chunk.get('usage', {})
         ingest = True
@@ -265,12 +288,42 @@ def anthropic_process_chunk(request: _ProviderRequest, chunk: 'dict[str, Any]',
         # Web search will return an updated input tokens value at the end of streaming
         input_tokens = usage.get('input_tokens', None)
         if input_tokens is not None:
+            request._instrumentor._logger.debug(f"Anthropic streaming finished, updated input tokens: {input_tokens}")
            request._ingest["units"]["text"]["input"] = input_tokens
 
         request._ingest["units"]["text"]["output"] = usage.get('output_tokens', 0)
-
-    return _ChunkResult(send_chunk_to_caller=True, ingest=ingest)
 
+        request._instrumentor._logger.debug(f"Anthropic streaming finished: output tokens {usage.get('output_tokens', 0)} ")
+
+    elif type == "content_block_start":
+        request._building_function_response = False
+
+        content_block = chunk.get('content_block', {})
+        if content_block and content_block.get('type', "") == "tool_use":
+            index = chunk.get('index', None)
+            name = content_block.get('name', "")
+
+            if index and isinstance(index, int) and name:
+                request._building_function_response = True
+                request.add_streaming_function_call(index=index, name=name, arguments=None)
+
+    elif type == "content_block_delta":
+        if request._building_function_response:
+            delta = chunk.get("delta", {})
+            type = delta.get("type", "")
+            partial_json = delta.get("partial_json", "")
+            index = chunk.get('index', None)
+
+            if index and isinstance(index, int) and type == "input_json_delta" and partial_json:
+                request.add_streaming_function_call(index=index, name=None, arguments=partial_json)
+
+    elif type == "content_block_stop":
+        request._building_function_response = False
+
+    else:
+        request._instrumentor._logger.debug(f"Anthropic streaming chunk: {type}")
+
+    return _ChunkResult(send_chunk_to_caller=True, ingest=ingest)
 
 def anthropic_has_image_and_get_texts(request: _ProviderRequest, messages: Any) -> None:
     estimated_token_count = 0
@@ -288,7 +341,7 @@ def anthropic_has_image_and_get_texts(request: _ProviderRequest, messages: Any)
         request._estimated_prompt_tokens = estimated_token_count
 
     except Exception:
-        request._instrumentor._logger.warning("Error getting encoding for cl100k_base")
+        request._instrumentor._logger.info("Anthropic skipping vision token calc, could not load cl100k_base")
 
 def has_image_and_get_texts(encoding: tiktoken.Encoding, content: Union[str, 'list[Any]']) -> 'tuple[bool, int]':
     if isinstance(content, list): # type: ignore

{payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/BedrockInstrumentor.py
@@ -1,6 +1,6 @@
 import os
 import json
-from typing import Any, Sequence
+from typing import Any, Optional, Sequence
 from functools import wraps
 from typing_extensions import override
 
@@ -132,15 +132,32 @@ class InvokeResponseWrapper(ObjectProxy): # type: ignore
                 log_prompt_and_response=False, # will evaluate logging later
                 assign_id=False)
 
-        elif resource.startswith("meta.llama3"):
-            input = response['prompt_token_count']
-            output = response['generation_token_count']
+        elif self._request._is_meta:
+            input = response.get('prompt_token_count', 0)
+            output = response.get('generation_token_count', 0)
             units["text"] = Units(input=input, output=output)
 
+        elif self._request._is_nova:
+            usage = response.get("usage", {})
+
+            input = usage.get("inputTokens", 0)
+            output = usage.get("outputTokens", 0)
+            units["text"] = Units(input=input, output=output)
+
+            text_cache_read = usage.get("cacheReadInputTokenCount", None)
+            if text_cache_read:
+                units["text_cache_read"] = text_cache_read
+
+            text_cache_write = usage.get("cacheWriteInputTokenCount", None)
+            if text_cache_write:
+                units["text_cache_write"] = text_cache_write
+
+            bedrock_converse_process_synchronous_function_call(self._request, response)
+
         if self._log_prompt_and_response:
             ingest["provider_response_json"] = data.decode('utf-8') # type: ignore
 
-        self._request._instrumentor._ingest_units(ingest)
+        self._request._instrumentor._ingest_units(self._request)
 
         return data # type: ignore
 
@@ -260,6 +277,8 @@ class _BedrockInvokeProviderRequest(_BedrockProviderRequest):
     def __init__(self, instrumentor: _PayiInstrumentor, model_id: str):
         super().__init__(instrumentor=instrumentor)
         self._is_anthropic: bool = 'anthropic' in model_id
+        self._is_nova: bool = 'nova' in model_id
+        self._is_meta: bool = 'meta' in model_id
 
     @override
     def process_request(self, instance: Any, extra_headers: 'dict[str, str]', args: Sequence[Any], kwargs: Any) -> bool:
@@ -280,24 +299,35 @@ class _BedrockInvokeProviderRequest(_BedrockProviderRequest):
 
     @override
     def process_chunk(self, chunk: Any) -> _ChunkResult:
-        if self._is_anthropic:
-            return self.process_invoke_streaming_anthropic_chunk(chunk)
-        else:
-            return self.process_invoke_streaming_llama_chunk(chunk)
+        chunk_dict = json.loads(chunk)
 
-    def process_invoke_streaming_anthropic_chunk(self, chunk: str) -> _ChunkResult:
-        from .AnthropicInstrumentor import anthropic_process_chunk
+        if self._is_anthropic:
+            from .AnthropicInstrumentor import anthropic_process_chunk
+            return anthropic_process_chunk(self, chunk_dict, assign_id=False)
 
-        return anthropic_process_chunk(self, json.loads(chunk), assign_id=False)
+        if self._is_nova:
+            bedrock_converse_process_streaming_for_function_call(self, chunk_dict)
+
+        # meta and nova
+        return self.process_invoke_other_provider_chunk(chunk_dict)
 
-    def process_invoke_streaming_llama_chunk(self, chunk: str) -> _ChunkResult:
+    def process_invoke_other_provider_chunk(self, chunk_dict: 'dict[str, Any]') -> _ChunkResult:
         ingest = False
-        chunk_dict = json.loads(chunk)
+
         metrics = chunk_dict.get("amazon-bedrock-invocationMetrics", {})
         if metrics:
             input = metrics.get("inputTokenCount", 0)
             output = metrics.get("outputTokenCount", 0)
             self._ingest["units"]["text"] = Units(input=input, output=output)
+
+            text_cache_read = metrics.get("cacheReadInputTokenCount", None)
+            if text_cache_read:
+                self._ingest["units"]["text_cache_read"] = text_cache_read
+
+            text_cache_write = metrics.get("cacheWriteInputTokenCount", None)
+            if text_cache_write:
+                self._ingest["units"]["text_cache_write"] = text_cache_write
+
             ingest = True
 
         return _ChunkResult(send_chunk_to_caller=True, ingest=ingest)
@@ -356,7 +386,9 @@ class _BedrockConverseProviderRequest(_BedrockProviderRequest):
         response_without_metadata.pop("ResponseMetadata", None)
         self._ingest["provider_response_json"] = json.dumps(response_without_metadata)
 
-        return None
+        bedrock_converse_process_synchronous_function_call(self, response)
+
+        return None
 
     @override
     def process_chunk(self, chunk: 'dict[str, Any]') -> _ChunkResult:
@@ -371,4 +403,46 @@ class _BedrockConverseProviderRequest(_BedrockProviderRequest):
 
         ingest = True
 
-        return _ChunkResult(send_chunk_to_caller=True, ingest=ingest)
+        bedrock_converse_process_streaming_for_function_call(self, chunk)
+
+        return _ChunkResult(send_chunk_to_caller=True, ingest=ingest)
+
+def bedrock_converse_process_streaming_for_function_call(request: _ProviderRequest, chunk: 'dict[str, Any]') -> None:
+    contentBlockStart = chunk.get("contentBlockStart", {})
+    tool_use = contentBlockStart.get("start", {}).get("toolUse", {})
+    if tool_use:
+        index = contentBlockStart.get("contentBlockIndex", None)
+        name = tool_use.get("name", "")
+
+        if name and index is not None:
+            request.add_streaming_function_call(index=index, name=name, arguments=None)
+
+        return
+
+    contentBlockDelta = chunk.get("contentBlockDelta", {})
+    tool_use = contentBlockDelta.get("delta", {}).get("toolUse", {})
+    if tool_use:
+        index = contentBlockDelta.get("contentBlockIndex", None)
+        input = tool_use.get("input", "")
+
+        if input and index is not None:
+            request.add_streaming_function_call(index=index, name=None, arguments=input)
+
+        return
+
+def bedrock_converse_process_synchronous_function_call(request: _ProviderRequest, response: 'dict[str, Any]') -> None:
+    content = response.get("output", {}).get("message", {}).get("content", [])
+    if content:
+        for item in content:
+            tool_use = item.get("toolUse", {})
+            if tool_use:
+                name = tool_use.get("name", "")
+                input = tool_use.get("input", {})
+                arguments: Optional[str] = None
+
+                if input and isinstance(input, dict):
+                    arguments = json.dumps(input)
+
+                if name:
+                    request.add_synchronous_function_call(name=name, arguments=arguments)

{payi-0.1.0a85 → payi-0.1.0a87}/src/payi/lib/GoogleGenAiInstrumentor.py
@@ -1,12 +1,10 @@
-import json
-from typing import Any, List, Union, Optional, Sequence
+from typing import Any, List, Union, Sequence
 from typing_extensions import override
 
 from wrapt import wrap_function_wrapper # type: ignore
 
-from payi.lib.helpers import PayiCategories
-
-from .instrument import _ChunkResult, _IsStreaming, _StreamingType, _ProviderRequest, _PayiInstrumentor
+from .instrument import _ChunkResult, _IsStreaming, _PayiInstrumentor
+from .VertexRequest import _VertexRequest
 
 
 class GoogleGenAiInstrumentor:
@@ -113,13 +111,10 @@ async def agenerate_stream_wrapper(
         kwargs,
     )
 
-class _GoogleGenAiRequest(_ProviderRequest):
+class _GoogleGenAiRequest(_VertexRequest):
     def __init__(self, instrumentor: _PayiInstrumentor):
         super().__init__(
             instrumentor=instrumentor,
-            category=PayiCategories.google_vertex,
-            streaming_type=_StreamingType.generator,
-            is_google_vertex_or_genai_client=True,
         )
         self._prompt_character_count = 0
         self._candidates_character_count = 0
@@ -154,8 +149,6 @@ class _GoogleGenAiRequest(_ProviderRequest):
         if isinstance(value, list):
             items = value # type: ignore
 
-        from .VertexInstrumentor import count_chars_skip_spaces
-
         for item in items: # type: ignore
             text = ""
             if isinstance(item, Part):
@@ -166,8 +159,8 @@ class _GoogleGenAiRequest(_ProviderRequest):
                 text = item
 
             if text != "":
-                self._prompt_character_count += count_chars_skip_spaces(text) # type: ignore
-
+                self._prompt_character_count += self.count_chars_skip_spaces(text) # type: ignore
+
         return True
 
     @override
@@ -247,65 +240,21 @@ class _GoogleGenAiRequest(_ProviderRequest):
 
     @override
     def process_chunk(self, chunk: Any) -> _ChunkResult:
-        from .VertexInstrumentor import vertex_compute_usage, count_chars_skip_spaces
-
-        ingest = False
         response_dict: dict[str, Any] = chunk.to_json_dict()
-        if "provider_response_id" not in self._ingest:
-            id = response_dict.get("response_id", None)
-            if id:
-                self._ingest["provider_response_id"] = id
 
         model: str = response_dict.get("model_version", "")
-
         self._ingest["resource"] = "google." + model
 
+        return self.process_chunk_dict(response_dict=response_dict)
 
-        for candidate in response_dict.get("candidates", []):
-            parts = candidate.get("content", {}).get("parts", [])
-            for part in parts:
-                self._candidates_character_count += count_chars_skip_spaces(part.get("text", ""))
-
-        usage = response_dict.get("usage_metadata", {})
-        if usage and "prompt_token_count" in usage and "candidates_token_count" in usage:
-            vertex_compute_usage(
-                request=self,
-                model=model,
-                response_dict=response_dict,
-                prompt_character_count=self._prompt_character_count,
-                streaming_candidates_characters=self._candidates_character_count
-            )
-            ingest = True
-
-        return _ChunkResult(send_chunk_to_caller=True, ingest=ingest)
-
     @override
     def process_synchronous_response(
         self,
         response: Any,
         log_prompt_and_response: bool,
         kwargs: Any) -> Any:
-        response_dict = response.to_json_dict()
-
-        from .VertexInstrumentor import vertex_compute_usage
-
-        id: Optional[str] = response_dict.get("response_id", None)
-        if id:
-            self._ingest["provider_response_id"] = id
-
-        model: Optional[str] = response_dict.get("model_version", None)
-        if model:
-            self._ingest["resource"] = "google." + model
-
-        vertex_compute_usage(
-            request=self,
-            model=model,
-            response_dict=response_dict,
-            prompt_character_count=self._prompt_character_count,
-            streaming_candidates_characters=self._candidates_character_count
-        )
-
-        if log_prompt_and_response:
-            self._ingest["provider_response_json"] = [json.dumps(response_dict)]
 
-        return None
+        return self.vertex_process_synchronous_response(
+            response_dict=response.to_json_dict(),
+            log_prompt_and_response=log_prompt_and_response,
+        )
@@ -349,6 +349,19 @@ class _OpenAiChatProviderRequest(_OpenAiProviderRequest):
349
349
 
350
350
  send_chunk_to_client = True
351
351
 
352
+ choices = model.get("choices", [])
353
+ if choices:
354
+ for choice in choices:
355
+ function = choice.get("delta", {}).get("function_call", {})
356
+ index = choice.get("index", None)
357
+
358
+ if function and index is not None:
359
+ name = function.get("name", None)
360
+ arguments = function.get("arguments", None)
361
+
362
+ if name or arguments:
363
+ self.add_streaming_function_call(index=index, name=name, arguments=arguments)
364
+
352
365
  usage = model.get("usage")
353
366
  if usage:
354
367
  self.add_usage_units(usage)
@@ -379,7 +392,7 @@ class _OpenAiChatProviderRequest(_OpenAiProviderRequest):
379
392
  try:
380
393
  enc = tiktoken.get_encoding("o200k_base") # type: ignore
381
394
  except Exception:
382
- self._instrumentor._logger.warning("Error getting encoding for fallback o200k_base")
395
+ self._instrumentor._logger.info("OpenAI skipping vision token calc, could not load o200k_base")
383
396
  enc = None
384
397
 
385
398
  if enc:
@@ -411,6 +424,22 @@ class _OpenAiChatProviderRequest(_OpenAiProviderRequest):
411
424
  response: Any,
412
425
  log_prompt_and_response: bool,
413
426
  kwargs: Any) -> Any:
427
+
428
+ response_dict = model_to_dict(response)
429
+ choices = response_dict.get("choices", [])
430
+ if choices:
431
+ for choice in choices:
432
+ function = choice.get("message", {}).get("function_call", {})
433
+
434
+ if not function:
435
+ continue
436
+
437
+ name = function.get("name", None)
438
+ arguments = function.get("arguments", None)
439
+
440
+ if name:
441
+ self.add_synchronous_function_call(name=name, arguments=arguments)
442
+
414
443
  return self.process_synchronous_response_worker(response, log_prompt_and_response)
415
444
 
416
445
  class _OpenAiResponsesProviderRequest(_OpenAiProviderRequest):
@@ -432,6 +461,16 @@ class _OpenAiResponsesProviderRequest(_OpenAiProviderRequest):
432
461
  if response_id:
433
462
  self._ingest["provider_response_id"] = response_id
434
463
 
464
+ type = model.get("type", "")
465
+ if type and type == "response.output_item.done":
466
+ item = model.get("item", {})
467
+ if item and item.get("type", "") == "function_call":
468
+ name = item.get("name", None)
469
+ arguments = item.get("arguments", None)
470
+
471
+ if name:
472
+ self.add_synchronous_function_call(name=name, arguments=arguments)
473
+
435
474
  usage = response.get("usage")
436
475
  if usage:
437
476
  self.add_usage_units(usage)
@@ -459,7 +498,7 @@ class _OpenAiResponsesProviderRequest(_OpenAiProviderRequest):
459
498
  try:
460
499
  enc = tiktoken.get_encoding("o200k_base") # type: ignore
461
500
  except Exception:
462
- self._instrumentor._logger.warning("Error getting encoding for fallback o200k_base")
501
+ self._instrumentor._logger.info("OpenAI skipping vision token calc, could not load o200k_base")
463
502
  enc = None
464
503
 
465
504
  # find each content..type="input_text" and count tokens
@@ -498,6 +537,21 @@ class _OpenAiResponsesProviderRequest(_OpenAiProviderRequest):
498
537
  response: Any,
499
538
  log_prompt_and_response: bool,
500
539
  kwargs: Any) -> Any:
540
+
541
+ response_dict = model_to_dict(response)
542
+ output = response_dict.get("output", [])
543
+ if output:
544
+ for o in output:
545
+ type = o.get("type", "")
546
+ if type != "function_call":
547
+ continue
548
+
549
+ name = o.get("name", None)
550
+ arguments = o.get("arguments", None)
551
+
552
+ if name:
553
+ self.add_synchronous_function_call(name=name, arguments=arguments)
554
+
501
555
  return self.process_synchronous_response_worker(response, log_prompt_and_response)
502
556
 
503
557
  def model_to_dict(model: Any) -> Any: