vellum-ai 0.3.10__tar.gz → 0.3.12__tar.gz

Files changed (324)
  1. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/PKG-INFO +1 -1
  2. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/pyproject.toml +1 -1
  3. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/__init__.py +38 -0
  4. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/client.py +14 -14
  5. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/core/client_wrapper.py +1 -1
  6. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/deployments/client.py +30 -0
  7. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/client.py +282 -0
  8. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/__init__.py +40 -0
  9. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/array_variable_value_item.py +11 -0
  10. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/chat_message.py +3 -0
  11. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/chat_message_request.py +3 -0
  12. vellum_ai-0.3.12/src/vellum/types/execution_array_vellum_value.py +31 -0
  13. vellum_ai-0.3.12/src/vellum/types/execution_chat_history_vellum_value.py +31 -0
  14. vellum_ai-0.3.12/src/vellum/types/execution_error_vellum_value.py +31 -0
  15. vellum_ai-0.3.12/src/vellum/types/execution_function_call_vellum_value.py +31 -0
  16. vellum_ai-0.3.12/src/vellum/types/execution_json_vellum_value.py +30 -0
  17. vellum_ai-0.3.12/src/vellum/types/execution_number_vellum_value.py +30 -0
  18. vellum_ai-0.3.12/src/vellum/types/execution_search_results_vellum_value.py +31 -0
  19. vellum_ai-0.3.12/src/vellum/types/execution_string_vellum_value.py +30 -0
  20. vellum_ai-0.3.12/src/vellum/types/execution_vellum_value.py +100 -0
  21. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_workflow_node_result_event.py +2 -0
  22. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_request.py +1 -1
  23. vellum_ai-0.3.12/src/vellum/types/image_variable_value.py +33 -0
  24. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/initiated_workflow_node_result_event.py +1 -0
  25. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_properties.py +1 -0
  26. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_properties_request.py +1 -0
  27. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/rejected_workflow_node_result_event.py +1 -0
  28. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/streaming_workflow_node_result_event.py +1 -0
  29. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_event_error_code.py +5 -0
  30. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_event_type.py +2 -2
  31. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event.py +2 -0
  32. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/LICENSE +0 -0
  33. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/README.md +0 -0
  34. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/core/__init__.py +0 -0
  35. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/core/api_error.py +0 -0
  36. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/core/datetime_utils.py +0 -0
  37. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/core/jsonable_encoder.py +0 -0
  38. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/core/remove_none_from_dict.py +0 -0
  39. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/environment.py +0 -0
  40. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/errors/__init__.py +0 -0
  41. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/errors/bad_request_error.py +0 -0
  42. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/errors/conflict_error.py +0 -0
  43. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/errors/forbidden_error.py +0 -0
  44. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/errors/internal_server_error.py +0 -0
  45. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/errors/not_found_error.py +0 -0
  46. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/py.typed +0 -0
  47. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/__init__.py +0 -0
  48. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/deployments/__init__.py +0 -0
  49. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/deployments/types/__init__.py +0 -0
  50. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/deployments/types/deployments_list_request_status.py +0 -0
  51. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/__init__.py +0 -0
  52. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/types/__init__.py +0 -0
  53. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/types/document_indexes_list_request_status.py +0 -0
  54. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/documents/__init__.py +0 -0
  55. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/documents/client.py +0 -0
  56. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/folder_entities/__init__.py +0 -0
  57. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/folder_entities/client.py +0 -0
  58. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/model_versions/__init__.py +0 -0
  59. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/model_versions/client.py +0 -0
  60. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/registered_prompts/__init__.py +0 -0
  61. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/registered_prompts/client.py +0 -0
  62. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/sandboxes/__init__.py +0 -0
  63. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/sandboxes/client.py +0 -0
  64. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/test_suites/__init__.py +0 -0
  65. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/test_suites/client.py +0 -0
  66. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/workflow_deployments/__init__.py +0 -0
  67. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/workflow_deployments/client.py +0 -0
  68. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/workflow_deployments/types/__init__.py +0 -0
  69. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/workflow_deployments/types/workflow_deployments_list_request_status.py +0 -0
  70. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/api_node_result.py +0 -0
  71. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/api_node_result_data.py +0 -0
  72. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/array_chat_message_content.py +0 -0
  73. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/array_chat_message_content_item.py +0 -0
  74. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/array_chat_message_content_item_request.py +0 -0
  75. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/array_chat_message_content_request.py +0 -0
  76. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/array_enum.py +0 -0
  77. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/block_type_enum.py +0 -0
  78. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/chat_history_enum.py +0 -0
  79. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/chat_history_input_request.py +0 -0
  80. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/chat_history_variable_value.py +0 -0
  81. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/chat_message_content.py +0 -0
  82. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/chat_message_content_request.py +0 -0
  83. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/chat_message_role.py +0 -0
  84. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_chat_history_result.py +0 -0
  85. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_error_result.py +0 -0
  86. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_json_result.py +0 -0
  87. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_number_result.py +0 -0
  88. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_result.py +0 -0
  89. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_result_data.py +0 -0
  90. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_result_output.py +0 -0
  91. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_search_results_result.py +0 -0
  92. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/code_execution_node_string_result.py +0 -0
  93. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/conditional_node_result.py +0 -0
  94. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/conditional_node_result_data.py +0 -0
  95. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/deployment_provider_payload_response.py +0 -0
  96. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/deployment_read.py +0 -0
  97. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/document_document_to_document_index.py +0 -0
  98. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/document_index_read.py +0 -0
  99. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/document_read.py +0 -0
  100. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/document_status.py +0 -0
  101. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/enriched_normalized_completion.py +0 -0
  102. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/entity_status.py +0 -0
  103. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/environment_enum.py +0 -0
  104. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/error_enum.py +0 -0
  105. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/error_variable_value.py +0 -0
  106. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/execute_prompt_api_error_response.py +0 -0
  107. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/execute_prompt_event.py +0 -0
  108. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/execute_prompt_response.py +0 -0
  109. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/execute_workflow_error_response.py +0 -0
  110. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/execute_workflow_response.py +0 -0
  111. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/execute_workflow_stream_error_response.py +0 -0
  112. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/execute_workflow_workflow_result_event.py +0 -0
  113. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/finish_reason_enum.py +0 -0
  114. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_enum.py +0 -0
  115. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_execute_prompt_event.py +0 -0
  116. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_execute_prompt_response.py +0 -0
  117. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_execute_workflow_workflow_result_event.py +0 -0
  118. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_function_call.py +0 -0
  119. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/fulfilled_prompt_execution_meta.py +0 -0
  120. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/function_call.py +0 -0
  121. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/function_call_chat_message_content.py +0 -0
  122. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/function_call_chat_message_content_request.py +0 -0
  123. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/function_call_chat_message_content_value.py +0 -0
  124. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/function_call_chat_message_content_value_request.py +0 -0
  125. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/function_call_enum.py +0 -0
  126. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/function_call_variable_value.py +0 -0
  127. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_error_response.py +0 -0
  128. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_options_request.py +0 -0
  129. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_response.py +0 -0
  130. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_result.py +0 -0
  131. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_result_data.py +0 -0
  132. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_result_error.py +0 -0
  133. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_stream_response.py +0 -0
  134. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_stream_result.py +0 -0
  135. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/generate_stream_result_data.py +0 -0
  136. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/image_chat_message_content.py +0 -0
  137. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/image_chat_message_content_request.py +0 -0
  138. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/image_enum.py +0 -0
  139. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/indexing_state_enum.py +0 -0
  140. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/initiated_enum.py +0 -0
  141. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/initiated_execute_prompt_event.py +0 -0
  142. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/initiated_prompt_execution_meta.py +0 -0
  143. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/json_enum.py +0 -0
  144. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/json_input_request.py +0 -0
  145. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/json_variable_value.py +0 -0
  146. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/logical_operator.py +0 -0
  147. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/logprobs_enum.py +0 -0
  148. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/metadata_filter_config_request.py +0 -0
  149. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/metadata_filter_rule_combinator.py +0 -0
  150. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/metadata_filter_rule_request.py +0 -0
  151. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/model_version_build_config.py +0 -0
  152. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/model_version_exec_config.py +0 -0
  153. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/model_version_exec_config_parameters.py +0 -0
  154. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/model_version_read.py +0 -0
  155. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/model_version_read_status_enum.py +0 -0
  156. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/model_version_sandbox_snapshot.py +0 -0
  157. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_chat_history_variable_value_request.py +0 -0
  158. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_error_variable_value_request.py +0 -0
  159. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_json_variable_value_request.py +0 -0
  160. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_number_variable_value_request.py +0 -0
  161. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_search_results_variable_value_request.py +0 -0
  162. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_string_variable_value_request.py +0 -0
  163. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/named_test_case_variable_value_request.py +0 -0
  164. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_array_value.py +0 -0
  165. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_chat_history_value.py +0 -0
  166. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_error_value.py +0 -0
  167. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_json_value.py +0 -0
  168. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_number_value.py +0 -0
  169. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_search_results_value.py +0 -0
  170. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_input_compiled_string_value.py +0 -0
  171. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_input_variable_compiled_value.py +0 -0
  172. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_array_value.py +0 -0
  173. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_chat_history_value.py +0 -0
  174. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_error_value.py +0 -0
  175. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_function_value.py +0 -0
  176. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_json_value.py +0 -0
  177. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_number_value.py +0 -0
  178. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_search_results_value.py +0 -0
  179. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_string_value.py +0 -0
  180. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/node_output_compiled_value.py +0 -0
  181. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/normalized_log_probs.py +0 -0
  182. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/normalized_token_log_probs.py +0 -0
  183. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/number_enum.py +0 -0
  184. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/number_variable_value.py +0 -0
  185. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/paginated_document_index_read_list.py +0 -0
  186. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/paginated_slim_deployment_read_list.py +0 -0
  187. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/paginated_slim_document_list.py +0 -0
  188. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/paginated_slim_workflow_deployment_list.py +0 -0
  189. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/processing_failure_reason_enum.py +0 -0
  190. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/processing_state_enum.py +0 -0
  191. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_deployment_expand_meta_request_request.py +0 -0
  192. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_deployment_input_request.py +0 -0
  193. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_execution_meta.py +0 -0
  194. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_node_result.py +0 -0
  195. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_node_result_data.py +0 -0
  196. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_output.py +0 -0
  197. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block.py +0 -0
  198. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_data.py +0 -0
  199. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_data_request.py +0 -0
  200. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/prompt_template_block_request.py +0 -0
  201. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/provider_enum.py +0 -0
  202. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/raw_prompt_execution_overrides_request.py +0 -0
  203. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_error_response.py +0 -0
  204. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_model_parameters_request.py +0 -0
  205. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_prompt.py +0 -0
  206. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_prompt_info_request.py +0 -0
  207. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/register_prompt_response.py +0 -0
  208. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_deployment.py +0 -0
  209. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_input_variable_request.py +0 -0
  210. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_model_version.py +0 -0
  211. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_sandbox.py +0 -0
  212. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/registered_prompt_sandbox_snapshot.py +0 -0
  213. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/rejected_enum.py +0 -0
  214. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/rejected_execute_prompt_event.py +0 -0
  215. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/rejected_execute_prompt_response.py +0 -0
  216. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/rejected_execute_workflow_workflow_result_event.py +0 -0
  217. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/rejected_function_call.py +0 -0
  218. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/rejected_prompt_execution_meta.py +0 -0
  219. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/sandbox_scenario.py +0 -0
  220. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/scenario_input.py +0 -0
  221. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/scenario_input_request.py +0 -0
  222. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/scenario_input_type_enum.py +0 -0
  223. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_error_response.py +0 -0
  224. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_filters_request.py +0 -0
  225. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_node_result.py +0 -0
  226. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_node_result_data.py +0 -0
  227. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_request_options_request.py +0 -0
  228. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_response.py +0 -0
  229. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_result.py +0 -0
  230. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_result_document.py +0 -0
  231. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_result_document_request.py +0 -0
  232. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_result_merging_request.py +0 -0
  233. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_result_request.py +0 -0
  234. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_results_enum.py +0 -0
  235. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_results_variable_value.py +0 -0
  236. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/search_weights_request.py +0 -0
  237. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/slim_deployment_read.py +0 -0
  238. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/slim_document.py +0 -0
  239. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/slim_workflow_deployment.py +0 -0
  240. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/streaming_enum.py +0 -0
  241. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/streaming_execute_prompt_event.py +0 -0
  242. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/streaming_prompt_execution_meta.py +0 -0
  243. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/string_chat_message_content.py +0 -0
  244. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/string_chat_message_content_request.py +0 -0
  245. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/string_enum.py +0 -0
  246. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/string_input_request.py +0 -0
  247. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/string_variable_value.py +0 -0
  248. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/submit_completion_actual_request.py +0 -0
  249. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/submit_completion_actuals_error_response.py +0 -0
  250. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/submit_workflow_execution_actual_request.py +0 -0
  251. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/subworkflow_enum.py +0 -0
  252. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/subworkflow_node_result.py +0 -0
  253. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/templating_node_chat_history_result.py +0 -0
  254. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/templating_node_error_result.py +0 -0
  255. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/templating_node_json_result.py +0 -0
  256. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/templating_node_number_result.py +0 -0
  257. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/templating_node_result.py +0 -0
  258. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/templating_node_result_data.py +0 -0
  259. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/templating_node_result_output.py +0 -0
  260. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/templating_node_search_results_result.py +0 -0
  261. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/templating_node_string_result.py +0 -0
  262. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_array_result.py +0 -0
  263. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_chat_history_result.py +0 -0
  264. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_error_result.py +0 -0
  265. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_function_call_result.py +0 -0
  266. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_json_result.py +0 -0
  267. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_number_result.py +0 -0
  268. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_result.py +0 -0
  269. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_result_data.py +0 -0
  270. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_result_output.py +0 -0
  271. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_search_results_result.py +0 -0
  272. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/terminal_node_string_result.py +0 -0
  273. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/test_case_chat_history_variable_value.py +0 -0
  274. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/test_case_error_variable_value.py +0 -0
  275. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/test_case_json_variable_value.py +0 -0
  276. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/test_case_number_variable_value.py +0 -0
  277. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/test_case_search_results_variable_value.py +0 -0
  278. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/test_case_string_variable_value.py +0 -0
  279. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/test_case_variable_value.py +0 -0
  280. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/test_suite_test_case.py +0 -0
  281. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/upload_document_error_response.py +0 -0
  282. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/upload_document_response.py +0 -0
  283. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/vellum_error.py +0 -0
  284. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/vellum_error_code_enum.py +0 -0
  285. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/vellum_error_request.py +0 -0
  286. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/vellum_image.py +0 -0
  287. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/vellum_image_request.py +0 -0
  288. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/vellum_variable.py +0 -0
  289. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/vellum_variable_type.py +0 -0
  290. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_deployment_read.py +0 -0
  291. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_event_error.py +0 -0
  292. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_actual_chat_history_request.py +0 -0
  293. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_actual_json_request.py +0 -0
  294. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_actual_string_request.py +0 -0
  295. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_node_result_event.py +0 -0
  296. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_execution_workflow_result_event.py +0 -0
  297. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_node_result_data.py +0 -0
  298. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_node_result_event.py +0 -0
  299. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_node_result_event_state.py +0 -0
  300. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output.py +0 -0
  301. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_array.py +0 -0
  302. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_chat_history.py +0 -0
  303. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_error.py +0 -0
  304. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_function_call.py +0 -0
  305. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_image.py +0 -0
  306. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_json.py +0 -0
  307. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_number.py +0 -0
  308. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_search_results.py +0 -0
  309. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_output_string.py +0 -0
  310. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_chat_history_input_request.py +0 -0
  311. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_input_request.py +0 -0
  312. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_json_input_request.py +0 -0
  313. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_number_input_request.py +0 -0
  314. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_request_string_input_request.py +0 -0
  315. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data.py +0 -0
  316. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_array.py +0 -0
  317. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_chat_history.py +0 -0
  318. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_error.py +0 -0
  319. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_function_call.py +0 -0
  320. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_json.py +0 -0
  321. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_number.py +0 -0
  322. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_search_results.py +0 -0
  323. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_result_event_output_data_string.py +0 -0
  324. {vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/types/workflow_stream_event.py +0 -0
{vellum_ai-0.3.10 → vellum_ai-0.3.12}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vellum-ai
- Version: 0.3.10
+ Version: 0.3.12
  Summary:
  Requires-Python: >=3.7,<4.0
  Classifier: Programming Language :: Python :: 3
{vellum_ai-0.3.10 → vellum_ai-0.3.12}/pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "vellum-ai"
- version = "0.3.10"
+ version = "0.3.12"
  description = ""
  readme = "README.md"
  authors = []
{vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/__init__.py
@@ -18,6 +18,7 @@ from .types import (
  ArrayVariableValueItem_ChatHistory,
  ArrayVariableValueItem_Error,
  ArrayVariableValueItem_FunctionCall,
+ ArrayVariableValueItem_Image,
  ArrayVariableValueItem_Json,
  ArrayVariableValueItem_Number,
  ArrayVariableValueItem_SearchResults,
@@ -82,6 +83,23 @@ from .types import (
  ExecuteWorkflowWorkflowResultEvent,
  ExecuteWorkflowWorkflowResultEvent_Fulfilled,
  ExecuteWorkflowWorkflowResultEvent_Rejected,
+ ExecutionArrayVellumValue,
+ ExecutionChatHistoryVellumValue,
+ ExecutionErrorVellumValue,
+ ExecutionFunctionCallVellumValue,
+ ExecutionJsonVellumValue,
+ ExecutionNumberVellumValue,
+ ExecutionSearchResultsVellumValue,
+ ExecutionStringVellumValue,
+ ExecutionVellumValue,
+ ExecutionVellumValue_Array,
+ ExecutionVellumValue_ChatHistory,
+ ExecutionVellumValue_Error,
+ ExecutionVellumValue_FunctionCall,
+ ExecutionVellumValue_Json,
+ ExecutionVellumValue_Number,
+ ExecutionVellumValue_SearchResults,
+ ExecutionVellumValue_String,
  FinishReasonEnum,
  FulfilledEnum,
  FulfilledExecutePromptEvent,
@@ -112,6 +130,7 @@ from .types import (
  ImageChatMessageContent,
  ImageChatMessageContentRequest,
  ImageEnum,
+ ImageVariableValue,
  IndexingStateEnum,
  InitiatedEnum,
  InitiatedExecutePromptEvent,
@@ -428,6 +447,7 @@ __all__ = [
  "ArrayVariableValueItem_ChatHistory",
  "ArrayVariableValueItem_Error",
  "ArrayVariableValueItem_FunctionCall",
+ "ArrayVariableValueItem_Image",
  "ArrayVariableValueItem_Json",
  "ArrayVariableValueItem_Number",
  "ArrayVariableValueItem_SearchResults",
@@ -496,6 +516,23 @@ __all__ = [
  "ExecuteWorkflowWorkflowResultEvent",
  "ExecuteWorkflowWorkflowResultEvent_Fulfilled",
  "ExecuteWorkflowWorkflowResultEvent_Rejected",
+ "ExecutionArrayVellumValue",
+ "ExecutionChatHistoryVellumValue",
+ "ExecutionErrorVellumValue",
+ "ExecutionFunctionCallVellumValue",
+ "ExecutionJsonVellumValue",
+ "ExecutionNumberVellumValue",
+ "ExecutionSearchResultsVellumValue",
+ "ExecutionStringVellumValue",
+ "ExecutionVellumValue",
+ "ExecutionVellumValue_Array",
+ "ExecutionVellumValue_ChatHistory",
+ "ExecutionVellumValue_Error",
+ "ExecutionVellumValue_FunctionCall",
+ "ExecutionVellumValue_Json",
+ "ExecutionVellumValue_Number",
+ "ExecutionVellumValue_SearchResults",
+ "ExecutionVellumValue_String",
  "FinishReasonEnum",
  "ForbiddenError",
  "FulfilledEnum",
@@ -527,6 +564,7 @@ __all__ = [
  "ImageChatMessageContent",
  "ImageChatMessageContentRequest",
  "ImageEnum",
+ "ImageVariableValue",
  "IndexingStateEnum",
  "InitiatedEnum",
  "InitiatedExecutePromptEvent",
@@ -233,25 +233,25 @@ class Vellum:
233
233
  def execute_workflow(
234
234
  self,
235
235
  *,
236
+ inputs: typing.List[WorkflowRequestInputRequest],
236
237
  workflow_deployment_id: typing.Optional[str] = OMIT,
237
238
  workflow_deployment_name: typing.Optional[str] = OMIT,
238
239
  release_tag: typing.Optional[str] = OMIT,
239
- inputs: typing.List[WorkflowRequestInputRequest],
240
240
  external_id: typing.Optional[str] = OMIT,
241
241
  ) -> ExecuteWorkflowResponse:
242
242
  """
243
243
  Executes a deployed Workflow and returns its outputs.
244
244
 
245
245
  Parameters:
246
+ - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
247
+
246
248
  - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
247
249
 
248
250
  - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
249
251
 
250
252
  - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
251
253
 
252
- - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
253
-
254
- - external_id: typing.Optional[str]. Optionally include a unique identifier for monitoring purposes. Must be unique for a given workflow deployment.
254
+ - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique for a given workflow deployment.
255
255
  """
256
256
  _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
257
257
  if workflow_deployment_id is not OMIT:
@@ -286,10 +286,10 @@ class Vellum:
286
286
  def execute_workflow_stream(
287
287
  self,
288
288
  *,
289
+ inputs: typing.List[WorkflowRequestInputRequest],
289
290
  workflow_deployment_id: typing.Optional[str] = OMIT,
290
291
  workflow_deployment_name: typing.Optional[str] = OMIT,
291
292
  release_tag: typing.Optional[str] = OMIT,
292
- inputs: typing.List[WorkflowRequestInputRequest],
293
293
  external_id: typing.Optional[str] = OMIT,
294
294
  event_types: typing.Optional[typing.List[WorkflowExecutionEventType]] = OMIT,
295
295
  ) -> typing.Iterator[WorkflowStreamEvent]:
@@ -297,14 +297,14 @@ class Vellum:
297
297
  Executes a deployed Workflow and streams back its results.
298
298
 
299
299
  Parameters:
300
+ - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
301
+
300
302
  - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
301
303
 
302
304
  - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
303
305
 
304
306
  - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
305
307
 
306
- - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's deployment with their corresponding values.
307
-
308
308
  - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique for a given workflow deployment.
309
309
 
310
310
  - event_types: typing.Optional[typing.List[WorkflowExecutionEventType]]. Optionally specify which events you want to receive. Defaults to only WORKFLOW events. Note that the schema of non-WORKFLOW events is unstable and should be used with caution.
@@ -816,25 +816,25 @@ class AsyncVellum:
816
816
  async def execute_workflow(
817
817
  self,
818
818
  *,
819
+ inputs: typing.List[WorkflowRequestInputRequest],
819
820
  workflow_deployment_id: typing.Optional[str] = OMIT,
820
821
  workflow_deployment_name: typing.Optional[str] = OMIT,
821
822
  release_tag: typing.Optional[str] = OMIT,
822
- inputs: typing.List[WorkflowRequestInputRequest],
823
823
  external_id: typing.Optional[str] = OMIT,
824
824
  ) -> ExecuteWorkflowResponse:
825
825
  """
826
826
  Executes a deployed Workflow and returns its outputs.
827
827
 
828
828
  Parameters:
829
+ - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
830
+
829
831
  - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
830
832
 
831
833
  - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
832
834
 
833
835
  - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
834
836
 
835
- - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
836
-
837
- - external_id: typing.Optional[str]. Optionally include a unique identifier for monitoring purposes. Must be unique for a given workflow deployment.
837
+ - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique for a given workflow deployment.
838
838
  """
839
839
  _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
840
840
  if workflow_deployment_id is not OMIT:
@@ -869,10 +869,10 @@ class AsyncVellum:
869
869
  async def execute_workflow_stream(
870
870
  self,
871
871
  *,
872
+ inputs: typing.List[WorkflowRequestInputRequest],
872
873
  workflow_deployment_id: typing.Optional[str] = OMIT,
873
874
  workflow_deployment_name: typing.Optional[str] = OMIT,
874
875
  release_tag: typing.Optional[str] = OMIT,
875
- inputs: typing.List[WorkflowRequestInputRequest],
876
876
  external_id: typing.Optional[str] = OMIT,
877
877
  event_types: typing.Optional[typing.List[WorkflowExecutionEventType]] = OMIT,
878
878
  ) -> typing.AsyncIterator[WorkflowStreamEvent]:
@@ -880,14 +880,14 @@ class AsyncVellum:
880
880
  Executes a deployed Workflow and streams back its results.
881
881
 
882
882
  Parameters:
883
+ - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
884
+
883
885
  - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
884
886
 
885
887
  - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
886
888
 
887
889
  - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
888
890
 
889
- - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's deployment with their corresponding values.
890
-
891
891
  - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique for a given workflow deployment.
892
892
 
893
893
  - event_types: typing.Optional[typing.List[WorkflowExecutionEventType]]. Optionally specify which events you want to receive. Defaults to only WORKFLOW events. Note that the schema of non-WORKFLOW events is unstable and should be used with caution.
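
These hunks only reorder the signature: inputs is still required and keyword-only, it simply moves ahead of the optional deployment identifiers. A minimal call sketch, assuming WorkflowRequestStringInputRequest keeps its name/value fields from prior releases and is exported from the package root; the deployment name and IDs here are hypothetical:

    from vellum import WorkflowRequestStringInputRequest
    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")

    result = client.execute_workflow(
        # "inputs" now leads the keyword-only parameters and remains required.
        inputs=[WorkflowRequestStringInputRequest(name="query", value="hello")],
        workflow_deployment_name="example-workflow-deployment",  # or workflow_deployment_id
        release_tag="example-tag",   # optional pin to a specific release (hypothetical tag)
        external_id="run-123",       # optional unique identifier for tracking
    )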
{vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/core/client_wrapper.py
@@ -16,7 +16,7 @@ class BaseClientWrapper:
  headers: typing.Dict[str, str] = {
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "vellum-ai",
- "X-Fern-SDK-Version": "0.3.10",
+ "X-Fern-SDK-Version": "0.3.12",
  }
  headers["X_API_KEY"] = self.api_key
  return headers
{vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/deployments/client.py
@@ -8,6 +8,10 @@ from ...core.api_error import ApiError
  from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ...core.jsonable_encoder import jsonable_encoder
  from ...core.remove_none_from_dict import remove_none_from_dict
+ from ...errors.bad_request_error import BadRequestError
+ from ...errors.forbidden_error import ForbiddenError
+ from ...errors.internal_server_error import InternalServerError
+ from ...errors.not_found_error import NotFoundError
  from ...types.deployment_provider_payload_response import DeploymentProviderPayloadResponse
  from ...types.deployment_read import DeploymentRead
  from ...types.paginated_slim_deployment_read_list import PaginatedSlimDeploymentReadList
@@ -103,6 +107,7 @@ class DeploymentsClient:
  deployment_id: typing.Optional[str] = OMIT,
  deployment_name: typing.Optional[str] = OMIT,
  inputs: typing.List[PromptDeploymentInputRequest],
+ release_tag: typing.Optional[str] = OMIT,
  ) -> DeploymentProviderPayloadResponse:
  """
  Parameters:
@@ -111,6 +116,8 @@ class DeploymentsClient:
  - deployment_name: typing.Optional[str]. The name of the deployment. Must provide either this or deployment_id.

  - inputs: typing.List[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
+
+ - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
  ---
  from vellum.client import Vellum

@@ -126,6 +133,8 @@ class DeploymentsClient:
  _request["deployment_id"] = deployment_id
  if deployment_name is not OMIT:
  _request["deployment_name"] = deployment_name
+ if release_tag is not OMIT:
+ _request["release_tag"] = release_tag
  _response = self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(
@@ -137,6 +146,14 @@ class DeploymentsClient:
  )
  if 200 <= _response.status_code < 300:
  return pydantic.parse_obj_as(DeploymentProviderPayloadResponse, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 403:
+ raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 500:
+ raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  try:
  _response_json = _response.json()
  except JSONDecodeError:
@@ -224,6 +241,7 @@ class AsyncDeploymentsClient:
  deployment_id: typing.Optional[str] = OMIT,
  deployment_name: typing.Optional[str] = OMIT,
  inputs: typing.List[PromptDeploymentInputRequest],
+ release_tag: typing.Optional[str] = OMIT,
  ) -> DeploymentProviderPayloadResponse:
  """
  Parameters:
@@ -232,6 +250,8 @@ class AsyncDeploymentsClient:
  - deployment_name: typing.Optional[str]. The name of the deployment. Must provide either this or deployment_id.

  - inputs: typing.List[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
+
+ - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
  ---
  from vellum.client import AsyncVellum

@@ -247,6 +267,8 @@ class AsyncDeploymentsClient:
  _request["deployment_id"] = deployment_id
  if deployment_name is not OMIT:
  _request["deployment_name"] = deployment_name
+ if release_tag is not OMIT:
+ _request["release_tag"] = release_tag
  _response = await self._client_wrapper.httpx_client.request(
  "POST",
  urllib.parse.urljoin(
@@ -258,6 +280,14 @@ class AsyncDeploymentsClient:
  )
  if 200 <= _response.status_code < 300:
  return pydantic.parse_obj_as(DeploymentProviderPayloadResponse, _response.json()) # type: ignore
+ if _response.status_code == 400:
+ raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 403:
+ raise ForbiddenError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 404:
+ raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
+ if _response.status_code == 500:
+ raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
  try:
  _response_json = _response.json()
  except JSONDecodeError:
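
A hedged usage sketch of the new release_tag field and typed error handling on the deployments client. The method name is not visible in these hunks, so retrieve_provider_payload is assumed here, and the deployment name is hypothetical:

    from vellum.client import Vellum
    from vellum.errors.not_found_error import NotFoundError

    client = Vellum(api_key="YOUR_API_KEY")
    try:
        payload = client.deployments.retrieve_provider_payload(  # assumed method name
            deployment_name="example-prompt-deployment",          # hypothetical
            inputs=[],                # fill with PromptDeploymentInputRequest values
            release_tag="production", # new optional field sent in the request body
        )
    except NotFoundError:
        # 400/403/404/500 now raise typed errors instead of the generic ApiError fallback
        ...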
{vellum_ai-0.3.10 → vellum_ai-0.3.12}/src/vellum/resources/document_indexes/client.py
@@ -187,6 +187,147 @@ class DocumentIndexesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def update(
+ self,
+ id: str,
+ *,
+ label: str,
+ status: typing.Optional[EntityStatus] = OMIT,
+ environment: typing.Optional[EnvironmentEnum] = OMIT,
+ ) -> DocumentIndexRead:
+ """
+ Used to fully update a Document Index given its ID.
+
+ Parameters:
+ - id: str. A UUID string identifying this document index.
+
+ - label: str. A human-readable label for the document index
+
+ - status: typing.Optional[EntityStatus]. The current status of the document index
+
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived
+ - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
+
+ * `DEVELOPMENT` - Development
+ * `STAGING` - Staging
+ * `PRODUCTION` - Production---
+ from vellum.client import Vellum
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.document_indexes.update(
+ id="id",
+ label="label",
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {"label": label}
+ if status is not OMIT:
+ _request["status"] = status
+ if environment is not OMIT:
+ _request["environment"] = environment
+ _response = self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=None,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(DocumentIndexRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def destroy(self, id: str) -> None:
+ """
+ Used to delete a Document Index given its ID.
+
+ Parameters:
+ - id: str. A UUID string identifying this document index.
+ ---
+ from vellum.client import Vellum
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.document_indexes.destroy(
+ id="id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=None,
+ )
+ if 200 <= _response.status_code < 300:
+ return
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def partial_update(
+ self,
+ id: str,
+ *,
+ label: typing.Optional[str] = OMIT,
+ status: typing.Optional[EntityStatus] = OMIT,
+ environment: typing.Optional[EnvironmentEnum] = OMIT,
+ ) -> DocumentIndexRead:
+ """
+ Used to partial update a Document Index given its ID.
+
+ Parameters:
+ - id: str. A UUID string identifying this document index.
+
+ - label: typing.Optional[str]. A human-readable label for the document index
+
+ - status: typing.Optional[EntityStatus]. The current status of the document index
+
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived
+ - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
+
+ * `DEVELOPMENT` - Development
+ * `STAGING` - Staging
+ * `PRODUCTION` - Production---
+ from vellum.client import Vellum
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.document_indexes.partial_update(
+ id="id",
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if label is not OMIT:
+ _request["label"] = label
+ if status is not OMIT:
+ _request["status"] = status
+ if environment is not OMIT:
+ _request["environment"] = environment
+ _response = self._client_wrapper.httpx_client.request(
+ "PATCH",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=None,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(DocumentIndexRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+

  class AsyncDocumentIndexesClient:
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -351,3 +492,144 @@ class AsyncDocumentIndexesClient:
  except JSONDecodeError:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update(
+ self,
+ id: str,
+ *,
+ label: str,
+ status: typing.Optional[EntityStatus] = OMIT,
+ environment: typing.Optional[EnvironmentEnum] = OMIT,
+ ) -> DocumentIndexRead:
+ """
+ Used to fully update a Document Index given its ID.
+
+ Parameters:
+ - id: str. A UUID string identifying this document index.
+
+ - label: str. A human-readable label for the document index
+
+ - status: typing.Optional[EntityStatus]. The current status of the document index
+
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived
+ - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
+
+ * `DEVELOPMENT` - Development
+ * `STAGING` - Staging
+ * `PRODUCTION` - Production---
+ from vellum.client import AsyncVellum
+
+ client = AsyncVellum(
+ api_key="YOUR_API_KEY",
+ )
+ await client.document_indexes.update(
+ id="id",
+ label="label",
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {"label": label}
+ if status is not OMIT:
+ _request["status"] = status
+ if environment is not OMIT:
+ _request["environment"] = environment
+ _response = await self._client_wrapper.httpx_client.request(
+ "PUT",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=None,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(DocumentIndexRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def destroy(self, id: str) -> None:
+ """
+ Used to delete a Document Index given its ID.
+
+ Parameters:
+ - id: str. A UUID string identifying this document index.
+ ---
+ from vellum.client import AsyncVellum
+
+ client = AsyncVellum(
+ api_key="YOUR_API_KEY",
+ )
+ await client.document_indexes.destroy(
+ id="id",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "DELETE",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+ headers=self._client_wrapper.get_headers(),
+ timeout=None,
+ )
+ if 200 <= _response.status_code < 300:
+ return
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def partial_update(
+ self,
+ id: str,
+ *,
+ label: typing.Optional[str] = OMIT,
+ status: typing.Optional[EntityStatus] = OMIT,
+ environment: typing.Optional[EnvironmentEnum] = OMIT,
+ ) -> DocumentIndexRead:
+ """
+ Used to partial update a Document Index given its ID.
+
+ Parameters:
+ - id: str. A UUID string identifying this document index.
+
+ - label: typing.Optional[str]. A human-readable label for the document index
+
+ - status: typing.Optional[EntityStatus]. The current status of the document index
+
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived
+ - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
+
+ * `DEVELOPMENT` - Development
+ * `STAGING` - Staging
+ * `PRODUCTION` - Production---
+ from vellum.client import AsyncVellum
+
+ client = AsyncVellum(
+ api_key="YOUR_API_KEY",
+ )
+ await client.document_indexes.partial_update(
+ id="id",
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if label is not OMIT:
+ _request["label"] = label
+ if status is not OMIT:
+ _request["status"] = status
+ if environment is not OMIT:
+ _request["environment"] = environment
+ _response = await self._client_wrapper.httpx_client.request(
+ "PATCH",
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/document-indexes/{id}"),
+ json=jsonable_encoder(_request),
+ headers=self._client_wrapper.get_headers(),
+ timeout=None,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(DocumentIndexRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
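
Taken together, these additions give the document_indexes client full update (PUT), partial update (PATCH), and delete (DELETE) operations. A minimal sketch built directly from the docstring examples above (the id and labels are placeholders):

    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")

    # Full update: label is required; status and environment are optional.
    client.document_indexes.update(id="id", label="label")

    # Partial update: only the fields you pass are sent in the PATCH body.
    client.document_indexes.partial_update(id="id", label="new label")

    # Delete the index by its UUID.
    client.document_indexes.destroy(id="id")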