eval-studio-client 1.0.0a1__py3-none-any.whl → 1.1.0a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (575)
  1. eval_studio_client/api/__init__.py +79 -1
  2. eval_studio_client/api/api/__init__.py +9 -0
  3. eval_studio_client/api/api/adversarial_inputs_service_api.py +321 -0
  4. eval_studio_client/api/api/dashboard_service_api.py +1 -1
  5. eval_studio_client/api/api/document_service_api.py +1 -1
  6. eval_studio_client/api/api/evaluation_service_api.py +1 -1
  7. eval_studio_client/api/api/evaluator_service_api.py +1 -1
  8. eval_studio_client/api/api/generated_questions_validation_service_api.py +321 -0
  9. eval_studio_client/api/api/human_calibration_service_api.py +304 -0
  10. eval_studio_client/api/api/info_service_api.py +1 -1
  11. eval_studio_client/api/api/leaderboard_report_service_api.py +292 -0
  12. eval_studio_client/api/api/leaderboard_service_api.py +17 -17
  13. eval_studio_client/api/api/model_service_api.py +17 -17
  14. eval_studio_client/api/api/operation_progress_service_api.py +1 -1
  15. eval_studio_client/api/api/operation_service_api.py +272 -17
  16. eval_studio_client/api/api/perturbation_service_api.py +1 -1
  17. eval_studio_client/api/api/perturbator_service_api.py +285 -18
  18. eval_studio_client/api/api/prompt_generation_service_api.py +1 -1
  19. eval_studio_client/api/api/prompt_library_service_api.py +669 -0
  20. eval_studio_client/api/api/test_case_relationship_service_api.py +292 -0
  21. eval_studio_client/api/api/test_case_service_api.py +17 -17
  22. eval_studio_client/api/api/test_class_service_api.py +17 -17
  23. eval_studio_client/api/api/test_lab_service_api.py +1 -1
  24. eval_studio_client/api/api/test_service_api.py +1238 -102
  25. eval_studio_client/api/api/who_am_i_service_api.py +1 -1
  26. eval_studio_client/api/api/workflow_edge_service_api.py +835 -0
  27. eval_studio_client/api/api/workflow_node_service_api.py +2431 -0
  28. eval_studio_client/api/api/workflow_service_api.py +1893 -0
  29. eval_studio_client/api/api_client.py +1 -1
  30. eval_studio_client/api/configuration.py +1 -1
  31. eval_studio_client/api/docs/AdversarialInputsServiceApi.md +78 -0
  32. eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +45 -0
  33. eval_studio_client/api/docs/GeneratedQuestionsValidationServiceApi.md +78 -0
  34. eval_studio_client/api/docs/GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.md +30 -0
  35. eval_studio_client/api/docs/HumanCalibrationServiceApi.md +77 -0
  36. eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -0
  37. eval_studio_client/api/docs/LeaderboardServiceApi.md +5 -5
  38. eval_studio_client/api/docs/ModelServiceApi.md +5 -5
  39. eval_studio_client/api/docs/OperationServiceApi.md +72 -5
  40. eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +1 -0
  41. eval_studio_client/api/docs/PerturbatorServiceApi.md +38 -8
  42. eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +4 -2
  43. eval_studio_client/api/docs/PromptLibraryServiceApi.md +155 -0
  44. eval_studio_client/api/docs/ProtobufNullValue.md +12 -0
  45. eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +3 -0
  46. eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +47 -0
  47. eval_studio_client/api/docs/RequiredTheUpdatedWorkflowNode.md +44 -0
  48. eval_studio_client/api/docs/TestCaseRelationshipServiceApi.md +75 -0
  49. eval_studio_client/api/docs/TestCaseServiceApi.md +5 -5
  50. eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
  51. eval_studio_client/api/docs/TestServiceApi.md +285 -5
  52. eval_studio_client/api/docs/TestServiceCloneTestRequest.md +30 -0
  53. eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +3 -1
  54. eval_studio_client/api/docs/TestServiceImportTestCasesFromLibraryRequest.md +32 -0
  55. eval_studio_client/api/docs/TestServiceListTestCaseLibraryItemsRequest.md +35 -0
  56. eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md +30 -0
  57. eval_studio_client/api/docs/TestServicePerturbTestRequest.md +1 -0
  58. eval_studio_client/api/docs/V1AbortOperationResponse.md +29 -0
  59. eval_studio_client/api/docs/V1BatchDeleteWorkflowsRequest.md +29 -0
  60. eval_studio_client/api/docs/V1BatchDeleteWorkflowsResponse.md +29 -0
  61. eval_studio_client/api/docs/V1BatchGetWorkflowEdgesResponse.md +29 -0
  62. eval_studio_client/api/docs/V1BatchGetWorkflowNodesResponse.md +29 -0
  63. eval_studio_client/api/docs/V1CloneTestResponse.md +29 -0
  64. eval_studio_client/api/docs/V1CloneWorkflowResponse.md +29 -0
  65. eval_studio_client/api/docs/V1Context.md +32 -0
  66. eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
  67. eval_studio_client/api/docs/V1CreateWorkflowEdgeResponse.md +29 -0
  68. eval_studio_client/api/docs/V1CreateWorkflowNodeResponse.md +29 -0
  69. eval_studio_client/api/docs/V1CreateWorkflowResponse.md +29 -0
  70. eval_studio_client/api/docs/V1DeleteWorkflowEdgeResponse.md +29 -0
  71. eval_studio_client/api/docs/V1DeleteWorkflowNodeResponse.md +29 -0
  72. eval_studio_client/api/docs/V1DeleteWorkflowResponse.md +29 -0
  73. eval_studio_client/api/docs/V1EstimateThresholdRequest.md +33 -0
  74. eval_studio_client/api/docs/V1GeneratedTestCase.md +30 -0
  75. eval_studio_client/api/docs/V1GetLeaderboardReportResponse.md +29 -0
  76. eval_studio_client/api/docs/V1GetWorkflowNodePrerequisitesResponse.md +30 -0
  77. eval_studio_client/api/docs/V1GetWorkflowNodeResponse.md +29 -0
  78. eval_studio_client/api/docs/V1GetWorkflowResponse.md +29 -0
  79. eval_studio_client/api/docs/V1ImportEvaluationRequest.md +1 -0
  80. eval_studio_client/api/docs/V1ImportTestCasesFromLibraryResponse.md +29 -0
  81. eval_studio_client/api/docs/V1ImportTestCasesRequest.md +33 -0
  82. eval_studio_client/api/docs/V1Info.md +3 -0
  83. eval_studio_client/api/docs/V1InitWorkflowNodeResponse.md +29 -0
  84. eval_studio_client/api/docs/V1LabeledTestCase.md +31 -0
  85. eval_studio_client/api/docs/V1LeaderboardReport.md +32 -0
  86. eval_studio_client/api/docs/V1LeaderboardReportActualOutputData.md +31 -0
  87. eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +31 -0
  88. eval_studio_client/api/docs/V1LeaderboardReportEvaluator.md +42 -0
  89. eval_studio_client/api/docs/V1LeaderboardReportEvaluatorParameter.md +38 -0
  90. eval_studio_client/api/docs/V1LeaderboardReportExplanation.md +34 -0
  91. eval_studio_client/api/docs/V1LeaderboardReportMetricsMetaEntry.md +41 -0
  92. eval_studio_client/api/docs/V1LeaderboardReportModel.md +39 -0
  93. eval_studio_client/api/docs/V1LeaderboardReportResult.md +45 -0
  94. eval_studio_client/api/docs/V1LeaderboardReportResultRelationship.md +32 -0
  95. eval_studio_client/api/docs/V1ListPromptLibraryItemsResponse.md +29 -0
  96. eval_studio_client/api/docs/V1ListTestCaseLibraryItemsResponse.md +29 -0
  97. eval_studio_client/api/docs/V1ListTestCaseRelationshipsResponse.md +29 -0
  98. eval_studio_client/api/docs/V1ListWorkflowsResponse.md +29 -0
  99. eval_studio_client/api/docs/V1MetricScore.md +31 -0
  100. eval_studio_client/api/docs/V1MetricScores.md +29 -0
  101. eval_studio_client/api/docs/V1PerturbTestInPlaceResponse.md +29 -0
  102. eval_studio_client/api/docs/V1ProcessWorkflowNodeResponse.md +29 -0
  103. eval_studio_client/api/docs/V1PromptLibraryItem.md +42 -0
  104. eval_studio_client/api/docs/V1RepeatedString.md +29 -0
  105. eval_studio_client/api/docs/V1ResetWorkflowNodeResponse.md +29 -0
  106. eval_studio_client/api/docs/V1TestCase.md +3 -0
  107. eval_studio_client/api/docs/V1TestSuiteEvaluates.md +11 -0
  108. eval_studio_client/api/docs/V1UpdateWorkflowNodeResponse.md +29 -0
  109. eval_studio_client/api/docs/V1UpdateWorkflowResponse.md +29 -0
  110. eval_studio_client/api/docs/V1Workflow.md +49 -0
  111. eval_studio_client/api/docs/V1WorkflowEdge.md +40 -0
  112. eval_studio_client/api/docs/V1WorkflowEdgeType.md +12 -0
  113. eval_studio_client/api/docs/V1WorkflowNode.md +46 -0
  114. eval_studio_client/api/docs/V1WorkflowNodeArtifact.md +40 -0
  115. eval_studio_client/api/docs/V1WorkflowNodeArtifacts.md +29 -0
  116. eval_studio_client/api/docs/V1WorkflowNodeAttributes.md +30 -0
  117. eval_studio_client/api/docs/V1WorkflowNodeStatus.md +12 -0
  118. eval_studio_client/api/docs/V1WorkflowNodeType.md +12 -0
  119. eval_studio_client/api/docs/V1WorkflowNodeView.md +12 -0
  120. eval_studio_client/api/docs/V1WorkflowType.md +12 -0
  121. eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +215 -0
  122. eval_studio_client/api/docs/WorkflowNodeServiceApi.md +632 -0
  123. eval_studio_client/api/docs/WorkflowServiceApi.md +488 -0
  124. eval_studio_client/api/docs/WorkflowServiceCloneWorkflowRequest.md +33 -0
  125. eval_studio_client/api/exceptions.py +1 -1
  126. eval_studio_client/api/models/__init__.py +70 -1
  127. eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +143 -0
  128. eval_studio_client/api/models/generated_questions_validation_service_validate_generated_questions_request.py +97 -0
  129. eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +9 -3
  130. eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +17 -6
  131. eval_studio_client/api/models/protobuf_any.py +1 -1
  132. eval_studio_client/api/models/protobuf_null_value.py +36 -0
  133. eval_studio_client/api/models/required_the_dashboard_to_update.py +1 -1
  134. eval_studio_client/api/models/required_the_document_to_update.py +1 -1
  135. eval_studio_client/api/models/required_the_leaderboard_to_update.py +1 -1
  136. eval_studio_client/api/models/required_the_model_to_update.py +1 -1
  137. eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
  138. eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
  139. eval_studio_client/api/models/required_the_test_case_to_update.py +14 -3
  140. eval_studio_client/api/models/required_the_test_to_update.py +1 -1
  141. eval_studio_client/api/models/required_the_updated_workflow.py +160 -0
  142. eval_studio_client/api/models/required_the_updated_workflow_node.py +152 -0
  143. eval_studio_client/api/models/rpc_status.py +1 -1
  144. eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
  145. eval_studio_client/api/models/test_service_clone_test_request.py +89 -0
  146. eval_studio_client/api/models/test_service_generate_test_cases_request.py +8 -4
  147. eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +93 -0
  148. eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +99 -0
  149. eval_studio_client/api/models/test_service_perturb_test_in_place_request.py +97 -0
  150. eval_studio_client/api/models/test_service_perturb_test_request.py +5 -3
  151. eval_studio_client/api/models/v1_abort_operation_response.py +91 -0
  152. eval_studio_client/api/models/v1_batch_create_leaderboards_request.py +1 -1
  153. eval_studio_client/api/models/v1_batch_create_leaderboards_response.py +1 -1
  154. eval_studio_client/api/models/v1_batch_delete_dashboards_request.py +1 -1
  155. eval_studio_client/api/models/v1_batch_delete_dashboards_response.py +1 -1
  156. eval_studio_client/api/models/v1_batch_delete_documents_request.py +1 -1
  157. eval_studio_client/api/models/v1_batch_delete_documents_response.py +1 -1
  158. eval_studio_client/api/models/v1_batch_delete_evaluators_request.py +1 -1
  159. eval_studio_client/api/models/v1_batch_delete_evaluators_response.py +1 -1
  160. eval_studio_client/api/models/v1_batch_delete_leaderboards_request.py +1 -1
  161. eval_studio_client/api/models/v1_batch_delete_leaderboards_response.py +1 -1
  162. eval_studio_client/api/models/v1_batch_delete_models_request.py +1 -1
  163. eval_studio_client/api/models/v1_batch_delete_models_response.py +1 -1
  164. eval_studio_client/api/models/v1_batch_delete_test_cases_response.py +1 -1
  165. eval_studio_client/api/models/v1_batch_delete_tests_request.py +1 -1
  166. eval_studio_client/api/models/v1_batch_delete_tests_response.py +1 -1
  167. eval_studio_client/api/models/v1_batch_delete_workflows_request.py +87 -0
  168. eval_studio_client/api/models/v1_batch_delete_workflows_response.py +95 -0
  169. eval_studio_client/api/models/v1_batch_get_dashboards_response.py +1 -1
  170. eval_studio_client/api/models/v1_batch_get_documents_response.py +1 -1
  171. eval_studio_client/api/models/v1_batch_get_leaderboards_response.py +1 -1
  172. eval_studio_client/api/models/v1_batch_get_models_response.py +1 -1
  173. eval_studio_client/api/models/v1_batch_get_operations_response.py +1 -1
  174. eval_studio_client/api/models/v1_batch_get_tests_response.py +1 -1
  175. eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +95 -0
  176. eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +95 -0
  177. eval_studio_client/api/models/v1_batch_import_leaderboard_request.py +1 -1
  178. eval_studio_client/api/models/v1_batch_import_leaderboard_response.py +1 -1
  179. eval_studio_client/api/models/v1_batch_import_tests_request.py +1 -1
  180. eval_studio_client/api/models/v1_batch_import_tests_response.py +1 -1
  181. eval_studio_client/api/models/v1_check_base_models_response.py +1 -1
  182. eval_studio_client/api/models/v1_clone_test_response.py +91 -0
  183. eval_studio_client/api/models/v1_clone_workflow_response.py +91 -0
  184. eval_studio_client/api/models/v1_collection_info.py +1 -1
  185. eval_studio_client/api/models/v1_context.py +93 -0
  186. eval_studio_client/api/models/v1_create_dashboard_response.py +1 -1
  187. eval_studio_client/api/models/v1_create_document_response.py +1 -1
  188. eval_studio_client/api/models/v1_create_evaluation_request.py +8 -3
  189. eval_studio_client/api/models/v1_create_evaluator_response.py +1 -1
  190. eval_studio_client/api/models/v1_create_leaderboard_request.py +1 -1
  191. eval_studio_client/api/models/v1_create_leaderboard_response.py +1 -1
  192. eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +1 -1
  193. eval_studio_client/api/models/v1_create_model_response.py +1 -1
  194. eval_studio_client/api/models/v1_create_perturbation_response.py +1 -1
  195. eval_studio_client/api/models/v1_create_test_case_response.py +1 -1
  196. eval_studio_client/api/models/v1_create_test_lab_response.py +1 -1
  197. eval_studio_client/api/models/v1_create_test_response.py +1 -1
  198. eval_studio_client/api/models/v1_create_workflow_edge_response.py +91 -0
  199. eval_studio_client/api/models/v1_create_workflow_node_response.py +91 -0
  200. eval_studio_client/api/models/v1_create_workflow_response.py +91 -0
  201. eval_studio_client/api/models/v1_dashboard.py +1 -1
  202. eval_studio_client/api/models/v1_dashboard_status.py +1 -1
  203. eval_studio_client/api/models/v1_delete_dashboard_response.py +1 -1
  204. eval_studio_client/api/models/v1_delete_document_response.py +1 -1
  205. eval_studio_client/api/models/v1_delete_evaluator_response.py +1 -1
  206. eval_studio_client/api/models/v1_delete_leaderboard_response.py +1 -1
  207. eval_studio_client/api/models/v1_delete_model_response.py +1 -1
  208. eval_studio_client/api/models/v1_delete_test_case_response.py +1 -1
  209. eval_studio_client/api/models/v1_delete_test_response.py +1 -1
  210. eval_studio_client/api/models/v1_delete_workflow_edge_response.py +91 -0
  211. eval_studio_client/api/models/v1_delete_workflow_node_response.py +91 -0
  212. eval_studio_client/api/models/v1_delete_workflow_response.py +91 -0
  213. eval_studio_client/api/models/v1_document.py +1 -1
  214. eval_studio_client/api/models/v1_estimate_threshold_request.py +103 -0
  215. eval_studio_client/api/models/v1_evaluation_test.py +1 -1
  216. eval_studio_client/api/models/v1_evaluator.py +1 -1
  217. eval_studio_client/api/models/v1_evaluator_param_type.py +1 -1
  218. eval_studio_client/api/models/v1_evaluator_parameter.py +1 -1
  219. eval_studio_client/api/models/v1_evaluator_view.py +1 -1
  220. eval_studio_client/api/models/v1_finalize_operation_response.py +1 -1
  221. eval_studio_client/api/models/v1_find_all_test_cases_by_id_response.py +1 -1
  222. eval_studio_client/api/models/v1_find_test_lab_response.py +1 -1
  223. eval_studio_client/api/models/v1_generate_test_cases_response.py +1 -1
  224. eval_studio_client/api/models/v1_generated_test_case.py +101 -0
  225. eval_studio_client/api/models/v1_get_dashboard_response.py +1 -1
  226. eval_studio_client/api/models/v1_get_document_response.py +1 -1
  227. eval_studio_client/api/models/v1_get_evaluator_response.py +1 -1
  228. eval_studio_client/api/models/v1_get_info_response.py +1 -1
  229. eval_studio_client/api/models/v1_get_leaderboard_report_response.py +91 -0
  230. eval_studio_client/api/models/v1_get_leaderboard_response.py +1 -1
  231. eval_studio_client/api/models/v1_get_model_response.py +1 -1
  232. eval_studio_client/api/models/v1_get_operation_progress_by_parent_response.py +1 -1
  233. eval_studio_client/api/models/v1_get_operation_response.py +1 -1
  234. eval_studio_client/api/models/v1_get_perturbator_response.py +1 -1
  235. eval_studio_client/api/models/v1_get_test_case_response.py +1 -1
  236. eval_studio_client/api/models/v1_get_test_class_response.py +1 -1
  237. eval_studio_client/api/models/v1_get_test_response.py +1 -1
  238. eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +89 -0
  239. eval_studio_client/api/models/v1_get_workflow_node_response.py +91 -0
  240. eval_studio_client/api/models/v1_get_workflow_response.py +91 -0
  241. eval_studio_client/api/models/v1_import_evaluation_request.py +8 -3
  242. eval_studio_client/api/models/v1_import_leaderboard_request.py +1 -1
  243. eval_studio_client/api/models/v1_import_leaderboard_response.py +1 -1
  244. eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +91 -0
  245. eval_studio_client/api/models/v1_import_test_cases_request.py +95 -0
  246. eval_studio_client/api/models/v1_info.py +10 -4
  247. eval_studio_client/api/models/v1_init_workflow_node_response.py +91 -0
  248. eval_studio_client/api/models/v1_insight.py +1 -1
  249. eval_studio_client/api/models/v1_labeled_test_case.py +91 -0
  250. eval_studio_client/api/models/v1_leaderboard.py +1 -1
  251. eval_studio_client/api/models/v1_leaderboard_report.py +115 -0
  252. eval_studio_client/api/models/v1_leaderboard_report_actual_output_data.py +93 -0
  253. eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +101 -0
  254. eval_studio_client/api/models/v1_leaderboard_report_evaluator.py +155 -0
  255. eval_studio_client/api/models/v1_leaderboard_report_evaluator_parameter.py +109 -0
  256. eval_studio_client/api/models/v1_leaderboard_report_explanation.py +103 -0
  257. eval_studio_client/api/models/v1_leaderboard_report_metrics_meta_entry.py +129 -0
  258. eval_studio_client/api/models/v1_leaderboard_report_model.py +121 -0
  259. eval_studio_client/api/models/v1_leaderboard_report_result.py +175 -0
  260. eval_studio_client/api/models/v1_leaderboard_report_result_relationship.py +97 -0
  261. eval_studio_client/api/models/v1_leaderboard_status.py +1 -1
  262. eval_studio_client/api/models/v1_leaderboard_type.py +1 -1
  263. eval_studio_client/api/models/v1_leaderboard_view.py +1 -1
  264. eval_studio_client/api/models/v1_list_base_models_response.py +1 -1
  265. eval_studio_client/api/models/v1_list_dashboards_response.py +1 -1
  266. eval_studio_client/api/models/v1_list_documents_response.py +1 -1
  267. eval_studio_client/api/models/v1_list_evaluators_response.py +1 -1
  268. eval_studio_client/api/models/v1_list_leaderboards_response.py +1 -1
  269. eval_studio_client/api/models/v1_list_llm_models_response.py +1 -1
  270. eval_studio_client/api/models/v1_list_model_collections_response.py +1 -1
  271. eval_studio_client/api/models/v1_list_models_response.py +1 -1
  272. eval_studio_client/api/models/v1_list_most_recent_dashboards_response.py +1 -1
  273. eval_studio_client/api/models/v1_list_most_recent_leaderboards_response.py +1 -1
  274. eval_studio_client/api/models/v1_list_most_recent_models_response.py +1 -1
  275. eval_studio_client/api/models/v1_list_most_recent_tests_response.py +1 -1
  276. eval_studio_client/api/models/v1_list_operations_response.py +1 -1
  277. eval_studio_client/api/models/v1_list_perturbators_response.py +1 -1
  278. eval_studio_client/api/models/v1_list_prompt_library_items_response.py +95 -0
  279. eval_studio_client/api/models/v1_list_rag_collections_response.py +1 -1
  280. eval_studio_client/api/models/v1_list_test_case_library_items_response.py +95 -0
  281. eval_studio_client/api/models/v1_list_test_case_relationships_response.py +95 -0
  282. eval_studio_client/api/models/v1_list_test_cases_response.py +1 -1
  283. eval_studio_client/api/models/v1_list_test_classes_response.py +1 -1
  284. eval_studio_client/api/models/v1_list_tests_response.py +1 -1
  285. eval_studio_client/api/models/v1_list_workflows_response.py +95 -0
  286. eval_studio_client/api/models/v1_metric_score.py +89 -0
  287. eval_studio_client/api/models/v1_metric_scores.py +95 -0
  288. eval_studio_client/api/models/v1_model.py +1 -1
  289. eval_studio_client/api/models/v1_model_type.py +1 -1
  290. eval_studio_client/api/models/v1_operation.py +1 -1
  291. eval_studio_client/api/models/v1_operation_progress.py +1 -1
  292. eval_studio_client/api/models/v1_perturb_test_in_place_response.py +91 -0
  293. eval_studio_client/api/models/v1_perturb_test_response.py +1 -1
  294. eval_studio_client/api/models/v1_perturbator.py +1 -1
  295. eval_studio_client/api/models/v1_perturbator_configuration.py +1 -1
  296. eval_studio_client/api/models/v1_perturbator_intensity.py +1 -1
  297. eval_studio_client/api/models/v1_problem_and_action.py +1 -1
  298. eval_studio_client/api/models/v1_process_workflow_node_response.py +91 -0
  299. eval_studio_client/api/models/v1_prompt_library_item.py +129 -0
  300. eval_studio_client/api/models/v1_repeated_string.py +87 -0
  301. eval_studio_client/api/models/v1_reset_workflow_node_response.py +91 -0
  302. eval_studio_client/api/models/v1_test.py +1 -1
  303. eval_studio_client/api/models/v1_test_case.py +14 -3
  304. eval_studio_client/api/models/v1_test_case_relationship.py +1 -1
  305. eval_studio_client/api/models/v1_test_cases_generator.py +1 -1
  306. eval_studio_client/api/models/v1_test_class.py +1 -1
  307. eval_studio_client/api/models/v1_test_class_type.py +1 -1
  308. eval_studio_client/api/models/v1_test_lab.py +1 -1
  309. eval_studio_client/api/models/v1_test_suite_evaluates.py +39 -0
  310. eval_studio_client/api/models/v1_update_dashboard_response.py +1 -1
  311. eval_studio_client/api/models/v1_update_document_response.py +1 -1
  312. eval_studio_client/api/models/v1_update_leaderboard_response.py +1 -1
  313. eval_studio_client/api/models/v1_update_model_response.py +1 -1
  314. eval_studio_client/api/models/v1_update_operation_response.py +1 -1
  315. eval_studio_client/api/models/v1_update_test_case_response.py +1 -1
  316. eval_studio_client/api/models/v1_update_test_response.py +1 -1
  317. eval_studio_client/api/models/v1_update_workflow_node_response.py +91 -0
  318. eval_studio_client/api/models/v1_update_workflow_response.py +91 -0
  319. eval_studio_client/api/models/v1_who_am_i_response.py +1 -1
  320. eval_studio_client/api/models/v1_workflow.py +164 -0
  321. eval_studio_client/api/models/v1_workflow_edge.py +123 -0
  322. eval_studio_client/api/models/v1_workflow_edge_type.py +37 -0
  323. eval_studio_client/api/models/v1_workflow_node.py +156 -0
  324. eval_studio_client/api/models/v1_workflow_node_artifact.py +122 -0
  325. eval_studio_client/api/models/v1_workflow_node_artifacts.py +97 -0
  326. eval_studio_client/api/models/v1_workflow_node_attributes.py +87 -0
  327. eval_studio_client/api/models/v1_workflow_node_status.py +40 -0
  328. eval_studio_client/api/models/v1_workflow_node_type.py +44 -0
  329. eval_studio_client/api/models/v1_workflow_node_view.py +38 -0
  330. eval_studio_client/api/models/v1_workflow_type.py +37 -0
  331. eval_studio_client/api/models/workflow_service_clone_workflow_request.py +95 -0
  332. eval_studio_client/api/rest.py +1 -1
  333. eval_studio_client/api/test/test_adversarial_inputs_service_api.py +37 -0
  334. eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +128 -0
  335. eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
  336. eval_studio_client/api/test/test_document_service_api.py +1 -1
  337. eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
  338. eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
  339. eval_studio_client/api/test/test_generated_questions_validation_service_api.py +37 -0
  340. eval_studio_client/api/test/test_generated_questions_validation_service_validate_generated_questions_request.py +83 -0
  341. eval_studio_client/api/test/test_human_calibration_service_api.py +38 -0
  342. eval_studio_client/api/test/test_info_service_api.py +1 -1
  343. eval_studio_client/api/test/test_leaderboard_report_service_api.py +37 -0
  344. eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
  345. eval_studio_client/api/test/test_model_service_api.py +1 -1
  346. eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
  347. eval_studio_client/api/test/test_operation_service_api.py +7 -1
  348. eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
  349. eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +25 -3
  350. eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
  351. eval_studio_client/api/test/test_prompt_generation_service_api.py +1 -1
  352. eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +13 -5
  353. eval_studio_client/api/test/test_prompt_library_service_api.py +43 -0
  354. eval_studio_client/api/test/test_protobuf_any.py +1 -1
  355. eval_studio_client/api/test/test_protobuf_null_value.py +33 -0
  356. eval_studio_client/api/test/test_required_the_dashboard_to_update.py +1 -1
  357. eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
  358. eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +1 -1
  359. eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
  360. eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
  361. eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
  362. eval_studio_client/api/test/test_required_the_test_case_to_update.py +9 -2
  363. eval_studio_client/api/test/test_required_the_test_to_update.py +1 -1
  364. eval_studio_client/api/test/test_required_the_updated_workflow.py +91 -0
  365. eval_studio_client/api/test/test_required_the_updated_workflow_node.py +80 -0
  366. eval_studio_client/api/test/test_rpc_status.py +1 -1
  367. eval_studio_client/api/test/test_test_case_relationship_service_api.py +37 -0
  368. eval_studio_client/api/test/test_test_case_service_api.py +1 -1
  369. eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
  370. eval_studio_client/api/test/test_test_class_service_api.py +1 -1
  371. eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
  372. eval_studio_client/api/test/test_test_service_api.py +25 -1
  373. eval_studio_client/api/test/test_test_service_clone_test_request.py +52 -0
  374. eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +8 -2
  375. eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +56 -0
  376. eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +63 -0
  377. eval_studio_client/api/test/test_test_service_perturb_test_in_place_request.py +59 -0
  378. eval_studio_client/api/test/test_test_service_perturb_test_request.py +5 -2
  379. eval_studio_client/api/test/test_v1_abort_operation_response.py +71 -0
  380. eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +1 -1
  381. eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +1 -1
  382. eval_studio_client/api/test/test_v1_batch_delete_dashboards_request.py +1 -1
  383. eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +1 -1
  384. eval_studio_client/api/test/test_v1_batch_delete_documents_request.py +1 -1
  385. eval_studio_client/api/test/test_v1_batch_delete_documents_response.py +1 -1
  386. eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +1 -1
  387. eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +1 -1
  388. eval_studio_client/api/test/test_v1_batch_delete_leaderboards_request.py +1 -1
  389. eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +1 -1
  390. eval_studio_client/api/test/test_v1_batch_delete_models_request.py +1 -1
  391. eval_studio_client/api/test/test_v1_batch_delete_models_response.py +1 -1
  392. eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +9 -2
  393. eval_studio_client/api/test/test_v1_batch_delete_tests_request.py +1 -1
  394. eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +1 -1
  395. eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +53 -0
  396. eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +95 -0
  397. eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +1 -1
  398. eval_studio_client/api/test/test_v1_batch_get_documents_response.py +1 -1
  399. eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +1 -1
  400. eval_studio_client/api/test/test_v1_batch_get_models_response.py +1 -1
  401. eval_studio_client/api/test/test_v1_batch_get_operations_response.py +1 -1
  402. eval_studio_client/api/test/test_v1_batch_get_tests_response.py +1 -1
  403. eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +64 -0
  404. eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +84 -0
  405. eval_studio_client/api/test/test_v1_batch_import_leaderboard_request.py +1 -1
  406. eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +1 -1
  407. eval_studio_client/api/test/test_v1_batch_import_tests_request.py +1 -1
  408. eval_studio_client/api/test/test_v1_batch_import_tests_response.py +1 -1
  409. eval_studio_client/api/test/test_v1_check_base_models_response.py +1 -1
  410. eval_studio_client/api/test/test_v1_clone_test_response.py +67 -0
  411. eval_studio_client/api/test/test_v1_clone_workflow_response.py +93 -0
  412. eval_studio_client/api/test/test_v1_collection_info.py +1 -1
  413. eval_studio_client/api/test/test_v1_context.py +54 -0
  414. eval_studio_client/api/test/test_v1_create_dashboard_response.py +1 -1
  415. eval_studio_client/api/test/test_v1_create_document_response.py +1 -1
  416. eval_studio_client/api/test/test_v1_create_evaluation_request.py +25 -3
  417. eval_studio_client/api/test/test_v1_create_evaluator_response.py +1 -1
  418. eval_studio_client/api/test/test_v1_create_leaderboard_request.py +1 -1
  419. eval_studio_client/api/test/test_v1_create_leaderboard_response.py +1 -1
  420. eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +1 -1
  421. eval_studio_client/api/test/test_v1_create_model_response.py +1 -1
  422. eval_studio_client/api/test/test_v1_create_perturbation_response.py +1 -1
  423. eval_studio_client/api/test/test_v1_create_test_case_response.py +9 -2
  424. eval_studio_client/api/test/test_v1_create_test_lab_response.py +1 -1
  425. eval_studio_client/api/test/test_v1_create_test_response.py +1 -1
  426. eval_studio_client/api/test/test_v1_create_workflow_edge_response.py +62 -0
  427. eval_studio_client/api/test/test_v1_create_workflow_node_response.py +82 -0
  428. eval_studio_client/api/test/test_v1_create_workflow_response.py +93 -0
  429. eval_studio_client/api/test/test_v1_dashboard.py +1 -1
  430. eval_studio_client/api/test/test_v1_dashboard_status.py +1 -1
  431. eval_studio_client/api/test/test_v1_delete_dashboard_response.py +1 -1
  432. eval_studio_client/api/test/test_v1_delete_document_response.py +1 -1
  433. eval_studio_client/api/test/test_v1_delete_evaluator_response.py +1 -1
  434. eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +1 -1
  435. eval_studio_client/api/test/test_v1_delete_model_response.py +1 -1
  436. eval_studio_client/api/test/test_v1_delete_test_case_response.py +9 -2
  437. eval_studio_client/api/test/test_v1_delete_test_response.py +1 -1
  438. eval_studio_client/api/test/test_v1_delete_workflow_edge_response.py +62 -0
  439. eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +82 -0
  440. eval_studio_client/api/test/test_v1_delete_workflow_response.py +93 -0
  441. eval_studio_client/api/test/test_v1_document.py +1 -1
  442. eval_studio_client/api/test/test_v1_estimate_threshold_request.py +60 -0
  443. eval_studio_client/api/test/test_v1_evaluation_test.py +9 -2
  444. eval_studio_client/api/test/test_v1_evaluator.py +1 -1
  445. eval_studio_client/api/test/test_v1_evaluator_param_type.py +1 -1
  446. eval_studio_client/api/test/test_v1_evaluator_parameter.py +1 -1
  447. eval_studio_client/api/test/test_v1_evaluator_view.py +1 -1
  448. eval_studio_client/api/test/test_v1_finalize_operation_response.py +1 -1
  449. eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +9 -2
  450. eval_studio_client/api/test/test_v1_find_test_lab_response.py +1 -1
  451. eval_studio_client/api/test/test_v1_generate_test_cases_response.py +1 -1
  452. eval_studio_client/api/test/test_v1_generated_test_case.py +79 -0
  453. eval_studio_client/api/test/test_v1_get_dashboard_response.py +1 -1
  454. eval_studio_client/api/test/test_v1_get_document_response.py +1 -1
  455. eval_studio_client/api/test/test_v1_get_evaluator_response.py +1 -1
  456. eval_studio_client/api/test/test_v1_get_info_response.py +7 -2
  457. eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +175 -0
  458. eval_studio_client/api/test/test_v1_get_leaderboard_response.py +1 -1
  459. eval_studio_client/api/test/test_v1_get_model_response.py +1 -1
  460. eval_studio_client/api/test/test_v1_get_operation_progress_by_parent_response.py +1 -1
  461. eval_studio_client/api/test/test_v1_get_operation_response.py +1 -1
  462. eval_studio_client/api/test/test_v1_get_perturbator_response.py +1 -1
  463. eval_studio_client/api/test/test_v1_get_test_case_response.py +9 -2
  464. eval_studio_client/api/test/test_v1_get_test_class_response.py +1 -1
  465. eval_studio_client/api/test/test_v1_get_test_response.py +1 -1
  466. eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +56 -0
  467. eval_studio_client/api/test/test_v1_get_workflow_node_response.py +82 -0
  468. eval_studio_client/api/test/test_v1_get_workflow_response.py +93 -0
  469. eval_studio_client/api/test/test_v1_import_evaluation_request.py +17 -2
  470. eval_studio_client/api/test/test_v1_import_leaderboard_request.py +1 -1
  471. eval_studio_client/api/test/test_v1_import_leaderboard_response.py +1 -1
  472. eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +71 -0
  473. eval_studio_client/api/test/test_v1_import_test_cases_request.py +57 -0
  474. eval_studio_client/api/test/test_v1_info.py +7 -2
  475. eval_studio_client/api/test/test_v1_init_workflow_node_response.py +82 -0
  476. eval_studio_client/api/test/test_v1_insight.py +1 -1
  477. eval_studio_client/api/test/test_v1_labeled_test_case.py +53 -0
  478. eval_studio_client/api/test/test_v1_leaderboard.py +1 -1
  479. eval_studio_client/api/test/test_v1_leaderboard_report.py +174 -0
  480. eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py +52 -0
  481. eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +56 -0
  482. eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py +114 -0
  483. eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py +63 -0
  484. eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py +58 -0
  485. eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py +66 -0
  486. eval_studio_client/api/test/test_v1_leaderboard_report_model.py +62 -0
  487. eval_studio_client/api/test/test_v1_leaderboard_report_result.py +92 -0
  488. eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py +53 -0
  489. eval_studio_client/api/test/test_v1_leaderboard_status.py +1 -1
  490. eval_studio_client/api/test/test_v1_leaderboard_type.py +1 -1
  491. eval_studio_client/api/test/test_v1_leaderboard_view.py +1 -1
  492. eval_studio_client/api/test/test_v1_list_base_models_response.py +1 -1
  493. eval_studio_client/api/test/test_v1_list_dashboards_response.py +1 -1
  494. eval_studio_client/api/test/test_v1_list_documents_response.py +1 -1
  495. eval_studio_client/api/test/test_v1_list_evaluators_response.py +1 -1
  496. eval_studio_client/api/test/test_v1_list_leaderboards_response.py +1 -1
  497. eval_studio_client/api/test/test_v1_list_llm_models_response.py +1 -1
  498. eval_studio_client/api/test/test_v1_list_model_collections_response.py +1 -1
  499. eval_studio_client/api/test/test_v1_list_models_response.py +1 -1
  500. eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +1 -1
  501. eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +1 -1
  502. eval_studio_client/api/test/test_v1_list_most_recent_models_response.py +1 -1
  503. eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +1 -1
  504. eval_studio_client/api/test/test_v1_list_operations_response.py +1 -1
  505. eval_studio_client/api/test/test_v1_list_perturbators_response.py +1 -1
  506. eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +71 -0
  507. eval_studio_client/api/test/test_v1_list_rag_collections_response.py +1 -1
  508. eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +71 -0
  509. eval_studio_client/api/test/test_v1_list_test_case_relationships_response.py +56 -0
  510. eval_studio_client/api/test/test_v1_list_test_cases_response.py +9 -2
  511. eval_studio_client/api/test/test_v1_list_test_classes_response.py +1 -1
  512. eval_studio_client/api/test/test_v1_list_tests_response.py +1 -1
  513. eval_studio_client/api/test/test_v1_list_workflows_response.py +95 -0
  514. eval_studio_client/api/test/test_v1_metric_score.py +52 -0
  515. eval_studio_client/api/test/test_v1_metric_scores.py +55 -0
  516. eval_studio_client/api/test/test_v1_model.py +1 -1
  517. eval_studio_client/api/test/test_v1_model_type.py +1 -1
  518. eval_studio_client/api/test/test_v1_operation.py +1 -1
  519. eval_studio_client/api/test/test_v1_operation_progress.py +1 -1
  520. eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +67 -0
  521. eval_studio_client/api/test/test_v1_perturb_test_response.py +1 -1
  522. eval_studio_client/api/test/test_v1_perturbator.py +1 -1
  523. eval_studio_client/api/test/test_v1_perturbator_configuration.py +1 -1
  524. eval_studio_client/api/test/test_v1_perturbator_intensity.py +1 -1
  525. eval_studio_client/api/test/test_v1_problem_and_action.py +1 -1
  526. eval_studio_client/api/test/test_v1_process_workflow_node_response.py +71 -0
  527. eval_studio_client/api/test/test_v1_prompt_library_item.py +68 -0
  528. eval_studio_client/api/test/test_v1_repeated_string.py +53 -0
  529. eval_studio_client/api/test/test_v1_reset_workflow_node_response.py +82 -0
  530. eval_studio_client/api/test/test_v1_test.py +1 -1
  531. eval_studio_client/api/test/test_v1_test_case.py +9 -2
  532. eval_studio_client/api/test/test_v1_test_case_relationship.py +1 -1
  533. eval_studio_client/api/test/test_v1_test_cases_generator.py +1 -1
  534. eval_studio_client/api/test/test_v1_test_class.py +1 -1
  535. eval_studio_client/api/test/test_v1_test_class_type.py +1 -1
  536. eval_studio_client/api/test/test_v1_test_lab.py +1 -1
  537. eval_studio_client/api/test/test_v1_test_suite_evaluates.py +33 -0
  538. eval_studio_client/api/test/test_v1_update_dashboard_response.py +1 -1
  539. eval_studio_client/api/test/test_v1_update_document_response.py +1 -1
  540. eval_studio_client/api/test/test_v1_update_leaderboard_response.py +1 -1
  541. eval_studio_client/api/test/test_v1_update_model_response.py +1 -1
  542. eval_studio_client/api/test/test_v1_update_operation_response.py +1 -1
  543. eval_studio_client/api/test/test_v1_update_test_case_response.py +9 -2
  544. eval_studio_client/api/test/test_v1_update_test_response.py +1 -1
  545. eval_studio_client/api/test/test_v1_update_workflow_node_response.py +82 -0
  546. eval_studio_client/api/test/test_v1_update_workflow_response.py +93 -0
  547. eval_studio_client/api/test/test_v1_who_am_i_response.py +1 -1
  548. eval_studio_client/api/test/test_v1_workflow.py +92 -0
  549. eval_studio_client/api/test/test_v1_workflow_edge.py +61 -0
  550. eval_studio_client/api/test/test_v1_workflow_edge_type.py +33 -0
  551. eval_studio_client/api/test/test_v1_workflow_node.py +81 -0
  552. eval_studio_client/api/test/test_v1_workflow_node_artifact.py +61 -0
  553. eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +64 -0
  554. eval_studio_client/api/test/test_v1_workflow_node_attributes.py +51 -0
  555. eval_studio_client/api/test/test_v1_workflow_node_status.py +33 -0
  556. eval_studio_client/api/test/test_v1_workflow_node_type.py +33 -0
  557. eval_studio_client/api/test/test_v1_workflow_node_view.py +33 -0
  558. eval_studio_client/api/test/test_v1_workflow_type.py +33 -0
  559. eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
  560. eval_studio_client/api/test/test_workflow_edge_service_api.py +52 -0
  561. eval_studio_client/api/test/test_workflow_node_service_api.py +94 -0
  562. eval_studio_client/api/test/test_workflow_service_api.py +80 -0
  563. eval_studio_client/api/test/test_workflow_service_clone_workflow_request.py +55 -0
  564. eval_studio_client/client.py +7 -0
  565. eval_studio_client/dashboards.py +66 -18
  566. eval_studio_client/gen/openapiv2/eval_studio.swagger.json +5132 -1847
  567. eval_studio_client/leaderboards.py +125 -0
  568. eval_studio_client/models.py +3 -42
  569. eval_studio_client/test_labs.py +49 -21
  570. eval_studio_client/tests.py +323 -58
  571. eval_studio_client/utils.py +26 -0
  572. {eval_studio_client-1.0.0a1.dist-info → eval_studio_client-1.1.0a5.dist-info}/METADATA +2 -3
  573. eval_studio_client-1.1.0a5.dist-info/RECORD +720 -0
  574. {eval_studio_client-1.0.0a1.dist-info → eval_studio_client-1.1.0a5.dist-info}/WHEEL +1 -1
  575. eval_studio_client-1.0.0a1.dist-info/RECORD +0 -485
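Items 26–28 above (with their docs in items 121–123) add generated service classes for the new workflow API next to the existing `configuration.py` and `api_client.py` plumbing, i.e. the usual openapi-generator Python client layout. Under that assumption, the new services are presumably reached the same way as the existing ones; the sketch below is illustrative only, and the host URL and the `workflow_service_list_workflows` method name are hypothetical placeholders (the real method names are generated from the proto RPCs; see `docs/WorkflowServiceApi.md` in the wheel).

```python
# Sketch only: assumes the standard openapi-generator Python client layout
# visible in this wheel (configuration.py, api_client.py, *_service_api.py).
from eval_studio_client.api.configuration import Configuration
from eval_studio_client.api.api_client import ApiClient
from eval_studio_client.api.api.workflow_service_api import WorkflowServiceApi

# Hypothetical host; point it at your Eval Studio instance.
config = Configuration(host="http://localhost:8080")

with ApiClient(config) as api_client:
    workflow_api = WorkflowServiceApi(api_client)
    # Hypothetical method name derived from the WorkflowService RPC names;
    # check docs/WorkflowServiceApi.md for the exact generated signature.
    workflows = workflow_api.workflow_service_list_workflows()
    print(workflows)
```

The expanded hunks below appear to be the re-export block of a generated package `__init__`, where the new model classes are imported so they can be used from the package directly.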
@@ -2,7 +2,7 @@
 
 # flake8: noqa
 """
-ai/h2o/eval_studio/v1/collection.proto
+ai/h2o/eval_studio/v1/insight.proto
 
 No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -14,9 +14,12 @@
 
 
 # import models into model package
+from eval_studio_client.api.models.adversarial_inputs_service_test_adversarial_inputs_robustness_request import AdversarialInputsServiceTestAdversarialInputsRobustnessRequest
+from eval_studio_client.api.models.generated_questions_validation_service_validate_generated_questions_request import GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest
 from eval_studio_client.api.models.perturbation_service_create_perturbation_request import PerturbationServiceCreatePerturbationRequest
 from eval_studio_client.api.models.prompt_generation_service_auto_generate_prompts_request import PromptGenerationServiceAutoGeneratePromptsRequest
 from eval_studio_client.api.models.protobuf_any import ProtobufAny
+from eval_studio_client.api.models.protobuf_null_value import ProtobufNullValue
 from eval_studio_client.api.models.required_the_dashboard_to_update import RequiredTheDashboardToUpdate
 from eval_studio_client.api.models.required_the_document_to_update import RequiredTheDocumentToUpdate
 from eval_studio_client.api.models.required_the_leaderboard_to_update import RequiredTheLeaderboardToUpdate
@@ -25,10 +28,17 @@ from eval_studio_client.api.models.required_the_operation_to_finalize import Req
 from eval_studio_client.api.models.required_the_operation_to_update import RequiredTheOperationToUpdate
 from eval_studio_client.api.models.required_the_test_case_to_update import RequiredTheTestCaseToUpdate
 from eval_studio_client.api.models.required_the_test_to_update import RequiredTheTestToUpdate
+from eval_studio_client.api.models.required_the_updated_workflow import RequiredTheUpdatedWorkflow
+from eval_studio_client.api.models.required_the_updated_workflow_node import RequiredTheUpdatedWorkflowNode
 from eval_studio_client.api.models.rpc_status import RpcStatus
 from eval_studio_client.api.models.test_case_service_batch_delete_test_cases_request import TestCaseServiceBatchDeleteTestCasesRequest
+from eval_studio_client.api.models.test_service_clone_test_request import TestServiceCloneTestRequest
 from eval_studio_client.api.models.test_service_generate_test_cases_request import TestServiceGenerateTestCasesRequest
+from eval_studio_client.api.models.test_service_import_test_cases_from_library_request import TestServiceImportTestCasesFromLibraryRequest
+from eval_studio_client.api.models.test_service_list_test_case_library_items_request import TestServiceListTestCaseLibraryItemsRequest
+from eval_studio_client.api.models.test_service_perturb_test_in_place_request import TestServicePerturbTestInPlaceRequest
 from eval_studio_client.api.models.test_service_perturb_test_request import TestServicePerturbTestRequest
+from eval_studio_client.api.models.v1_abort_operation_response import V1AbortOperationResponse
 from eval_studio_client.api.models.v1_batch_create_leaderboards_request import V1BatchCreateLeaderboardsRequest
 from eval_studio_client.api.models.v1_batch_create_leaderboards_response import V1BatchCreateLeaderboardsResponse
 from eval_studio_client.api.models.v1_batch_delete_dashboards_request import V1BatchDeleteDashboardsRequest
@@ -44,18 +54,25 @@ from eval_studio_client.api.models.v1_batch_delete_models_response import V1Batc
 from eval_studio_client.api.models.v1_batch_delete_test_cases_response import V1BatchDeleteTestCasesResponse
 from eval_studio_client.api.models.v1_batch_delete_tests_request import V1BatchDeleteTestsRequest
 from eval_studio_client.api.models.v1_batch_delete_tests_response import V1BatchDeleteTestsResponse
+from eval_studio_client.api.models.v1_batch_delete_workflows_request import V1BatchDeleteWorkflowsRequest
+from eval_studio_client.api.models.v1_batch_delete_workflows_response import V1BatchDeleteWorkflowsResponse
 from eval_studio_client.api.models.v1_batch_get_dashboards_response import V1BatchGetDashboardsResponse
 from eval_studio_client.api.models.v1_batch_get_documents_response import V1BatchGetDocumentsResponse
 from eval_studio_client.api.models.v1_batch_get_leaderboards_response import V1BatchGetLeaderboardsResponse
 from eval_studio_client.api.models.v1_batch_get_models_response import V1BatchGetModelsResponse
 from eval_studio_client.api.models.v1_batch_get_operations_response import V1BatchGetOperationsResponse
 from eval_studio_client.api.models.v1_batch_get_tests_response import V1BatchGetTestsResponse
+from eval_studio_client.api.models.v1_batch_get_workflow_edges_response import V1BatchGetWorkflowEdgesResponse
+from eval_studio_client.api.models.v1_batch_get_workflow_nodes_response import V1BatchGetWorkflowNodesResponse
 from eval_studio_client.api.models.v1_batch_import_leaderboard_request import V1BatchImportLeaderboardRequest
 from eval_studio_client.api.models.v1_batch_import_leaderboard_response import V1BatchImportLeaderboardResponse
 from eval_studio_client.api.models.v1_batch_import_tests_request import V1BatchImportTestsRequest
 from eval_studio_client.api.models.v1_batch_import_tests_response import V1BatchImportTestsResponse
 from eval_studio_client.api.models.v1_check_base_models_response import V1CheckBaseModelsResponse
+from eval_studio_client.api.models.v1_clone_test_response import V1CloneTestResponse
+from eval_studio_client.api.models.v1_clone_workflow_response import V1CloneWorkflowResponse
 from eval_studio_client.api.models.v1_collection_info import V1CollectionInfo
+from eval_studio_client.api.models.v1_context import V1Context
 from eval_studio_client.api.models.v1_create_dashboard_response import V1CreateDashboardResponse
 from eval_studio_client.api.models.v1_create_document_response import V1CreateDocumentResponse
 from eval_studio_client.api.models.v1_create_evaluation_request import V1CreateEvaluationRequest
@@ -68,6 +85,9 @@ from eval_studio_client.api.models.v1_create_perturbation_response import V1Crea
 from eval_studio_client.api.models.v1_create_test_case_response import V1CreateTestCaseResponse
 from eval_studio_client.api.models.v1_create_test_lab_response import V1CreateTestLabResponse
 from eval_studio_client.api.models.v1_create_test_response import V1CreateTestResponse
+from eval_studio_client.api.models.v1_create_workflow_edge_response import V1CreateWorkflowEdgeResponse
+from eval_studio_client.api.models.v1_create_workflow_node_response import V1CreateWorkflowNodeResponse
+from eval_studio_client.api.models.v1_create_workflow_response import V1CreateWorkflowResponse
 from eval_studio_client.api.models.v1_dashboard import V1Dashboard
 from eval_studio_client.api.models.v1_dashboard_status import V1DashboardStatus
 from eval_studio_client.api.models.v1_delete_dashboard_response import V1DeleteDashboardResponse
@@ -77,7 +97,11 @@ from eval_studio_client.api.models.v1_delete_leaderboard_response import V1Delet
 from eval_studio_client.api.models.v1_delete_model_response import V1DeleteModelResponse
 from eval_studio_client.api.models.v1_delete_test_case_response import V1DeleteTestCaseResponse
 from eval_studio_client.api.models.v1_delete_test_response import V1DeleteTestResponse
+from eval_studio_client.api.models.v1_delete_workflow_edge_response import V1DeleteWorkflowEdgeResponse
+from eval_studio_client.api.models.v1_delete_workflow_node_response import V1DeleteWorkflowNodeResponse
+from eval_studio_client.api.models.v1_delete_workflow_response import V1DeleteWorkflowResponse
 from eval_studio_client.api.models.v1_document import V1Document
+from eval_studio_client.api.models.v1_estimate_threshold_request import V1EstimateThresholdRequest
 from eval_studio_client.api.models.v1_evaluation_test import V1EvaluationTest
 from eval_studio_client.api.models.v1_evaluator import V1Evaluator
 from eval_studio_client.api.models.v1_evaluator_param_type import V1EvaluatorParamType
@@ -87,10 +111,12 @@ from eval_studio_client.api.models.v1_finalize_operation_response import V1Final
 from eval_studio_client.api.models.v1_find_all_test_cases_by_id_response import V1FindAllTestCasesByIDResponse
 from eval_studio_client.api.models.v1_find_test_lab_response import V1FindTestLabResponse
 from eval_studio_client.api.models.v1_generate_test_cases_response import V1GenerateTestCasesResponse
+from eval_studio_client.api.models.v1_generated_test_case import V1GeneratedTestCase
 from eval_studio_client.api.models.v1_get_dashboard_response import V1GetDashboardResponse
 from eval_studio_client.api.models.v1_get_document_response import V1GetDocumentResponse
 from eval_studio_client.api.models.v1_get_evaluator_response import V1GetEvaluatorResponse
 from eval_studio_client.api.models.v1_get_info_response import V1GetInfoResponse
+from eval_studio_client.api.models.v1_get_leaderboard_report_response import V1GetLeaderboardReportResponse
 from eval_studio_client.api.models.v1_get_leaderboard_response import V1GetLeaderboardResponse
 from eval_studio_client.api.models.v1_get_model_response import V1GetModelResponse
 from eval_studio_client.api.models.v1_get_operation_progress_by_parent_response import V1GetOperationProgressByParentResponse
@@ -99,12 +125,29 @@ from eval_studio_client.api.models.v1_get_perturbator_response import V1GetPertu
 from eval_studio_client.api.models.v1_get_test_case_response import V1GetTestCaseResponse
 from eval_studio_client.api.models.v1_get_test_class_response import V1GetTestClassResponse
 from eval_studio_client.api.models.v1_get_test_response import V1GetTestResponse
+from eval_studio_client.api.models.v1_get_workflow_node_prerequisites_response import V1GetWorkflowNodePrerequisitesResponse
+from eval_studio_client.api.models.v1_get_workflow_node_response import V1GetWorkflowNodeResponse
+from eval_studio_client.api.models.v1_get_workflow_response import V1GetWorkflowResponse
 from eval_studio_client.api.models.v1_import_evaluation_request import V1ImportEvaluationRequest
 from eval_studio_client.api.models.v1_import_leaderboard_request import V1ImportLeaderboardRequest
 from eval_studio_client.api.models.v1_import_leaderboard_response import V1ImportLeaderboardResponse
+from eval_studio_client.api.models.v1_import_test_cases_from_library_response import V1ImportTestCasesFromLibraryResponse
+from eval_studio_client.api.models.v1_import_test_cases_request import V1ImportTestCasesRequest
 from eval_studio_client.api.models.v1_info import V1Info
+from eval_studio_client.api.models.v1_init_workflow_node_response import V1InitWorkflowNodeResponse
 from eval_studio_client.api.models.v1_insight import V1Insight
+from eval_studio_client.api.models.v1_labeled_test_case import V1LabeledTestCase
 from eval_studio_client.api.models.v1_leaderboard import V1Leaderboard
+from eval_studio_client.api.models.v1_leaderboard_report import V1LeaderboardReport
+from eval_studio_client.api.models.v1_leaderboard_report_actual_output_data import V1LeaderboardReportActualOutputData
+from eval_studio_client.api.models.v1_leaderboard_report_actual_output_meta import V1LeaderboardReportActualOutputMeta
+from eval_studio_client.api.models.v1_leaderboard_report_evaluator import V1LeaderboardReportEvaluator
+from eval_studio_client.api.models.v1_leaderboard_report_evaluator_parameter import V1LeaderboardReportEvaluatorParameter
+from eval_studio_client.api.models.v1_leaderboard_report_explanation import V1LeaderboardReportExplanation
+from eval_studio_client.api.models.v1_leaderboard_report_metrics_meta_entry import V1LeaderboardReportMetricsMetaEntry
+from eval_studio_client.api.models.v1_leaderboard_report_model import V1LeaderboardReportModel
+from eval_studio_client.api.models.v1_leaderboard_report_result import V1LeaderboardReportResult
+from eval_studio_client.api.models.v1_leaderboard_report_result_relationship import V1LeaderboardReportResultRelationship
 from eval_studio_client.api.models.v1_leaderboard_status import V1LeaderboardStatus
 from eval_studio_client.api.models.v1_leaderboard_type import V1LeaderboardType
 from eval_studio_client.api.models.v1_leaderboard_view import V1LeaderboardView
@@ -122,19 +165,30 @@ from eval_studio_client.api.models.v1_list_most_recent_models_response import V1
 from eval_studio_client.api.models.v1_list_most_recent_tests_response import V1ListMostRecentTestsResponse
 from eval_studio_client.api.models.v1_list_operations_response import V1ListOperationsResponse
 from eval_studio_client.api.models.v1_list_perturbators_response import V1ListPerturbatorsResponse
+from eval_studio_client.api.models.v1_list_prompt_library_items_response import V1ListPromptLibraryItemsResponse
 from eval_studio_client.api.models.v1_list_rag_collections_response import V1ListRAGCollectionsResponse
+from eval_studio_client.api.models.v1_list_test_case_library_items_response import V1ListTestCaseLibraryItemsResponse
+from eval_studio_client.api.models.v1_list_test_case_relationships_response import V1ListTestCaseRelationshipsResponse
 from eval_studio_client.api.models.v1_list_test_cases_response import V1ListTestCasesResponse
 from eval_studio_client.api.models.v1_list_test_classes_response import V1ListTestClassesResponse
 from eval_studio_client.api.models.v1_list_tests_response import V1ListTestsResponse
+from eval_studio_client.api.models.v1_list_workflows_response import V1ListWorkflowsResponse
+from eval_studio_client.api.models.v1_metric_score import V1MetricScore
+from eval_studio_client.api.models.v1_metric_scores import V1MetricScores
 from eval_studio_client.api.models.v1_model import V1Model
 from eval_studio_client.api.models.v1_model_type import V1ModelType
 from eval_studio_client.api.models.v1_operation import V1Operation
 from eval_studio_client.api.models.v1_operation_progress import V1OperationProgress
+from eval_studio_client.api.models.v1_perturb_test_in_place_response import V1PerturbTestInPlaceResponse
 from eval_studio_client.api.models.v1_perturb_test_response import V1PerturbTestResponse
 from eval_studio_client.api.models.v1_perturbator import V1Perturbator
 from eval_studio_client.api.models.v1_perturbator_configuration import V1PerturbatorConfiguration
 from eval_studio_client.api.models.v1_perturbator_intensity import V1PerturbatorIntensity
 from eval_studio_client.api.models.v1_problem_and_action import V1ProblemAndAction
+from eval_studio_client.api.models.v1_process_workflow_node_response import V1ProcessWorkflowNodeResponse
+from eval_studio_client.api.models.v1_prompt_library_item import V1PromptLibraryItem
+from eval_studio_client.api.models.v1_repeated_string import V1RepeatedString
+from eval_studio_client.api.models.v1_reset_workflow_node_response import V1ResetWorkflowNodeResponse
 from eval_studio_client.api.models.v1_test import V1Test
 from eval_studio_client.api.models.v1_test_case import V1TestCase
 from eval_studio_client.api.models.v1_test_case_relationship import V1TestCaseRelationship
@@ -142,6 +196,7 @@ from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGen
 from eval_studio_client.api.models.v1_test_class import V1TestClass
 from eval_studio_client.api.models.v1_test_class_type import V1TestClassType
 from eval_studio_client.api.models.v1_test_lab import V1TestLab
+from eval_studio_client.api.models.v1_test_suite_evaluates import V1TestSuiteEvaluates
 from eval_studio_client.api.models.v1_update_dashboard_response import V1UpdateDashboardResponse
 from eval_studio_client.api.models.v1_update_document_response import V1UpdateDocumentResponse
 from eval_studio_client.api.models.v1_update_leaderboard_response import V1UpdateLeaderboardResponse
@@ -149,4 +204,18 @@ from eval_studio_client.api.models.v1_update_model_response import V1UpdateModel
 from eval_studio_client.api.models.v1_update_operation_response import V1UpdateOperationResponse
 from eval_studio_client.api.models.v1_update_test_case_response import V1UpdateTestCaseResponse
  from eval_studio_client.api.models.v1_update_test_response import V1UpdateTestResponse
207
+ from eval_studio_client.api.models.v1_update_workflow_node_response import V1UpdateWorkflowNodeResponse
208
+ from eval_studio_client.api.models.v1_update_workflow_response import V1UpdateWorkflowResponse
152
209
  from eval_studio_client.api.models.v1_who_am_i_response import V1WhoAmIResponse
210
+ from eval_studio_client.api.models.v1_workflow import V1Workflow
211
+ from eval_studio_client.api.models.v1_workflow_edge import V1WorkflowEdge
212
+ from eval_studio_client.api.models.v1_workflow_edge_type import V1WorkflowEdgeType
213
+ from eval_studio_client.api.models.v1_workflow_node import V1WorkflowNode
214
+ from eval_studio_client.api.models.v1_workflow_node_artifact import V1WorkflowNodeArtifact
215
+ from eval_studio_client.api.models.v1_workflow_node_artifacts import V1WorkflowNodeArtifacts
216
+ from eval_studio_client.api.models.v1_workflow_node_attributes import V1WorkflowNodeAttributes
217
+ from eval_studio_client.api.models.v1_workflow_node_status import V1WorkflowNodeStatus
218
+ from eval_studio_client.api.models.v1_workflow_node_type import V1WorkflowNodeType
219
+ from eval_studio_client.api.models.v1_workflow_node_view import V1WorkflowNodeView
220
+ from eval_studio_client.api.models.v1_workflow_type import V1WorkflowType
221
+ from eval_studio_client.api.models.workflow_service_clone_workflow_request import WorkflowServiceCloneWorkflowRequest
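The newly added workflow, prompt-library, and leaderboard-report models can be pulled in through the module paths shown in the imports above; a minimal sketch, assuming the surrounding __init__.py re-exports them the same way it does the existing models:

    from eval_studio_client.api.models.v1_workflow import V1Workflow
    from eval_studio_client.api.models.v1_workflow_node import V1WorkflowNode
    from eval_studio_client.api.models.v1_leaderboard_report import V1LeaderboardReport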
@@ -0,0 +1,143 @@
+ # coding: utf-8
+
+ """
+     ai/h2o/eval_studio/v1/insight.proto
+
+     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+     The version of the OpenAPI document: version not set
+     Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+     Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
+ from typing import Any, ClassVar, Dict, List, Optional
+ from eval_studio_client.api.models.v1_metric_scores import V1MetricScores
+ from eval_studio_client.api.models.v1_model import V1Model
+ from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(BaseModel):
+     """
+     AdversarialInputsServiceTestAdversarialInputsRobustnessRequest
+     """ # noqa: E501
+     operation: Optional[StrictStr] = Field(default=None, description="Required. The Operation processing adversarial inputs robustness testing.")
+     generator_input_types: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. The list of adversarial input types to generate.", alias="generatorInputTypes")
+     generator_document_urls: Optional[List[StrictStr]] = Field(default=None, description="Required. The document URLs which were used to generate the baseline TestCases.", alias="generatorDocumentUrls")
+     generator_model: Optional[V1Model] = Field(default=None, alias="generatorModel")
+     generator_base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to use for generation of the adversarial prompts.", alias="generatorBaseLlmModel")
+     generator_count: Optional[StrictInt] = Field(default=None, description="Required. The number of adversarial TestCases to generate.", alias="generatorCount")
+     generator_topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.", alias="generatorTopics")
+     generator_chunks: Optional[List[StrictStr]] = Field(default=None, description="Optional. The list of chunks to use for generation. If set, the Documents assigned to the Test and h2ogpte_collection_id are ignored.", alias="generatorChunks")
+     generator_h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. ID of the h2oGPTe collection to use. If provided, documents referenced by Test and any specified chunks are ignored. This field is required if Test does not reference any documents and no chunks are provided. If this field is left empty, a temporary collection will be created.", alias="generatorH2ogpteCollectionId")
+     evaluator_identifiers: Optional[List[StrictStr]] = Field(default=None, description="Required. Evaluator identifiers to use for the model evaluation using the adversarial inputs.", alias="evaluatorIdentifiers")
+     evaluators_parameters: Optional[Dict[str, StrictStr]] = Field(default=None, description="Optional. Additional evaluators configuration, for all the evaluators used in the evaluation. Key is the evaluator identifier, and the value is a JSON string containing the configuration dictionary.", alias="evaluatorsParameters")
+     model: Optional[V1Model] = None
+     base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to be evaluated using the adversarial inputs.", alias="baseLlmModel")
+     model_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Parameters overrides for the Model host in JSON format.", alias="modelParameters")
+     default_h2ogpte_model: Optional[V1Model] = Field(default=None, alias="defaultH2ogpteModel")
+     baseline_eval: Optional[StrictStr] = Field(default=None, description="Required. Baseline evaluation name.", alias="baselineEval")
+     baseline_metrics: Optional[Dict[str, V1MetricScores]] = Field(default=None, description="Required. Map of baseline metrics from the evaluator to the metric scores for the evaluator.", alias="baselineMetrics")
+     __properties: ClassVar[List[str]] = ["operation", "generatorInputTypes", "generatorDocumentUrls", "generatorModel", "generatorBaseLlmModel", "generatorCount", "generatorTopics", "generatorChunks", "generatorH2ogpteCollectionId", "evaluatorIdentifiers", "evaluatorsParameters", "model", "baseLlmModel", "modelParameters", "defaultH2ogpteModel", "baselineEval", "baselineMetrics"]
+
+     model_config = ConfigDict(
+         populate_by_name=True,
+         validate_assignment=True,
+         protected_namespaces=(),
+     )
+
+
+     def to_str(self) -> str:
+         """Returns the string representation of the model using alias"""
+         return pprint.pformat(self.model_dump(by_alias=True))
+
+     def to_json(self) -> str:
+         """Returns the JSON representation of the model using alias"""
+         # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+         return json.dumps(self.to_dict())
+
+     @classmethod
+     def from_json(cls, json_str: str) -> Optional[Self]:
+         """Create an instance of AdversarialInputsServiceTestAdversarialInputsRobustnessRequest from a JSON string"""
+         return cls.from_dict(json.loads(json_str))
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Return the dictionary representation of the model using alias.
+
+         This has the following differences from calling pydantic's
+         `self.model_dump(by_alias=True)`:
+
+         * `None` is only added to the output dict for nullable fields that
+           were set at model initialization. Other fields with value `None`
+           are ignored.
+         """
+         excluded_fields: Set[str] = set([
+         ])
+
+         _dict = self.model_dump(
+             by_alias=True,
+             exclude=excluded_fields,
+             exclude_none=True,
+         )
+         # override the default output from pydantic by calling `to_dict()` of generator_model
+         if self.generator_model:
+             _dict['generatorModel'] = self.generator_model.to_dict()
+         # override the default output from pydantic by calling `to_dict()` of model
+         if self.model:
+             _dict['model'] = self.model.to_dict()
+         # override the default output from pydantic by calling `to_dict()` of default_h2ogpte_model
+         if self.default_h2ogpte_model:
+             _dict['defaultH2ogpteModel'] = self.default_h2ogpte_model.to_dict()
+         # override the default output from pydantic by calling `to_dict()` of each value in baseline_metrics (dict)
+         _field_dict = {}
+         if self.baseline_metrics:
+             for _key in self.baseline_metrics:
+                 if self.baseline_metrics[_key]:
+                     _field_dict[_key] = self.baseline_metrics[_key].to_dict()
+             _dict['baselineMetrics'] = _field_dict
+         return _dict
+
+     @classmethod
+     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+         """Create an instance of AdversarialInputsServiceTestAdversarialInputsRobustnessRequest from a dict"""
+         if obj is None:
+             return None
+
+         if not isinstance(obj, dict):
+             return cls.model_validate(obj)
+
+         _obj = cls.model_validate({
+             "operation": obj.get("operation"),
+             "generatorInputTypes": obj.get("generatorInputTypes"),
+             "generatorDocumentUrls": obj.get("generatorDocumentUrls"),
+             "generatorModel": V1Model.from_dict(obj["generatorModel"]) if obj.get("generatorModel") is not None else None,
+             "generatorBaseLlmModel": obj.get("generatorBaseLlmModel"),
+             "generatorCount": obj.get("generatorCount"),
+             "generatorTopics": obj.get("generatorTopics"),
+             "generatorChunks": obj.get("generatorChunks"),
+             "generatorH2ogpteCollectionId": obj.get("generatorH2ogpteCollectionId"),
+             "evaluatorIdentifiers": obj.get("evaluatorIdentifiers"),
+             "evaluatorsParameters": obj.get("evaluatorsParameters"),
+             "model": V1Model.from_dict(obj["model"]) if obj.get("model") is not None else None,
+             "baseLlmModel": obj.get("baseLlmModel"),
+             "modelParameters": obj.get("modelParameters"),
+             "defaultH2ogpteModel": V1Model.from_dict(obj["defaultH2ogpteModel"]) if obj.get("defaultH2ogpteModel") is not None else None,
+             "baselineEval": obj.get("baselineEval"),
+             "baselineMetrics": dict(
+                 (_k, V1MetricScores.from_dict(_v))
+                 for _k, _v in obj["baselineMetrics"].items()
+             )
+             if obj.get("baselineMetrics") is not None
+             else None
+         })
+         return _obj
+
+
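A minimal usage sketch for the request model above, using only the fields and helpers defined in this file; the module path follows the naming pattern of the other generated models, and the operation name, model id, and evaluator identifier are illustrative placeholders:

    from eval_studio_client.api.models.adversarial_inputs_service_test_adversarial_inputs_robustness_request import (
        AdversarialInputsServiceTestAdversarialInputsRobustnessRequest,
    )

    request = AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(
        operation="operations/123",              # illustrative Operation resource name
        generatorCount=5,                        # camelCase aliases are accepted; populate_by_name=True also allows snake_case names
        baseLlmModel="example-llm",              # illustrative base LLM id
        evaluatorIdentifiers=["example-evaluator"],
    )
    payload = request.to_json()                  # camelCase keys, unset fields omitted
    roundtrip = AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.from_json(payload)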
@@ -0,0 +1,97 @@
+ # coding: utf-8
+
+ """
+     ai/h2o/eval_studio/v1/insight.proto
+
+     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+     The version of the OpenAPI document: version not set
+     Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+     Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictStr
+ from typing import Any, ClassVar, Dict, List, Optional
+ from eval_studio_client.api.models.v1_generated_test_case import V1GeneratedTestCase
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest(BaseModel):
+     """
+     GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest
+     """ # noqa: E501
+     operation: Optional[StrictStr] = Field(default=None, description="Required. The Operation processing this question validation process.")
+     test_cases: Optional[List[V1GeneratedTestCase]] = Field(default=None, description="Required. Generated Test Cases, i.e., Test cases with context that was used for their generation.", alias="testCases")
+     __properties: ClassVar[List[str]] = ["operation", "testCases"]
+
+     model_config = ConfigDict(
+         populate_by_name=True,
+         validate_assignment=True,
+         protected_namespaces=(),
+     )
+
+
+     def to_str(self) -> str:
+         """Returns the string representation of the model using alias"""
+         return pprint.pformat(self.model_dump(by_alias=True))
+
+     def to_json(self) -> str:
+         """Returns the JSON representation of the model using alias"""
+         # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+         return json.dumps(self.to_dict())
+
+     @classmethod
+     def from_json(cls, json_str: str) -> Optional[Self]:
+         """Create an instance of GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest from a JSON string"""
+         return cls.from_dict(json.loads(json_str))
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Return the dictionary representation of the model using alias.
+
+         This has the following differences from calling pydantic's
+         `self.model_dump(by_alias=True)`:
+
+         * `None` is only added to the output dict for nullable fields that
+           were set at model initialization. Other fields with value `None`
+           are ignored.
+         """
+         excluded_fields: Set[str] = set([
+         ])
+
+         _dict = self.model_dump(
+             by_alias=True,
+             exclude=excluded_fields,
+             exclude_none=True,
+         )
+         # override the default output from pydantic by calling `to_dict()` of each item in test_cases (list)
+         _items = []
+         if self.test_cases:
+             for _item in self.test_cases:
+                 if _item:
+                     _items.append(_item.to_dict())
+             _dict['testCases'] = _items
+         return _dict
+
+     @classmethod
+     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+         """Create an instance of GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest from a dict"""
+         if obj is None:
+             return None
+
+         if not isinstance(obj, dict):
+             return cls.model_validate(obj)
+
+         _obj = cls.model_validate({
+             "operation": obj.get("operation"),
+             "testCases": [V1GeneratedTestCase.from_dict(_item) for _item in obj["testCases"]] if obj.get("testCases") is not None else None
+         })
+         return _obj
+
+
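A short deserialization sketch using only the from_dict helper defined above; the module path follows the generated naming pattern and the operation name is an illustrative placeholder:

    from eval_studio_client.api.models.generated_questions_validation_service_validate_generated_questions_request import (
        GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest,
    )

    payload = {"operation": "operations/validate-7", "testCases": []}
    request = GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.from_dict(payload)
    assert request is not None and request.operation == "operations/validate-7"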
@@ -1,7 +1,7 @@
  # coding: utf-8
  
  """
-     ai/h2o/eval_studio/v1/collection.proto
+     ai/h2o/eval_studio/v1/insight.proto
  
      No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
  
@@ -19,6 +19,7 @@ import json
  
  from pydantic import BaseModel, ConfigDict, Field
  from typing import Any, ClassVar, Dict, List, Optional
+ from eval_studio_client.api.models.v1_model import V1Model
  from eval_studio_client.api.models.v1_perturbator_configuration import V1PerturbatorConfiguration
  from eval_studio_client.api.models.v1_test_case import V1TestCase
  from eval_studio_client.api.models.v1_test_case_relationship import V1TestCaseRelationship
@@ -32,7 +33,8 @@ class PerturbationServiceCreatePerturbationRequest(BaseModel):
      perturbator_configurations: Optional[List[V1PerturbatorConfiguration]] = Field(default=None, description="Required. PerturbatorConfiguration to apply to the parent Test.", alias="perturbatorConfigurations")
      test_cases: Optional[List[V1TestCase]] = Field(default=None, description="Required. List of test cases to perturbate. These are the test cases from the parent test. TODO: breaks https://google.aip.dev/144", alias="testCases")
      test_case_relationships: Optional[List[V1TestCaseRelationship]] = Field(default=None, description="Optional. List of relationships between test cases.", alias="testCaseRelationships")
-     __properties: ClassVar[List[str]] = ["perturbatorConfigurations", "testCases", "testCaseRelationships"]
+     default_h2ogpte_model: Optional[V1Model] = Field(default=None, alias="defaultH2ogpteModel")
+     __properties: ClassVar[List[str]] = ["perturbatorConfigurations", "testCases", "testCaseRelationships", "defaultH2ogpteModel"]
  
      model_config = ConfigDict(
          populate_by_name=True,
@@ -94,6 +96,9 @@ class PerturbationServiceCreatePerturbationRequest(BaseModel):
                  if _item:
                      _items.append(_item.to_dict())
              _dict['testCaseRelationships'] = _items
+         # override the default output from pydantic by calling `to_dict()` of default_h2ogpte_model
+         if self.default_h2ogpte_model:
+             _dict['defaultH2ogpteModel'] = self.default_h2ogpte_model.to_dict()
          return _dict
  
      @classmethod
@@ -108,7 +113,8 @@ class PerturbationServiceCreatePerturbationRequest(BaseModel):
          _obj = cls.model_validate({
              "perturbatorConfigurations": [V1PerturbatorConfiguration.from_dict(_item) for _item in obj["perturbatorConfigurations"]] if obj.get("perturbatorConfigurations") is not None else None,
              "testCases": [V1TestCase.from_dict(_item) for _item in obj["testCases"]] if obj.get("testCases") is not None else None,
-             "testCaseRelationships": [V1TestCaseRelationship.from_dict(_item) for _item in obj["testCaseRelationships"]] if obj.get("testCaseRelationships") is not None else None
+             "testCaseRelationships": [V1TestCaseRelationship.from_dict(_item) for _item in obj["testCaseRelationships"]] if obj.get("testCaseRelationships") is not None else None,
+             "defaultH2ogpteModel": V1Model.from_dict(obj["defaultH2ogpteModel"]) if obj.get("defaultH2ogpteModel") is not None else None
          })
          return _obj
  
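A hedged sketch of the new defaultH2ogpteModel field on this request; the request module path follows the generated naming pattern, and constructing V1Model() with no arguments assumes all of its fields are optional, as they are in the other generated models in this diff:

    from eval_studio_client.api.models.perturbation_service_create_perturbation_request import (
        PerturbationServiceCreatePerturbationRequest,
    )
    from eval_studio_client.api.models.v1_model import V1Model

    request = PerturbationServiceCreatePerturbationRequest(
        test_cases=[],
        default_h2ogpte_model=V1Model(),   # assumed to be constructible with defaults
    )
    wire = request.to_dict()
    assert "defaultH2ogpteModel" in wire   # emitted under its camelCase alias via V1Model.to_dict()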
@@ -1,7 +1,7 @@
  # coding: utf-8
  
  """
-     ai/h2o/eval_studio/v1/collection.proto
+     ai/h2o/eval_studio/v1/insight.proto
  
      No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
  
@@ -20,6 +20,7 @@ import json
  from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
  from typing import Any, ClassVar, Dict, List, Optional
  from eval_studio_client.api.models.v1_model import V1Model
+ from eval_studio_client.api.models.v1_repeated_string import V1RepeatedString
  from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
  from typing import Optional, Set
  from typing_extensions import Self
@@ -32,10 +33,12 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
      model: Optional[V1Model] = None
      count: Optional[StrictInt] = Field(default=None, description="Required. The number of TestCases to generate.")
      base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to use for generating the prompts.", alias="baseLlmModel")
-     document_urls: Optional[List[StrictStr]] = Field(default=None, description="Optional. The list of document URLs. The document URL might be a managed document URL or a public URL.", alias="documentUrls")
-     generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. Topics to generate TestCases for. If not specified, all topics are selected.")
+     document_urls: Optional[V1RepeatedString] = Field(default=None, alias="documentUrls")
+     chunks: Optional[V1RepeatedString] = None
+     generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. Type of questions to generate TestCases for. If not specified, all types of questions are selected.")
      h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. The ID of the h2oGPTe collection to use. If empty, new temporary collection will be created.", alias="h2ogpteCollectionId")
-     __properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "generators", "h2ogpteCollectionId"]
+     topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.")
+     __properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "chunks", "generators", "h2ogpteCollectionId", "topics"]
  
      model_config = ConfigDict(
          populate_by_name=True,
@@ -79,6 +82,12 @@
          # override the default output from pydantic by calling `to_dict()` of model
          if self.model:
              _dict['model'] = self.model.to_dict()
+         # override the default output from pydantic by calling `to_dict()` of document_urls
+         if self.document_urls:
+             _dict['documentUrls'] = self.document_urls.to_dict()
+         # override the default output from pydantic by calling `to_dict()` of chunks
+         if self.chunks:
+             _dict['chunks'] = self.chunks.to_dict()
          return _dict
  
      @classmethod
@@ -95,9 +104,11 @@
              "model": V1Model.from_dict(obj["model"]) if obj.get("model") is not None else None,
              "count": obj.get("count"),
              "baseLlmModel": obj.get("baseLlmModel"),
-             "documentUrls": obj.get("documentUrls"),
+             "documentUrls": V1RepeatedString.from_dict(obj["documentUrls"]) if obj.get("documentUrls") is not None else None,
+             "chunks": V1RepeatedString.from_dict(obj["chunks"]) if obj.get("chunks") is not None else None,
              "generators": obj.get("generators"),
-             "h2ogpteCollectionId": obj.get("h2ogpteCollectionId")
+             "h2ogpteCollectionId": obj.get("h2ogpteCollectionId"),
+             "topics": obj.get("topics")
          })
          return _obj
  
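With this change documentUrls and chunks are wrapped in V1RepeatedString instead of plain string lists, and topics is a new plain list of strings. A hedged construction sketch limited to the fields whose types are visible above (the internal shape of V1RepeatedString is not shown here, so those two fields are left unset; the module path follows the generated naming pattern and the names are illustrative):

    from eval_studio_client.api.models.prompt_generation_service_auto_generate_prompts_request import (
        PromptGenerationServiceAutoGeneratePromptsRequest,
    )

    request = PromptGenerationServiceAutoGeneratePromptsRequest(
        operation="operations/autogen-1",
        count=10,
        base_llm_model="example-llm",
        topics=["billing", "refunds"],
    )
    print(request.to_json())   # camelCase aliases; unset documentUrls/chunks are omitted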
@@ -1,7 +1,7 @@
  # coding: utf-8
  
  """
-     ai/h2o/eval_studio/v1/collection.proto
+     ai/h2o/eval_studio/v1/insight.proto
  
      No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
  
@@ -0,0 +1,36 @@
+ # coding: utf-8
+
+ """
+     ai/h2o/eval_studio/v1/insight.proto
+
+     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+     The version of the OpenAPI document: version not set
+     Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+     Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import json
+ from enum import Enum
+ from typing_extensions import Self
+
+
+ class ProtobufNullValue(str, Enum):
+     """
+     `NullValue` is a singleton enumeration to represent the null value for the `Value` type union. The JSON representation for `NullValue` is JSON `null`. - NULL_VALUE: Null value.
+     """
+
+     """
+     allowed enum values
+     """
+     NULL_VALUE = 'NULL_VALUE'
+
+     @classmethod
+     def from_json(cls, json_str: str) -> Self:
+         """Create an instance of ProtobufNullValue from a JSON string"""
+         return cls(json.loads(json_str))
+
+
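A one-line sanity check of the enum's JSON helper, using only what is defined above (the module path follows the generated naming pattern):

    from eval_studio_client.api.models.protobuf_null_value import ProtobufNullValue

    assert ProtobufNullValue.from_json('"NULL_VALUE"') is ProtobufNullValue.NULL_VALUE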
@@ -1,7 +1,7 @@
  # coding: utf-8
  
  """
-     ai/h2o/eval_studio/v1/collection.proto
+     ai/h2o/eval_studio/v1/insight.proto
  
      No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
  
@@ -1,7 +1,7 @@
  # coding: utf-8
  
  """
-     ai/h2o/eval_studio/v1/collection.proto
+     ai/h2o/eval_studio/v1/insight.proto
  
      No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
  
@@ -1,7 +1,7 @@
  # coding: utf-8
  
  """
-     ai/h2o/eval_studio/v1/collection.proto
+     ai/h2o/eval_studio/v1/insight.proto
  
      No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
  
@@ -1,7 +1,7 @@
  # coding: utf-8
  
  """
-     ai/h2o/eval_studio/v1/collection.proto
+     ai/h2o/eval_studio/v1/insight.proto
  
      No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
  
@@ -1,7 +1,7 @@
  # coding: utf-8
  
  """
-     ai/h2o/eval_studio/v1/collection.proto
+     ai/h2o/eval_studio/v1/insight.proto
  
      No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)