eval-studio-client 1.0.3__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (592)
  1. eval_studio_client/api/__init__.py +83 -1
  2. eval_studio_client/api/api/__init__.py +8 -0
  3. eval_studio_client/api/api/adversarial_inputs_service_api.py +321 -0
  4. eval_studio_client/api/api/dashboard_service_api.py +18 -1
  5. eval_studio_client/api/api/document_service_api.py +1 -1
  6. eval_studio_client/api/api/evaluation_service_api.py +1 -1
  7. eval_studio_client/api/api/evaluator_service_api.py +1 -1
  8. eval_studio_client/api/api/human_calibration_service_api.py +304 -0
  9. eval_studio_client/api/api/info_service_api.py +1 -1
  10. eval_studio_client/api/api/leaderboard_report_service_api.py +292 -0
  11. eval_studio_client/api/api/leaderboard_service_api.py +17 -17
  12. eval_studio_client/api/api/model_service_api.py +17 -17
  13. eval_studio_client/api/api/operation_progress_service_api.py +1 -1
  14. eval_studio_client/api/api/operation_service_api.py +272 -17
  15. eval_studio_client/api/api/perturbation_service_api.py +1 -1
  16. eval_studio_client/api/api/perturbator_service_api.py +285 -18
  17. eval_studio_client/api/api/prompt_generation_service_api.py +1 -1
  18. eval_studio_client/api/api/prompt_library_service_api.py +669 -0
  19. eval_studio_client/api/api/test_case_relationship_service_api.py +292 -0
  20. eval_studio_client/api/api/test_case_service_api.py +17 -17
  21. eval_studio_client/api/api/test_class_service_api.py +17 -17
  22. eval_studio_client/api/api/test_lab_service_api.py +1 -1
  23. eval_studio_client/api/api/test_service_api.py +1272 -102
  24. eval_studio_client/api/api/who_am_i_service_api.py +1 -1
  25. eval_studio_client/api/api/workflow_edge_service_api.py +835 -0
  26. eval_studio_client/api/api/workflow_node_service_api.py +2431 -0
  27. eval_studio_client/api/api/workflow_service_api.py +2403 -0
  28. eval_studio_client/api/api_client.py +1 -1
  29. eval_studio_client/api/configuration.py +1 -1
  30. eval_studio_client/api/docs/AdversarialInputsServiceApi.md +78 -0
  31. eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +45 -0
  32. eval_studio_client/api/docs/DashboardServiceApi.md +4 -2
  33. eval_studio_client/api/docs/HumanCalibrationServiceApi.md +77 -0
  34. eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -0
  35. eval_studio_client/api/docs/LeaderboardServiceApi.md +5 -5
  36. eval_studio_client/api/docs/ModelServiceApi.md +5 -5
  37. eval_studio_client/api/docs/OperationServiceApi.md +72 -5
  38. eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +1 -0
  39. eval_studio_client/api/docs/PerturbatorServiceApi.md +38 -8
  40. eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +4 -2
  41. eval_studio_client/api/docs/PromptLibraryServiceApi.md +155 -0
  42. eval_studio_client/api/docs/ProtobufNullValue.md +12 -0
  43. eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +1 -0
  44. eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +3 -0
  45. eval_studio_client/api/docs/RequiredTheTestToUpdate.md +1 -0
  46. eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +47 -0
  47. eval_studio_client/api/docs/RequiredTheUpdatedWorkflowNode.md +44 -0
  48. eval_studio_client/api/docs/TestCaseRelationshipServiceApi.md +75 -0
  49. eval_studio_client/api/docs/TestCaseServiceApi.md +5 -5
  50. eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
  51. eval_studio_client/api/docs/TestServiceApi.md +293 -9
  52. eval_studio_client/api/docs/TestServiceCloneTestRequest.md +30 -0
  53. eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +3 -1
  54. eval_studio_client/api/docs/TestServiceImportTestCasesFromLibraryRequest.md +32 -0
  55. eval_studio_client/api/docs/TestServiceListTestCaseLibraryItemsRequest.md +35 -0
  56. eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md +30 -0
  57. eval_studio_client/api/docs/TestServicePerturbTestRequest.md +1 -0
  58. eval_studio_client/api/docs/V1AbortOperationResponse.md +29 -0
  59. eval_studio_client/api/docs/V1BatchDeleteWorkflowsRequest.md +29 -0
  60. eval_studio_client/api/docs/V1BatchDeleteWorkflowsResponse.md +29 -0
  61. eval_studio_client/api/docs/V1BatchGetWorkflowEdgesResponse.md +29 -0
  62. eval_studio_client/api/docs/V1BatchGetWorkflowNodesResponse.md +29 -0
  63. eval_studio_client/api/docs/V1CloneTestResponse.md +29 -0
  64. eval_studio_client/api/docs/V1CloneWorkflowResponse.md +29 -0
  65. eval_studio_client/api/docs/V1Context.md +37 -0
  66. eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
  67. eval_studio_client/api/docs/V1CreateWorkflowEdgeResponse.md +29 -0
  68. eval_studio_client/api/docs/V1CreateWorkflowNodeResponse.md +29 -0
  69. eval_studio_client/api/docs/V1CreateWorkflowResponse.md +29 -0
  70. eval_studio_client/api/docs/V1Dashboard.md +1 -0
  71. eval_studio_client/api/docs/V1DashboardType.md +12 -0
  72. eval_studio_client/api/docs/V1DeleteWorkflowEdgeResponse.md +29 -0
  73. eval_studio_client/api/docs/V1DeleteWorkflowNodeResponse.md +29 -0
  74. eval_studio_client/api/docs/V1DeleteWorkflowResponse.md +29 -0
  75. eval_studio_client/api/docs/V1DependencyList.md +30 -0
  76. eval_studio_client/api/docs/V1EstimateThresholdRequest.md +33 -0
  77. eval_studio_client/api/docs/V1Evaluator.md +2 -0
  78. eval_studio_client/api/docs/V1GetGuardrailsConfigurationResponse.md +29 -0
  79. eval_studio_client/api/docs/V1GetLeaderboardReportResponse.md +29 -0
  80. eval_studio_client/api/docs/V1GetWorkflowNodePrerequisitesResponse.md +30 -0
  81. eval_studio_client/api/docs/V1GetWorkflowNodeResponse.md +29 -0
  82. eval_studio_client/api/docs/V1GetWorkflowResponse.md +29 -0
  83. eval_studio_client/api/docs/V1ImportEvaluationRequest.md +1 -0
  84. eval_studio_client/api/docs/V1ImportTestCasesFromLibraryResponse.md +29 -0
  85. eval_studio_client/api/docs/V1ImportTestCasesRequest.md +33 -0
  86. eval_studio_client/api/docs/V1Info.md +3 -0
  87. eval_studio_client/api/docs/V1InitWorkflowNodeResponse.md +29 -0
  88. eval_studio_client/api/docs/V1LabeledTestCase.md +31 -0
  89. eval_studio_client/api/docs/V1LeaderboardReport.md +32 -0
  90. eval_studio_client/api/docs/V1LeaderboardReportActualOutputData.md +31 -0
  91. eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +31 -0
  92. eval_studio_client/api/docs/V1LeaderboardReportEvaluator.md +42 -0
  93. eval_studio_client/api/docs/V1LeaderboardReportEvaluatorParameter.md +38 -0
  94. eval_studio_client/api/docs/V1LeaderboardReportExplanation.md +34 -0
  95. eval_studio_client/api/docs/V1LeaderboardReportMetricsMetaEntry.md +41 -0
  96. eval_studio_client/api/docs/V1LeaderboardReportModel.md +37 -0
  97. eval_studio_client/api/docs/V1LeaderboardReportResult.md +45 -0
  98. eval_studio_client/api/docs/V1LeaderboardReportResultRelationship.md +32 -0
  99. eval_studio_client/api/docs/V1ListPromptLibraryItemsResponse.md +29 -0
  100. eval_studio_client/api/docs/V1ListTestCaseLibraryItemsResponse.md +29 -0
  101. eval_studio_client/api/docs/V1ListTestCaseRelationshipsResponse.md +29 -0
  102. eval_studio_client/api/docs/V1ListWorkflowDependenciesResponse.md +30 -0
  103. eval_studio_client/api/docs/V1ListWorkflowsResponse.md +29 -0
  104. eval_studio_client/api/docs/V1MetricScore.md +31 -0
  105. eval_studio_client/api/docs/V1MetricScores.md +29 -0
  106. eval_studio_client/api/docs/V1PerturbTestInPlaceResponse.md +29 -0
  107. eval_studio_client/api/docs/V1ProcessWorkflowNodeResponse.md +29 -0
  108. eval_studio_client/api/docs/V1PromptLibraryItem.md +42 -0
  109. eval_studio_client/api/docs/V1RepeatedContext.md +29 -0
  110. eval_studio_client/api/docs/V1RepeatedString.md +29 -0
  111. eval_studio_client/api/docs/V1ResetWorkflowNodeResponse.md +29 -0
  112. eval_studio_client/api/docs/V1Test.md +1 -0
  113. eval_studio_client/api/docs/V1TestCase.md +3 -0
  114. eval_studio_client/api/docs/V1TestSuiteEvaluates.md +11 -0
  115. eval_studio_client/api/docs/V1TestType.md +12 -0
  116. eval_studio_client/api/docs/V1UpdateWorkflowNodeResponse.md +29 -0
  117. eval_studio_client/api/docs/V1UpdateWorkflowResponse.md +29 -0
  118. eval_studio_client/api/docs/V1Workflow.md +49 -0
  119. eval_studio_client/api/docs/V1WorkflowDependency.md +30 -0
  120. eval_studio_client/api/docs/V1WorkflowEdge.md +40 -0
  121. eval_studio_client/api/docs/V1WorkflowEdgeType.md +12 -0
  122. eval_studio_client/api/docs/V1WorkflowNode.md +46 -0
  123. eval_studio_client/api/docs/V1WorkflowNodeArtifact.md +41 -0
  124. eval_studio_client/api/docs/V1WorkflowNodeArtifacts.md +29 -0
  125. eval_studio_client/api/docs/V1WorkflowNodeAttributes.md +30 -0
  126. eval_studio_client/api/docs/V1WorkflowNodeStatus.md +12 -0
  127. eval_studio_client/api/docs/V1WorkflowNodeType.md +12 -0
  128. eval_studio_client/api/docs/V1WorkflowNodeView.md +12 -0
  129. eval_studio_client/api/docs/V1WorkflowType.md +12 -0
  130. eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +215 -0
  131. eval_studio_client/api/docs/WorkflowNodeServiceApi.md +632 -0
  132. eval_studio_client/api/docs/WorkflowServiceApi.md +623 -0
  133. eval_studio_client/api/docs/WorkflowServiceCloneWorkflowRequest.md +33 -0
  134. eval_studio_client/api/exceptions.py +1 -1
  135. eval_studio_client/api/models/__init__.py +75 -1
  136. eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +143 -0
  137. eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +9 -3
  138. eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +18 -6
  139. eval_studio_client/api/models/protobuf_any.py +1 -1
  140. eval_studio_client/api/models/protobuf_null_value.py +36 -0
  141. eval_studio_client/api/models/required_the_dashboard_to_update.py +6 -3
  142. eval_studio_client/api/models/required_the_document_to_update.py +1 -1
  143. eval_studio_client/api/models/required_the_leaderboard_to_update.py +1 -1
  144. eval_studio_client/api/models/required_the_model_to_update.py +1 -1
  145. eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
  146. eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
  147. eval_studio_client/api/models/required_the_test_case_to_update.py +14 -3
  148. eval_studio_client/api/models/required_the_test_to_update.py +6 -3
  149. eval_studio_client/api/models/required_the_updated_workflow.py +160 -0
  150. eval_studio_client/api/models/required_the_updated_workflow_node.py +152 -0
  151. eval_studio_client/api/models/rpc_status.py +1 -1
  152. eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
  153. eval_studio_client/api/models/test_service_clone_test_request.py +89 -0
  154. eval_studio_client/api/models/test_service_generate_test_cases_request.py +16 -4
  155. eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +93 -0
  156. eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +99 -0
  157. eval_studio_client/api/models/test_service_perturb_test_in_place_request.py +97 -0
  158. eval_studio_client/api/models/test_service_perturb_test_request.py +5 -3
  159. eval_studio_client/api/models/v1_abort_operation_response.py +91 -0
  160. eval_studio_client/api/models/v1_batch_create_leaderboards_request.py +1 -1
  161. eval_studio_client/api/models/v1_batch_create_leaderboards_response.py +1 -1
  162. eval_studio_client/api/models/v1_batch_delete_dashboards_request.py +1 -1
  163. eval_studio_client/api/models/v1_batch_delete_dashboards_response.py +1 -1
  164. eval_studio_client/api/models/v1_batch_delete_documents_request.py +1 -1
  165. eval_studio_client/api/models/v1_batch_delete_documents_response.py +1 -1
  166. eval_studio_client/api/models/v1_batch_delete_evaluators_request.py +1 -1
  167. eval_studio_client/api/models/v1_batch_delete_evaluators_response.py +1 -1
  168. eval_studio_client/api/models/v1_batch_delete_leaderboards_request.py +1 -1
  169. eval_studio_client/api/models/v1_batch_delete_leaderboards_response.py +1 -1
  170. eval_studio_client/api/models/v1_batch_delete_models_request.py +1 -1
  171. eval_studio_client/api/models/v1_batch_delete_models_response.py +1 -1
  172. eval_studio_client/api/models/v1_batch_delete_test_cases_response.py +1 -1
  173. eval_studio_client/api/models/v1_batch_delete_tests_request.py +1 -1
  174. eval_studio_client/api/models/v1_batch_delete_tests_response.py +1 -1
  175. eval_studio_client/api/models/v1_batch_delete_workflows_request.py +87 -0
  176. eval_studio_client/api/models/v1_batch_delete_workflows_response.py +95 -0
  177. eval_studio_client/api/models/v1_batch_get_dashboards_response.py +1 -1
  178. eval_studio_client/api/models/v1_batch_get_documents_response.py +1 -1
  179. eval_studio_client/api/models/v1_batch_get_leaderboards_response.py +1 -1
  180. eval_studio_client/api/models/v1_batch_get_models_response.py +1 -1
  181. eval_studio_client/api/models/v1_batch_get_operations_response.py +1 -1
  182. eval_studio_client/api/models/v1_batch_get_tests_response.py +1 -1
  183. eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +95 -0
  184. eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +95 -0
  185. eval_studio_client/api/models/v1_batch_import_leaderboard_request.py +1 -1
  186. eval_studio_client/api/models/v1_batch_import_leaderboard_response.py +1 -1
  187. eval_studio_client/api/models/v1_batch_import_tests_request.py +1 -1
  188. eval_studio_client/api/models/v1_batch_import_tests_response.py +1 -1
  189. eval_studio_client/api/models/v1_check_base_models_response.py +1 -1
  190. eval_studio_client/api/models/v1_clone_test_response.py +91 -0
  191. eval_studio_client/api/models/v1_clone_workflow_response.py +91 -0
  192. eval_studio_client/api/models/v1_collection_info.py +1 -1
  193. eval_studio_client/api/models/v1_context.py +103 -0
  194. eval_studio_client/api/models/v1_create_dashboard_response.py +1 -1
  195. eval_studio_client/api/models/v1_create_document_response.py +1 -1
  196. eval_studio_client/api/models/v1_create_evaluation_request.py +8 -3
  197. eval_studio_client/api/models/v1_create_evaluator_response.py +1 -1
  198. eval_studio_client/api/models/v1_create_leaderboard_request.py +1 -1
  199. eval_studio_client/api/models/v1_create_leaderboard_response.py +1 -1
  200. eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +1 -1
  201. eval_studio_client/api/models/v1_create_model_response.py +1 -1
  202. eval_studio_client/api/models/v1_create_perturbation_response.py +1 -1
  203. eval_studio_client/api/models/v1_create_test_case_response.py +1 -1
  204. eval_studio_client/api/models/v1_create_test_lab_response.py +1 -1
  205. eval_studio_client/api/models/v1_create_test_response.py +1 -1
  206. eval_studio_client/api/models/v1_create_workflow_edge_response.py +91 -0
  207. eval_studio_client/api/models/v1_create_workflow_node_response.py +91 -0
  208. eval_studio_client/api/models/v1_create_workflow_response.py +91 -0
  209. eval_studio_client/api/models/v1_dashboard.py +6 -3
  210. eval_studio_client/api/models/v1_dashboard_status.py +1 -1
  211. eval_studio_client/api/models/v1_dashboard_type.py +38 -0
  212. eval_studio_client/api/models/v1_delete_dashboard_response.py +1 -1
  213. eval_studio_client/api/models/v1_delete_document_response.py +1 -1
  214. eval_studio_client/api/models/v1_delete_evaluator_response.py +1 -1
  215. eval_studio_client/api/models/v1_delete_leaderboard_response.py +1 -1
  216. eval_studio_client/api/models/v1_delete_model_response.py +1 -1
  217. eval_studio_client/api/models/v1_delete_test_case_response.py +1 -1
  218. eval_studio_client/api/models/v1_delete_test_response.py +1 -1
  219. eval_studio_client/api/models/v1_delete_workflow_edge_response.py +91 -0
  220. eval_studio_client/api/models/v1_delete_workflow_node_response.py +91 -0
  221. eval_studio_client/api/models/v1_delete_workflow_response.py +91 -0
  222. eval_studio_client/api/models/v1_dependency_list.py +97 -0
  223. eval_studio_client/api/models/v1_document.py +1 -1
  224. eval_studio_client/api/models/v1_estimate_threshold_request.py +103 -0
  225. eval_studio_client/api/models/v1_evaluation_test.py +1 -1
  226. eval_studio_client/api/models/v1_evaluator.py +12 -4
  227. eval_studio_client/api/models/v1_evaluator_param_type.py +1 -1
  228. eval_studio_client/api/models/v1_evaluator_parameter.py +1 -1
  229. eval_studio_client/api/models/v1_evaluator_view.py +1 -1
  230. eval_studio_client/api/models/v1_finalize_operation_response.py +1 -1
  231. eval_studio_client/api/models/v1_find_all_test_cases_by_id_response.py +1 -1
  232. eval_studio_client/api/models/v1_find_test_lab_response.py +1 -1
  233. eval_studio_client/api/models/v1_generate_test_cases_response.py +1 -1
  234. eval_studio_client/api/models/v1_get_dashboard_response.py +1 -1
  235. eval_studio_client/api/models/v1_get_document_response.py +1 -1
  236. eval_studio_client/api/models/v1_get_evaluator_response.py +1 -1
  237. eval_studio_client/api/models/v1_get_guardrails_configuration_response.py +87 -0
  238. eval_studio_client/api/models/v1_get_info_response.py +1 -1
  239. eval_studio_client/api/models/v1_get_leaderboard_report_response.py +91 -0
  240. eval_studio_client/api/models/v1_get_leaderboard_response.py +1 -1
  241. eval_studio_client/api/models/v1_get_model_response.py +1 -1
  242. eval_studio_client/api/models/v1_get_operation_progress_by_parent_response.py +1 -1
  243. eval_studio_client/api/models/v1_get_operation_response.py +1 -1
  244. eval_studio_client/api/models/v1_get_perturbator_response.py +1 -1
  245. eval_studio_client/api/models/v1_get_test_case_response.py +1 -1
  246. eval_studio_client/api/models/v1_get_test_class_response.py +1 -1
  247. eval_studio_client/api/models/v1_get_test_response.py +1 -1
  248. eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +89 -0
  249. eval_studio_client/api/models/v1_get_workflow_node_response.py +91 -0
  250. eval_studio_client/api/models/v1_get_workflow_response.py +91 -0
  251. eval_studio_client/api/models/v1_import_evaluation_request.py +8 -3
  252. eval_studio_client/api/models/v1_import_leaderboard_request.py +1 -1
  253. eval_studio_client/api/models/v1_import_leaderboard_response.py +1 -1
  254. eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +91 -0
  255. eval_studio_client/api/models/v1_import_test_cases_request.py +95 -0
  256. eval_studio_client/api/models/v1_info.py +10 -4
  257. eval_studio_client/api/models/v1_init_workflow_node_response.py +91 -0
  258. eval_studio_client/api/models/v1_insight.py +1 -1
  259. eval_studio_client/api/models/v1_labeled_test_case.py +91 -0
  260. eval_studio_client/api/models/v1_leaderboard.py +1 -1
  261. eval_studio_client/api/models/v1_leaderboard_report.py +115 -0
  262. eval_studio_client/api/models/v1_leaderboard_report_actual_output_data.py +93 -0
  263. eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +101 -0
  264. eval_studio_client/api/models/v1_leaderboard_report_evaluator.py +155 -0
  265. eval_studio_client/api/models/v1_leaderboard_report_evaluator_parameter.py +109 -0
  266. eval_studio_client/api/models/v1_leaderboard_report_explanation.py +103 -0
  267. eval_studio_client/api/models/v1_leaderboard_report_metrics_meta_entry.py +129 -0
  268. eval_studio_client/api/models/v1_leaderboard_report_model.py +113 -0
  269. eval_studio_client/api/models/v1_leaderboard_report_result.py +175 -0
  270. eval_studio_client/api/models/v1_leaderboard_report_result_relationship.py +97 -0
  271. eval_studio_client/api/models/v1_leaderboard_status.py +1 -1
  272. eval_studio_client/api/models/v1_leaderboard_type.py +1 -1
  273. eval_studio_client/api/models/v1_leaderboard_view.py +1 -1
  274. eval_studio_client/api/models/v1_list_base_models_response.py +1 -1
  275. eval_studio_client/api/models/v1_list_dashboards_response.py +1 -1
  276. eval_studio_client/api/models/v1_list_documents_response.py +1 -1
  277. eval_studio_client/api/models/v1_list_evaluators_response.py +1 -1
  278. eval_studio_client/api/models/v1_list_leaderboards_response.py +1 -1
  279. eval_studio_client/api/models/v1_list_llm_models_response.py +1 -1
  280. eval_studio_client/api/models/v1_list_model_collections_response.py +1 -1
  281. eval_studio_client/api/models/v1_list_models_response.py +1 -1
  282. eval_studio_client/api/models/v1_list_most_recent_dashboards_response.py +1 -1
  283. eval_studio_client/api/models/v1_list_most_recent_leaderboards_response.py +1 -1
  284. eval_studio_client/api/models/v1_list_most_recent_models_response.py +1 -1
  285. eval_studio_client/api/models/v1_list_most_recent_tests_response.py +1 -1
  286. eval_studio_client/api/models/v1_list_operations_response.py +1 -1
  287. eval_studio_client/api/models/v1_list_perturbators_response.py +1 -1
  288. eval_studio_client/api/models/v1_list_prompt_library_items_response.py +95 -0
  289. eval_studio_client/api/models/v1_list_rag_collections_response.py +1 -1
  290. eval_studio_client/api/models/v1_list_test_case_library_items_response.py +95 -0
  291. eval_studio_client/api/models/v1_list_test_case_relationships_response.py +95 -0
  292. eval_studio_client/api/models/v1_list_test_cases_response.py +1 -1
  293. eval_studio_client/api/models/v1_list_test_classes_response.py +1 -1
  294. eval_studio_client/api/models/v1_list_tests_response.py +1 -1
  295. eval_studio_client/api/models/v1_list_workflow_dependencies_response.py +105 -0
  296. eval_studio_client/api/models/v1_list_workflows_response.py +95 -0
  297. eval_studio_client/api/models/v1_metric_score.py +89 -0
  298. eval_studio_client/api/models/v1_metric_scores.py +95 -0
  299. eval_studio_client/api/models/v1_model.py +1 -1
  300. eval_studio_client/api/models/v1_model_type.py +1 -1
  301. eval_studio_client/api/models/v1_operation.py +1 -1
  302. eval_studio_client/api/models/v1_operation_progress.py +1 -1
  303. eval_studio_client/api/models/v1_perturb_test_in_place_response.py +91 -0
  304. eval_studio_client/api/models/v1_perturb_test_response.py +1 -1
  305. eval_studio_client/api/models/v1_perturbator.py +1 -1
  306. eval_studio_client/api/models/v1_perturbator_configuration.py +1 -1
  307. eval_studio_client/api/models/v1_perturbator_intensity.py +1 -1
  308. eval_studio_client/api/models/v1_problem_and_action.py +1 -1
  309. eval_studio_client/api/models/v1_process_workflow_node_response.py +91 -0
  310. eval_studio_client/api/models/v1_prompt_library_item.py +129 -0
  311. eval_studio_client/api/models/v1_repeated_context.py +95 -0
  312. eval_studio_client/api/models/v1_repeated_string.py +87 -0
  313. eval_studio_client/api/models/v1_reset_workflow_node_response.py +91 -0
  314. eval_studio_client/api/models/v1_test.py +6 -3
  315. eval_studio_client/api/models/v1_test_case.py +14 -3
  316. eval_studio_client/api/models/v1_test_case_relationship.py +1 -1
  317. eval_studio_client/api/models/v1_test_cases_generator.py +1 -1
  318. eval_studio_client/api/models/v1_test_class.py +1 -1
  319. eval_studio_client/api/models/v1_test_class_type.py +1 -1
  320. eval_studio_client/api/models/v1_test_lab.py +1 -1
  321. eval_studio_client/api/models/v1_test_suite_evaluates.py +39 -0
  322. eval_studio_client/api/models/v1_test_type.py +38 -0
  323. eval_studio_client/api/models/v1_update_dashboard_response.py +1 -1
  324. eval_studio_client/api/models/v1_update_document_response.py +1 -1
  325. eval_studio_client/api/models/v1_update_leaderboard_response.py +1 -1
  326. eval_studio_client/api/models/v1_update_model_response.py +1 -1
  327. eval_studio_client/api/models/v1_update_operation_response.py +1 -1
  328. eval_studio_client/api/models/v1_update_test_case_response.py +1 -1
  329. eval_studio_client/api/models/v1_update_test_response.py +1 -1
  330. eval_studio_client/api/models/v1_update_workflow_node_response.py +91 -0
  331. eval_studio_client/api/models/v1_update_workflow_response.py +91 -0
  332. eval_studio_client/api/models/v1_who_am_i_response.py +1 -1
  333. eval_studio_client/api/models/v1_workflow.py +164 -0
  334. eval_studio_client/api/models/v1_workflow_dependency.py +89 -0
  335. eval_studio_client/api/models/v1_workflow_edge.py +123 -0
  336. eval_studio_client/api/models/v1_workflow_edge_type.py +38 -0
  337. eval_studio_client/api/models/v1_workflow_node.py +156 -0
  338. eval_studio_client/api/models/v1_workflow_node_artifact.py +126 -0
  339. eval_studio_client/api/models/v1_workflow_node_artifacts.py +97 -0
  340. eval_studio_client/api/models/v1_workflow_node_attributes.py +87 -0
  341. eval_studio_client/api/models/v1_workflow_node_status.py +40 -0
  342. eval_studio_client/api/models/v1_workflow_node_type.py +44 -0
  343. eval_studio_client/api/models/v1_workflow_node_view.py +38 -0
  344. eval_studio_client/api/models/v1_workflow_type.py +37 -0
  345. eval_studio_client/api/models/workflow_service_clone_workflow_request.py +95 -0
  346. eval_studio_client/api/rest.py +1 -1
  347. eval_studio_client/api/test/test_adversarial_inputs_service_api.py +37 -0
  348. eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +128 -0
  349. eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
  350. eval_studio_client/api/test/test_document_service_api.py +1 -1
  351. eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
  352. eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
  353. eval_studio_client/api/test/test_human_calibration_service_api.py +38 -0
  354. eval_studio_client/api/test/test_info_service_api.py +1 -1
  355. eval_studio_client/api/test/test_leaderboard_report_service_api.py +37 -0
  356. eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
  357. eval_studio_client/api/test/test_model_service_api.py +1 -1
  358. eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
  359. eval_studio_client/api/test/test_operation_service_api.py +7 -1
  360. eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
  361. eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +25 -3
  362. eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
  363. eval_studio_client/api/test/test_prompt_generation_service_api.py +1 -1
  364. eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +21 -5
  365. eval_studio_client/api/test/test_prompt_library_service_api.py +43 -0
  366. eval_studio_client/api/test/test_protobuf_any.py +1 -1
  367. eval_studio_client/api/test/test_protobuf_null_value.py +33 -0
  368. eval_studio_client/api/test/test_required_the_dashboard_to_update.py +3 -2
  369. eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
  370. eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +1 -1
  371. eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
  372. eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
  373. eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
  374. eval_studio_client/api/test/test_required_the_test_case_to_update.py +9 -2
  375. eval_studio_client/api/test/test_required_the_test_to_update.py +3 -2
  376. eval_studio_client/api/test/test_required_the_updated_workflow.py +92 -0
  377. eval_studio_client/api/test/test_required_the_updated_workflow_node.py +81 -0
  378. eval_studio_client/api/test/test_rpc_status.py +1 -1
  379. eval_studio_client/api/test/test_test_case_relationship_service_api.py +37 -0
  380. eval_studio_client/api/test/test_test_case_service_api.py +1 -1
  381. eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
  382. eval_studio_client/api/test/test_test_class_service_api.py +1 -1
  383. eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
  384. eval_studio_client/api/test/test_test_service_api.py +25 -1
  385. eval_studio_client/api/test/test_test_service_clone_test_request.py +52 -0
  386. eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +17 -2
  387. eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +56 -0
  388. eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +63 -0
  389. eval_studio_client/api/test/test_test_service_perturb_test_in_place_request.py +59 -0
  390. eval_studio_client/api/test/test_test_service_perturb_test_request.py +5 -2
  391. eval_studio_client/api/test/test_v1_abort_operation_response.py +71 -0
  392. eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +1 -1
  393. eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +1 -1
  394. eval_studio_client/api/test/test_v1_batch_delete_dashboards_request.py +1 -1
  395. eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +3 -2
  396. eval_studio_client/api/test/test_v1_batch_delete_documents_request.py +1 -1
  397. eval_studio_client/api/test/test_v1_batch_delete_documents_response.py +1 -1
  398. eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +1 -1
  399. eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +4 -2
  400. eval_studio_client/api/test/test_v1_batch_delete_leaderboards_request.py +1 -1
  401. eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +1 -1
  402. eval_studio_client/api/test/test_v1_batch_delete_models_request.py +1 -1
  403. eval_studio_client/api/test/test_v1_batch_delete_models_response.py +1 -1
  404. eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +9 -2
  405. eval_studio_client/api/test/test_v1_batch_delete_tests_request.py +1 -1
  406. eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +3 -2
  407. eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +53 -0
  408. eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +95 -0
  409. eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +3 -2
  410. eval_studio_client/api/test/test_v1_batch_get_documents_response.py +1 -1
  411. eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +1 -1
  412. eval_studio_client/api/test/test_v1_batch_get_models_response.py +1 -1
  413. eval_studio_client/api/test/test_v1_batch_get_operations_response.py +1 -1
  414. eval_studio_client/api/test/test_v1_batch_get_tests_response.py +3 -2
  415. eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +64 -0
  416. eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +84 -0
  417. eval_studio_client/api/test/test_v1_batch_import_leaderboard_request.py +1 -1
  418. eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +1 -1
  419. eval_studio_client/api/test/test_v1_batch_import_tests_request.py +1 -1
  420. eval_studio_client/api/test/test_v1_batch_import_tests_response.py +3 -2
  421. eval_studio_client/api/test/test_v1_check_base_models_response.py +1 -1
  422. eval_studio_client/api/test/test_v1_clone_test_response.py +68 -0
  423. eval_studio_client/api/test/test_v1_clone_workflow_response.py +93 -0
  424. eval_studio_client/api/test/test_v1_collection_info.py +1 -1
  425. eval_studio_client/api/test/test_v1_context.py +59 -0
  426. eval_studio_client/api/test/test_v1_create_dashboard_response.py +3 -2
  427. eval_studio_client/api/test/test_v1_create_document_response.py +1 -1
  428. eval_studio_client/api/test/test_v1_create_evaluation_request.py +25 -3
  429. eval_studio_client/api/test/test_v1_create_evaluator_response.py +4 -2
  430. eval_studio_client/api/test/test_v1_create_leaderboard_request.py +1 -1
  431. eval_studio_client/api/test/test_v1_create_leaderboard_response.py +1 -1
  432. eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +1 -1
  433. eval_studio_client/api/test/test_v1_create_model_response.py +1 -1
  434. eval_studio_client/api/test/test_v1_create_perturbation_response.py +1 -1
  435. eval_studio_client/api/test/test_v1_create_test_case_response.py +9 -2
  436. eval_studio_client/api/test/test_v1_create_test_lab_response.py +1 -1
  437. eval_studio_client/api/test/test_v1_create_test_response.py +3 -2
  438. eval_studio_client/api/test/test_v1_create_workflow_edge_response.py +62 -0
  439. eval_studio_client/api/test/test_v1_create_workflow_node_response.py +82 -0
  440. eval_studio_client/api/test/test_v1_create_workflow_response.py +93 -0
  441. eval_studio_client/api/test/test_v1_dashboard.py +3 -2
  442. eval_studio_client/api/test/test_v1_dashboard_status.py +1 -1
  443. eval_studio_client/api/test/test_v1_dashboard_type.py +33 -0
  444. eval_studio_client/api/test/test_v1_delete_dashboard_response.py +3 -2
  445. eval_studio_client/api/test/test_v1_delete_document_response.py +1 -1
  446. eval_studio_client/api/test/test_v1_delete_evaluator_response.py +4 -2
  447. eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +1 -1
  448. eval_studio_client/api/test/test_v1_delete_model_response.py +1 -1
  449. eval_studio_client/api/test/test_v1_delete_test_case_response.py +9 -2
  450. eval_studio_client/api/test/test_v1_delete_test_response.py +3 -2
  451. eval_studio_client/api/test/test_v1_delete_workflow_edge_response.py +62 -0
  452. eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +82 -0
  453. eval_studio_client/api/test/test_v1_delete_workflow_response.py +93 -0
  454. eval_studio_client/api/test/test_v1_dependency_list.py +56 -0
  455. eval_studio_client/api/test/test_v1_document.py +1 -1
  456. eval_studio_client/api/test/test_v1_estimate_threshold_request.py +60 -0
  457. eval_studio_client/api/test/test_v1_evaluation_test.py +9 -2
  458. eval_studio_client/api/test/test_v1_evaluator.py +4 -2
  459. eval_studio_client/api/test/test_v1_evaluator_param_type.py +1 -1
  460. eval_studio_client/api/test/test_v1_evaluator_parameter.py +1 -1
  461. eval_studio_client/api/test/test_v1_evaluator_view.py +1 -1
  462. eval_studio_client/api/test/test_v1_finalize_operation_response.py +1 -1
  463. eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +9 -2
  464. eval_studio_client/api/test/test_v1_find_test_lab_response.py +1 -1
  465. eval_studio_client/api/test/test_v1_generate_test_cases_response.py +1 -1
  466. eval_studio_client/api/test/test_v1_get_dashboard_response.py +3 -2
  467. eval_studio_client/api/test/test_v1_get_document_response.py +1 -1
  468. eval_studio_client/api/test/test_v1_get_evaluator_response.py +4 -2
  469. eval_studio_client/api/test/test_v1_get_guardrails_configuration_response.py +51 -0
  470. eval_studio_client/api/test/test_v1_get_info_response.py +7 -2
  471. eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +173 -0
  472. eval_studio_client/api/test/test_v1_get_leaderboard_response.py +1 -1
  473. eval_studio_client/api/test/test_v1_get_model_response.py +1 -1
  474. eval_studio_client/api/test/test_v1_get_operation_progress_by_parent_response.py +1 -1
  475. eval_studio_client/api/test/test_v1_get_operation_response.py +1 -1
  476. eval_studio_client/api/test/test_v1_get_perturbator_response.py +1 -1
  477. eval_studio_client/api/test/test_v1_get_test_case_response.py +9 -2
  478. eval_studio_client/api/test/test_v1_get_test_class_response.py +1 -1
  479. eval_studio_client/api/test/test_v1_get_test_response.py +3 -2
  480. eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +56 -0
  481. eval_studio_client/api/test/test_v1_get_workflow_node_response.py +82 -0
  482. eval_studio_client/api/test/test_v1_get_workflow_response.py +93 -0
  483. eval_studio_client/api/test/test_v1_import_evaluation_request.py +17 -2
  484. eval_studio_client/api/test/test_v1_import_leaderboard_request.py +1 -1
  485. eval_studio_client/api/test/test_v1_import_leaderboard_response.py +1 -1
  486. eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +71 -0
  487. eval_studio_client/api/test/test_v1_import_test_cases_request.py +57 -0
  488. eval_studio_client/api/test/test_v1_info.py +7 -2
  489. eval_studio_client/api/test/test_v1_init_workflow_node_response.py +82 -0
  490. eval_studio_client/api/test/test_v1_insight.py +1 -1
  491. eval_studio_client/api/test/test_v1_labeled_test_case.py +53 -0
  492. eval_studio_client/api/test/test_v1_leaderboard.py +1 -1
  493. eval_studio_client/api/test/test_v1_leaderboard_report.py +172 -0
  494. eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py +52 -0
  495. eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +56 -0
  496. eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py +114 -0
  497. eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py +63 -0
  498. eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py +58 -0
  499. eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py +66 -0
  500. eval_studio_client/api/test/test_v1_leaderboard_report_model.py +60 -0
  501. eval_studio_client/api/test/test_v1_leaderboard_report_result.py +92 -0
  502. eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py +53 -0
  503. eval_studio_client/api/test/test_v1_leaderboard_status.py +1 -1
  504. eval_studio_client/api/test/test_v1_leaderboard_type.py +1 -1
  505. eval_studio_client/api/test/test_v1_leaderboard_view.py +1 -1
  506. eval_studio_client/api/test/test_v1_list_base_models_response.py +1 -1
  507. eval_studio_client/api/test/test_v1_list_dashboards_response.py +3 -2
  508. eval_studio_client/api/test/test_v1_list_documents_response.py +1 -1
  509. eval_studio_client/api/test/test_v1_list_evaluators_response.py +4 -2
  510. eval_studio_client/api/test/test_v1_list_leaderboards_response.py +1 -1
  511. eval_studio_client/api/test/test_v1_list_llm_models_response.py +1 -1
  512. eval_studio_client/api/test/test_v1_list_model_collections_response.py +1 -1
  513. eval_studio_client/api/test/test_v1_list_models_response.py +1 -1
  514. eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +3 -2
  515. eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +1 -1
  516. eval_studio_client/api/test/test_v1_list_most_recent_models_response.py +1 -1
  517. eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +3 -2
  518. eval_studio_client/api/test/test_v1_list_operations_response.py +1 -1
  519. eval_studio_client/api/test/test_v1_list_perturbators_response.py +1 -1
  520. eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +71 -0
  521. eval_studio_client/api/test/test_v1_list_rag_collections_response.py +1 -1
  522. eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +71 -0
  523. eval_studio_client/api/test/test_v1_list_test_case_relationships_response.py +56 -0
  524. eval_studio_client/api/test/test_v1_list_test_cases_response.py +9 -2
  525. eval_studio_client/api/test/test_v1_list_test_classes_response.py +1 -1
  526. eval_studio_client/api/test/test_v1_list_tests_response.py +3 -2
  527. eval_studio_client/api/test/test_v1_list_workflow_dependencies_response.py +93 -0
  528. eval_studio_client/api/test/test_v1_list_workflows_response.py +95 -0
  529. eval_studio_client/api/test/test_v1_metric_score.py +52 -0
  530. eval_studio_client/api/test/test_v1_metric_scores.py +55 -0
  531. eval_studio_client/api/test/test_v1_model.py +1 -1
  532. eval_studio_client/api/test/test_v1_model_type.py +1 -1
  533. eval_studio_client/api/test/test_v1_operation.py +1 -1
  534. eval_studio_client/api/test/test_v1_operation_progress.py +1 -1
  535. eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +68 -0
  536. eval_studio_client/api/test/test_v1_perturb_test_response.py +3 -2
  537. eval_studio_client/api/test/test_v1_perturbator.py +1 -1
  538. eval_studio_client/api/test/test_v1_perturbator_configuration.py +1 -1
  539. eval_studio_client/api/test/test_v1_perturbator_intensity.py +1 -1
  540. eval_studio_client/api/test/test_v1_problem_and_action.py +1 -1
  541. eval_studio_client/api/test/test_v1_process_workflow_node_response.py +71 -0
  542. eval_studio_client/api/test/test_v1_prompt_library_item.py +68 -0
  543. eval_studio_client/api/test/test_v1_repeated_context.py +62 -0
  544. eval_studio_client/api/test/test_v1_repeated_string.py +53 -0
  545. eval_studio_client/api/test/test_v1_reset_workflow_node_response.py +82 -0
  546. eval_studio_client/api/test/test_v1_test.py +3 -2
  547. eval_studio_client/api/test/test_v1_test_case.py +9 -2
  548. eval_studio_client/api/test/test_v1_test_case_relationship.py +1 -1
  549. eval_studio_client/api/test/test_v1_test_cases_generator.py +1 -1
  550. eval_studio_client/api/test/test_v1_test_class.py +1 -1
  551. eval_studio_client/api/test/test_v1_test_class_type.py +1 -1
  552. eval_studio_client/api/test/test_v1_test_lab.py +1 -1
  553. eval_studio_client/api/test/test_v1_test_suite_evaluates.py +33 -0
  554. eval_studio_client/api/test/test_v1_test_type.py +33 -0
  555. eval_studio_client/api/test/test_v1_update_dashboard_response.py +3 -2
  556. eval_studio_client/api/test/test_v1_update_document_response.py +1 -1
  557. eval_studio_client/api/test/test_v1_update_leaderboard_response.py +1 -1
  558. eval_studio_client/api/test/test_v1_update_model_response.py +1 -1
  559. eval_studio_client/api/test/test_v1_update_operation_response.py +1 -1
  560. eval_studio_client/api/test/test_v1_update_test_case_response.py +9 -2
  561. eval_studio_client/api/test/test_v1_update_test_response.py +3 -2
  562. eval_studio_client/api/test/test_v1_update_workflow_node_response.py +82 -0
  563. eval_studio_client/api/test/test_v1_update_workflow_response.py +93 -0
  564. eval_studio_client/api/test/test_v1_who_am_i_response.py +1 -1
  565. eval_studio_client/api/test/test_v1_workflow.py +93 -0
  566. eval_studio_client/api/test/test_v1_workflow_dependency.py +52 -0
  567. eval_studio_client/api/test/test_v1_workflow_edge.py +61 -0
  568. eval_studio_client/api/test/test_v1_workflow_edge_type.py +33 -0
  569. eval_studio_client/api/test/test_v1_workflow_node.py +82 -0
  570. eval_studio_client/api/test/test_v1_workflow_node_artifact.py +62 -0
  571. eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +65 -0
  572. eval_studio_client/api/test/test_v1_workflow_node_attributes.py +51 -0
  573. eval_studio_client/api/test/test_v1_workflow_node_status.py +33 -0
  574. eval_studio_client/api/test/test_v1_workflow_node_type.py +33 -0
  575. eval_studio_client/api/test/test_v1_workflow_node_view.py +33 -0
  576. eval_studio_client/api/test/test_v1_workflow_type.py +33 -0
  577. eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
  578. eval_studio_client/api/test/test_workflow_edge_service_api.py +52 -0
  579. eval_studio_client/api/test/test_workflow_node_service_api.py +94 -0
  580. eval_studio_client/api/test/test_workflow_service_api.py +93 -0
  581. eval_studio_client/api/test/test_workflow_service_clone_workflow_request.py +55 -0
  582. eval_studio_client/client.py +7 -0
  583. eval_studio_client/dashboards.py +29 -0
  584. eval_studio_client/gen/openapiv2/eval_studio.swagger.json +5318 -1884
  585. eval_studio_client/leaderboards.py +123 -0
  586. eval_studio_client/models.py +3 -42
  587. eval_studio_client/test_labs.py +49 -21
  588. eval_studio_client/tests.py +290 -8
  589. {eval_studio_client-1.0.3.dist-info → eval_studio_client-1.1.0.dist-info}/METADATA +1 -2
  590. eval_studio_client-1.1.0.dist-info/RECORD +732 -0
  591. eval_studio_client-1.0.3.dist-info/RECORD +0 -486
  592. {eval_studio_client-1.0.3.dist-info → eval_studio_client-1.1.0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,29 @@
+ # V1InitWorkflowNodeResponse
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **node** | [**V1WorkflowNode**](V1WorkflowNode.md) | | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_init_workflow_node_response import V1InitWorkflowNodeResponse
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1InitWorkflowNodeResponse from a JSON string
+ v1_init_workflow_node_response_instance = V1InitWorkflowNodeResponse.from_json(json)
+ # print the JSON string representation of the object
+ print(V1InitWorkflowNodeResponse.to_json())
+
+ # convert the object into a dict
+ v1_init_workflow_node_response_dict = v1_init_workflow_node_response_instance.to_dict()
+ # create an instance of V1InitWorkflowNodeResponse from a dict
+ v1_init_workflow_node_response_from_dict = V1InitWorkflowNodeResponse.from_dict(v1_init_workflow_node_response_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,31 @@
+ # V1LabeledTestCase
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **name** | **str** | Required. The test case resource name. | [optional]
+ **metric_value** | **float** | Required. The metric value. | [optional]
+ **label** | **bool** | Required. Human label. True means the test case should be labeled as passed (positive), false means failed (negative). | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_labeled_test_case import V1LabeledTestCase
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LabeledTestCase from a JSON string
+ v1_labeled_test_case_instance = V1LabeledTestCase.from_json(json)
+ # print the JSON string representation of the object
+ print(V1LabeledTestCase.to_json())
+
+ # convert the object into a dict
+ v1_labeled_test_case_dict = v1_labeled_test_case_instance.to_dict()
+ # create an instance of V1LabeledTestCase from a dict
+ v1_labeled_test_case_from_dict = V1LabeledTestCase.from_dict(v1_labeled_test_case_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
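Beyond the generated round-trip snippet above, here is a minimal hedged sketch of building a labeled test case from the documented fields; the keyword-argument constructor and instance-level `to_json()` are assumptions about the generated pydantic model surface, not something this diff shows directly.

```python
from eval_studio_client.api.models.v1_labeled_test_case import V1LabeledTestCase

# Assumption: the generated model accepts its documented fields as keyword arguments.
labeled = V1LabeledTestCase(
    name="testCases/example-test-case",  # hypothetical resource name
    metric_value=0.87,
    label=True,  # True = passed (positive), False = failed (negative)
)

# Round-trip through JSON with the documented helpers.
restored = V1LabeledTestCase.from_json(labeled.to_json())
assert restored.to_dict() == labeled.to_dict()
```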
@@ -0,0 +1,32 @@
+ # V1LeaderboardReport
+
+ LeaderboardReport represents the leaderboard report which is formed by the results, models and evaluator.
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **results** | [**List[V1LeaderboardReportResult]**](V1LeaderboardReportResult.md) | Output only. List of per test case results. | [optional] [readonly]
+ **models** | [**List[V1LeaderboardReportModel]**](V1LeaderboardReportModel.md) | Output only. List of models which were used to create the results. | [optional] [readonly]
+ **evaluator** | [**V1LeaderboardReportEvaluator**](V1LeaderboardReportEvaluator.md) | | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report import V1LeaderboardReport
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReport from a JSON string
+ v1_leaderboard_report_instance = V1LeaderboardReport.from_json(json)
+ # print the JSON string representation of the object
+ print(V1LeaderboardReport.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_dict = v1_leaderboard_report_instance.to_dict()
+ # create an instance of V1LeaderboardReport from a dict
+ v1_leaderboard_report_from_dict = V1LeaderboardReport.from_dict(v1_leaderboard_report_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
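To make the nested report structure concrete, a hedged sketch of walking a parsed report follows; the payload would come from the new leaderboard-report endpoints (see LeaderboardReportServiceApi and V1GetLeaderboardReportResponse in the file table), and attribute access on the documented fields assumes the usual generated pydantic models.

```python
from eval_studio_client.api.models.v1_leaderboard_report import V1LeaderboardReport

report_json = "{}"  # placeholder payload, as in the generated example above
report = V1LeaderboardReport.from_json(report_json)

# All three documented fields are optional, so guard against None.
if report.evaluator is not None:
    print("Evaluator:", report.evaluator.display_name)
print("Models used:", len(report.models or []))
print("Per-test-case results:", len(report.results or []))
```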
@@ -0,0 +1,31 @@
+ # V1LeaderboardReportActualOutputData
+
+ ActualOutputData represents the actual output data.
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **text** | **str** | Output only. Text fragment. | [optional] [readonly]
+ **metrics** | **object** | Output only. Metrics parsed as string to Value map. | [optional] [readonly]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report_actual_output_data import V1LeaderboardReportActualOutputData
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReportActualOutputData from a JSON string
+ v1_leaderboard_report_actual_output_data_instance = V1LeaderboardReportActualOutputData.from_json(json)
+ # print the JSON string representation of the object
+ print(V1LeaderboardReportActualOutputData.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_actual_output_data_dict = v1_leaderboard_report_actual_output_data_instance.to_dict()
+ # create an instance of V1LeaderboardReportActualOutputData from a dict
+ v1_leaderboard_report_actual_output_data_from_dict = V1LeaderboardReportActualOutputData.from_dict(v1_leaderboard_report_actual_output_data_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,31 @@
+ # V1LeaderboardReportActualOutputMeta
+
+ ActualOutputMeta represents the metadata about the actual output.
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **tokenization** | **str** | Output only. Actual output data tokenization like sentence_level_punkt. | [optional] [readonly]
+ **data** | [**List[V1LeaderboardReportActualOutputData]**](V1LeaderboardReportActualOutputData.md) | Output only. Actual output data - list of text fragments coupled with the metric values. | [optional] [readonly]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report_actual_output_meta import V1LeaderboardReportActualOutputMeta
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReportActualOutputMeta from a JSON string
+ v1_leaderboard_report_actual_output_meta_instance = V1LeaderboardReportActualOutputMeta.from_json(json)
+ # print the JSON string representation of the object
+ print(V1LeaderboardReportActualOutputMeta.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_actual_output_meta_dict = v1_leaderboard_report_actual_output_meta_instance.to_dict()
+ # create an instance of V1LeaderboardReportActualOutputMeta from a dict
+ v1_leaderboard_report_actual_output_meta_from_dict = V1LeaderboardReportActualOutputMeta.from_dict(v1_leaderboard_report_actual_output_meta_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
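A hedged sketch of reading the per-fragment data described above; field names follow the property tables here and in V1LeaderboardReportActualOutputData.md, and attribute access again assumes the generated pydantic models.

```python
from eval_studio_client.api.models.v1_leaderboard_report_actual_output_meta import (
    V1LeaderboardReportActualOutputMeta,
)

meta = V1LeaderboardReportActualOutputMeta.from_json("{}")  # placeholder payload

print("Tokenization:", meta.tokenization)  # e.g. sentence_level_punkt
for fragment in meta.data or []:
    # Each fragment couples a text span with its metric values (string-to-value map).
    print(fragment.text, fragment.metrics)
```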
@@ -0,0 +1,42 @@
+ # V1LeaderboardReportEvaluator
+
+ Evaluator represents the evaluator which evaluated the model outputs to create the results.
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **id** | **str** | Output only. Evaluator ID. | [optional] [readonly]
+ **name** | **str** | Output only. Evaluator short name based on its class name. | [optional] [readonly]
+ **display_name** | **str** | Output only. Evaluator display name. | [optional] [readonly]
+ **tagline** | **str** | Optional. Evaluator one row description. | [optional]
+ **description** | **str** | Output only. Evaluator description. | [optional] [readonly]
+ **brief_description** | **str** | Optional. Brief description. | [optional]
+ **model_types** | **List[str]** | Output only. List of model types like rag. | [optional] [readonly]
+ **can_explain** | **List[str]** | Optional. List of experiment types the Explainer can explain like regression or multinomial. | [optional]
+ **explanation_scopes** | **List[str]** | Output only. List of explanation scopes like global or local. | [optional] [readonly]
+ **explanations** | [**List[V1LeaderboardReportExplanation]**](V1LeaderboardReportExplanation.md) | Output only. List of explanation types created by the Evaluator. | [optional] [readonly]
+ **parameters** | [**List[V1LeaderboardReportEvaluatorParameter]**](V1LeaderboardReportEvaluatorParameter.md) | Output only. List of parameter type definitions. | [optional] [readonly]
+ **keywords** | **List[str]** | Output only. List of keywords. | [optional] [readonly]
+ **metrics_meta** | [**List[V1LeaderboardReportMetricsMetaEntry]**](V1LeaderboardReportMetricsMetaEntry.md) | Output only. List of metrics metadata for metrics created by the Evaluator. | [optional] [readonly]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report_evaluator import V1LeaderboardReportEvaluator
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReportEvaluator from a JSON string
+ v1_leaderboard_report_evaluator_instance = V1LeaderboardReportEvaluator.from_json(json)
+ # print the JSON string representation of the object
+ print(V1LeaderboardReportEvaluator.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_evaluator_dict = v1_leaderboard_report_evaluator_instance.to_dict()
+ # create an instance of V1LeaderboardReportEvaluator from a dict
+ v1_leaderboard_report_evaluator_from_dict = V1LeaderboardReportEvaluator.from_dict(v1_leaderboard_report_evaluator_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
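As a follow-up to the property table, a hedged sketch of picking out an evaluator's primary metric definitions; the entry fields used here (key, display_name, higher_is_better, threshold, is_primary_metric) are documented in V1LeaderboardReportMetricsMetaEntry.md below.

```python
from eval_studio_client.api.models.v1_leaderboard_report_evaluator import V1LeaderboardReportEvaluator

evaluator = V1LeaderboardReportEvaluator.from_json("{}")  # placeholder payload

for meta in evaluator.metrics_meta or []:
    if meta.is_primary_metric:
        direction = "higher is better" if meta.higher_is_better else "lower is better"
        print(f"{meta.display_name} ({meta.key}): threshold={meta.threshold}, {direction}")
```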
@@ -0,0 +1,38 @@
+ # V1LeaderboardReportEvaluatorParameter
+
+ Evaluation parameter definition.
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **name** | **str** | Output only. Evaluator parameter ID. | [optional] [readonly]
+ **description** | **str** | Output only. Parameter description. | [optional] [readonly]
+ **comment** | **str** | Optional. Parameter comment. | [optional]
+ **type** | **str** | Output only. Parameter type, e.g. float or string. | [optional] [readonly]
+ **predefined** | **List[object]** | Optional. Predefined parameter values (a numeric or non-numeric enum). | [optional]
+ **tags** | **List[str]** | Optional. Parameter tags. | [optional]
+ **min** | **float** | Optional. Lower bound of the parameter value range. | [optional]
+ **max** | **float** | Optional. Upper bound of the parameter value range. | [optional]
+ **category** | **str** | Optional. Parameter category. | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report_evaluator_parameter import V1LeaderboardReportEvaluatorParameter
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReportEvaluatorParameter from a JSON string
+ v1_leaderboard_report_evaluator_parameter_instance = V1LeaderboardReportEvaluatorParameter.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_leaderboard_report_evaluator_parameter_instance.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_evaluator_parameter_dict = v1_leaderboard_report_evaluator_parameter_instance.to_dict()
+ # create an instance of V1LeaderboardReportEvaluatorParameter from a dict
+ v1_leaderboard_report_evaluator_parameter_from_dict = V1LeaderboardReportEvaluatorParameter.from_dict(v1_leaderboard_report_evaluator_parameter_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
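Beyond the generated round trip above, a parameter's `min`, `max`, and `predefined` fields describe the values an evaluator accepts. The snippet below is a minimal sketch, assuming the generated model accepts its documented snake_case fields as constructor keyword arguments; the field values and the `in_range` helper are illustrative, not part of the package.

```python
from eval_studio_client.api.models.v1_leaderboard_report_evaluator_parameter import V1LeaderboardReportEvaluatorParameter

# Hypothetical parameter definition; all values are made up for the example.
temperature_param = V1LeaderboardReportEvaluatorParameter(
    name="temperature",
    description="Sampling temperature used by the judge model.",
    type="float",
    min=0.0,
    max=1.0,
    category="generation",
)

def in_range(param: V1LeaderboardReportEvaluatorParameter, value: float) -> bool:
    """Check a candidate value against the parameter's declared bounds."""
    lo = param.min if param.min is not None else float("-inf")
    hi = param.max if param.max is not None else float("inf")
    return lo <= value <= hi

print(in_range(temperature_param, 0.7))   # True
print(in_range(temperature_param, 1.5))   # False
```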
@@ -0,0 +1,34 @@
+ # V1LeaderboardReportExplanation
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **explanation_type** | **str** | Output only. Explanation type ID. | [optional] [readonly]
+ **name** | **str** | Output only. Explanation display name. | [optional] [readonly]
+ **category** | **str** | Output only. Explanation display category. | [optional] [readonly]
+ **scope** | **str** | Optional. Explanation scope, e.g. global or local. | [optional]
+ **has_local** | **str** | Optional. ID of the local explanation type associated with this global explanation. | [optional]
+ **formats** | **List[str]** | Optional. List of formats available for the explanation. | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report_explanation import V1LeaderboardReportExplanation
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReportExplanation from a JSON string
+ v1_leaderboard_report_explanation_instance = V1LeaderboardReportExplanation.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_leaderboard_report_explanation_instance.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_explanation_dict = v1_leaderboard_report_explanation_instance.to_dict()
+ # create an instance of V1LeaderboardReportExplanation from a dict
+ v1_leaderboard_report_explanation_from_dict = V1LeaderboardReportExplanation.from_dict(v1_leaderboard_report_explanation_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
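Because `scope` distinguishes global from local explanations and `has_local` links a global explanation to its local counterpart, a consumer will typically group report explanations by scope. A minimal sketch, assuming keyword construction of the generated model; the explanation types, names, and scope strings below are illustrative only.

```python
from collections import defaultdict

from eval_studio_client.api.models.v1_leaderboard_report_explanation import V1LeaderboardReportExplanation

# Illustrative explanation entries; types, names, and scope values are made up.
explanations = [
    V1LeaderboardReportExplanation(explanation_type="summary", name="Report summary", scope="global", has_local="per-result-summary"),
    V1LeaderboardReportExplanation(explanation_type="per-result-summary", name="Per-result summary", scope="local"),
]

# Group by scope so global and local explanations can be rendered separately.
by_scope = defaultdict(list)
for exp in explanations:
    by_scope[exp.scope or "unknown"].append(exp)

# For each global explanation, report the local explanation type it references, if any.
for exp in by_scope["global"]:
    print(exp.name, "->", exp.has_local or "no local counterpart")
```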
@@ -0,0 +1,41 @@
+ # V1LeaderboardReportMetricsMetaEntry
+
+ MetricsMetaEntry represents the metadata about the metric.
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **key** | **str** | Output only. Metric key. | [optional] [readonly]
+ **display_name** | **str** | Output only. Metric display name. | [optional] [readonly]
+ **data_type** | **str** | Output only. Metric data type, e.g. float or string. | [optional] [readonly]
+ **display_value** | **str** | Output only. Metric display value. | [optional] [readonly]
+ **description** | **str** | Output only. Metric description. | [optional] [readonly]
+ **value_range** | **List[float]** | Optional. Metric value range for numeric scores. | [optional]
+ **value_enum** | **List[str]** | Optional. Metric value enum for non-numeric scores. | [optional]
+ **higher_is_better** | **bool** | Output only. Whether a higher metric value is better. | [optional] [readonly]
+ **threshold** | **float** | Output only. Metric threshold. | [optional] [readonly]
+ **is_primary_metric** | **bool** | Output only. Whether this is the primary metric. | [optional] [readonly]
+ **parent_metric** | **str** | Output only. Parent metric of this metric. | [optional] [readonly]
+ **exclude** | **bool** | Output only. Whether to exclude the metric. | [optional] [readonly]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report_metrics_meta_entry import V1LeaderboardReportMetricsMetaEntry
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReportMetricsMetaEntry from a JSON string
+ v1_leaderboard_report_metrics_meta_entry_instance = V1LeaderboardReportMetricsMetaEntry.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_leaderboard_report_metrics_meta_entry_instance.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_metrics_meta_entry_dict = v1_leaderboard_report_metrics_meta_entry_instance.to_dict()
+ # create an instance of V1LeaderboardReportMetricsMetaEntry from a dict
+ v1_leaderboard_report_metrics_meta_entry_from_dict = V1LeaderboardReportMetricsMetaEntry.from_dict(v1_leaderboard_report_metrics_meta_entry_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
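The `threshold` and `higher_is_better` fields are enough to turn a raw score into a pass/fail signal. A small sketch under the same assumptions as above (keyword construction of the generated model); the metric key, range, and threshold are invented for illustration, and `passes` is a hypothetical helper, not part of the client.

```python
from eval_studio_client.api.models.v1_leaderboard_report_metrics_meta_entry import V1LeaderboardReportMetricsMetaEntry

# Illustrative metric metadata; key, range, and threshold are made up.
faithfulness_meta = V1LeaderboardReportMetricsMetaEntry(
    key="faithfulness",
    display_name="Faithfulness",
    data_type="float",
    value_range=[0.0, 1.0],
    higher_is_better=True,
    threshold=0.75,
    is_primary_metric=True,
)

def passes(meta: V1LeaderboardReportMetricsMetaEntry, score: float) -> bool:
    """Compare a score against the metric's threshold, honoring higher_is_better."""
    if meta.threshold is None:
        return True
    higher = meta.higher_is_better if meta.higher_is_better is not None else True
    return score >= meta.threshold if higher else score <= meta.threshold

print(passes(faithfulness_meta, 0.9))  # True
print(passes(faithfulness_meta, 0.5))  # False
```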
@@ -0,0 +1,37 @@
+ # V1LeaderboardReportModel
+
+ Model represents the model whose outputs were evaluated to produce the results.
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **connection** | **str** | Output only. Connection key. | [optional] [readonly]
+ **model_type** | **str** | Output only. Model type. | [optional] [readonly]
+ **name** | **str** | Output only. Model display name. | [optional] [readonly]
+ **collection_id** | **str** | Optional. Collection ID. | [optional]
+ **collection_name** | **str** | Optional. Collection name. | [optional]
+ **llm_model_name** | **str** | Output only. LLM model name. | [optional] [readonly]
+ **documents** | **List[str]** | Output only. List of documents. | [optional] [readonly]
+ **key** | **str** | Output only. Model key. | [optional] [readonly]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report_model import V1LeaderboardReportModel
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReportModel from a JSON string
+ v1_leaderboard_report_model_instance = V1LeaderboardReportModel.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_leaderboard_report_model_instance.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_model_dict = v1_leaderboard_report_model_instance.to_dict()
+ # create an instance of V1LeaderboardReportModel from a dict
+ v1_leaderboard_report_model_from_dict = V1LeaderboardReportModel.from_dict(v1_leaderboard_report_model_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
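Because each result row refers back to its model only through `model_key`, it is convenient to index models by `key` before walking the results. A minimal sketch, assuming keyword construction of the generated model; the keys and model names are made up.

```python
from eval_studio_client.api.models.v1_leaderboard_report_model import V1LeaderboardReportModel

# Illustrative models; keys, display names, and LLM names are made up.
models = [
    V1LeaderboardReportModel(key="rag-a", name="RAG baseline", llm_model_name="gpt-4o-mini"),
    V1LeaderboardReportModel(key="rag-b", name="RAG + reranker", llm_model_name="gpt-4o-mini"),
]

# Index by key so results (which carry only model_key) can be resolved to display names.
models_by_key = {m.key: m for m in models}
print(models_by_key["rag-b"].name)
```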
@@ -0,0 +1,45 @@
+ # V1LeaderboardReportResult
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **key** | **str** | Output only. Composite unique key of the result, formed from the model key and test case key. | [optional] [readonly]
+ **input** | **str** | Output only. Input prompt or text to be processed. | [optional] [readonly]
+ **corpus** | **List[str]** | Output only. Collection of corpus documents to be used during evaluation. | [optional] [readonly]
+ **context** | **List[str]** | Output only. List of contextual information or references. | [optional] [readonly]
+ **categories** | **List[str]** | Output only. List of categories or labels for classification. | [optional] [readonly]
+ **relationships** | [**List[V1LeaderboardReportResultRelationship]**](V1LeaderboardReportResultRelationship.md) | Output only. List of relationships or associations between entities. | [optional] [readonly]
+ **expected_output** | **str** | Output only. Expected output or target result. | [optional] [readonly]
+ **output_constraints** | **List[str]** | Output only. List of constraints that should be applied to the output. | [optional] [readonly]
+ **output_condition** | **str** | Output only. Condition that the output should satisfy. | [optional] [readonly]
+ **actual_output** | **str** | Output only. Actual output produced by the model. | [optional] [readonly]
+ **actual_duration** | **float** | Output only. Duration of processing in seconds. | [optional] [readonly]
+ **cost** | **float** | Output only. Cost of processing in currency units. | [optional] [readonly]
+ **model_key** | **str** | Output only. Unique identifier for the model used. | [optional] [readonly]
+ **test_case_key** | **str** | Output only. Unique identifier for the test case. | [optional] [readonly]
+ **metrics** | [**List[V1MetricScore]**](V1MetricScore.md) | Optional. All metric values for the result. | [optional]
+ **result_error_message** | **str** | Output only. Error message if processing failed. | [optional] [readonly]
+ **actual_output_meta** | [**List[V1LeaderboardReportActualOutputMeta]**](V1LeaderboardReportActualOutputMeta.md) | Output only. Additional metadata about the actual output. | [optional] [readonly]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report_result import V1LeaderboardReportResult
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReportResult from a JSON string
+ v1_leaderboard_report_result_instance = V1LeaderboardReportResult.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_leaderboard_report_result_instance.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_result_dict = v1_leaderboard_report_result_instance.to_dict()
+ # create an instance of V1LeaderboardReportResult from a dict
+ v1_leaderboard_report_result_from_dict = V1LeaderboardReportResult.from_dict(v1_leaderboard_report_result_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
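With models indexed by key (see the sketch above), per-model aggregates fall out of the documented `model_key`, `actual_duration`, and `cost` fields. The following is a hedged sketch, again assuming keyword construction of the generated models; all keys, durations, costs, and scores are invented for illustration.

```python
from collections import defaultdict

from eval_studio_client.api.models.v1_leaderboard_report_result import V1LeaderboardReportResult
from eval_studio_client.api.models.v1_metric_score import V1MetricScore

# Illustrative results; keys, durations, costs, and metric values are made up.
results = [
    V1LeaderboardReportResult(model_key="rag-a", test_case_key="tc-1", actual_duration=1.8, cost=0.002,
                              metrics=[V1MetricScore(key="faithfulness", value=0.91)]),
    V1LeaderboardReportResult(model_key="rag-a", test_case_key="tc-2", actual_duration=2.4, cost=0.003,
                              metrics=[V1MetricScore(key="faithfulness", value=0.64)]),
]

# Collect processing durations per model key, skipping results without one.
durations = defaultdict(list)
for r in results:
    if r.actual_duration is not None:
        durations[r.model_key].append(r.actual_duration)

# Average duration (in seconds) per model.
for model_key, values in durations.items():
    print(model_key, sum(values) / len(values))
```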
@@ -0,0 +1,32 @@
+ # V1LeaderboardReportResultRelationship
+
+ Relationship represents the relationship between result entries.
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **type** | **str** | Output only. Type of the relationship. | [optional] [readonly]
+ **target** | **str** | Output only. Source result of the relationship. | [optional] [readonly]
+ **target_type** | **str** | Output only. Target type of the relationship, e.g. test_case. | [optional] [readonly]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_leaderboard_report_result_relationship import V1LeaderboardReportResultRelationship
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1LeaderboardReportResultRelationship from a JSON string
+ v1_leaderboard_report_result_relationship_instance = V1LeaderboardReportResultRelationship.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_leaderboard_report_result_relationship_instance.to_json())
+
+ # convert the object into a dict
+ v1_leaderboard_report_result_relationship_dict = v1_leaderboard_report_result_relationship_instance.to_dict()
+ # create an instance of V1LeaderboardReportResultRelationship from a dict
+ v1_leaderboard_report_result_relationship_from_dict = V1LeaderboardReportResultRelationship.from_dict(v1_leaderboard_report_result_relationship_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+ # V1ListPromptLibraryItemsResponse
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **prompt_library_items** | [**List[V1PromptLibraryItem]**](V1PromptLibraryItem.md) | Prompt library items (test suites). | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_list_prompt_library_items_response import V1ListPromptLibraryItemsResponse
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1ListPromptLibraryItemsResponse from a JSON string
+ v1_list_prompt_library_items_response_instance = V1ListPromptLibraryItemsResponse.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_list_prompt_library_items_response_instance.to_json())
+
+ # convert the object into a dict
+ v1_list_prompt_library_items_response_dict = v1_list_prompt_library_items_response_instance.to_dict()
+ # create an instance of V1ListPromptLibraryItemsResponse from a dict
+ v1_list_prompt_library_items_response_from_dict = V1ListPromptLibraryItemsResponse.from_dict(v1_list_prompt_library_items_response_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
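This wrapper, like the other `List*Response` messages documented below, carries a single repeated field that may be absent from the payload, so it pays to guard against `None` before iterating. A minimal consumption sketch using the generated `from_json` helper shown above, and assuming the nested `V1PromptLibraryItem` objects expose the same `to_dict` helper as the other generated models:

```python
from eval_studio_client.api.models.v1_list_prompt_library_items_response import V1ListPromptLibraryItemsResponse

# Parse a (here empty) payload; in practice this comes from the PromptLibraryService call.
response = V1ListPromptLibraryItemsResponse.from_json("{}")

# Guard against None: an optional repeated field may be missing from the payload entirely.
items = response.prompt_library_items or []
print(f"{len(items)} prompt library item(s)")
for item in items:
    print(item.to_dict())
```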
@@ -0,0 +1,29 @@
+ # V1ListTestCaseLibraryItemsResponse
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **prompt_library_items** | [**List[V1PromptLibraryItem]**](V1PromptLibraryItem.md) | Test suite library items. | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_list_test_case_library_items_response import V1ListTestCaseLibraryItemsResponse
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1ListTestCaseLibraryItemsResponse from a JSON string
+ v1_list_test_case_library_items_response_instance = V1ListTestCaseLibraryItemsResponse.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_list_test_case_library_items_response_instance.to_json())
+
+ # convert the object into a dict
+ v1_list_test_case_library_items_response_dict = v1_list_test_case_library_items_response_instance.to_dict()
+ # create an instance of V1ListTestCaseLibraryItemsResponse from a dict
+ v1_list_test_case_library_items_response_from_dict = V1ListTestCaseLibraryItemsResponse.from_dict(v1_list_test_case_library_items_response_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+ # V1ListTestCaseRelationshipsResponse
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **test_case_relationships** | [**List[V1TestCaseRelationship]**](V1TestCaseRelationship.md) | The TestCaseRelationships that were requested. | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_list_test_case_relationships_response import V1ListTestCaseRelationshipsResponse
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1ListTestCaseRelationshipsResponse from a JSON string
+ v1_list_test_case_relationships_response_instance = V1ListTestCaseRelationshipsResponse.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_list_test_case_relationships_response_instance.to_json())
+
+ # convert the object into a dict
+ v1_list_test_case_relationships_response_dict = v1_list_test_case_relationships_response_instance.to_dict()
+ # create an instance of V1ListTestCaseRelationshipsResponse from a dict
+ v1_list_test_case_relationships_response_from_dict = V1ListTestCaseRelationshipsResponse.from_dict(v1_list_test_case_relationships_response_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,30 @@
+ # V1ListWorkflowDependenciesResponse
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **nodes** | [**List[V1WorkflowNode]**](V1WorkflowNode.md) | The list of WorkflowNodes related to the requested workflow. | [optional]
+ **dependencies** | [**List[V1DependencyList]**](V1DependencyList.md) | The dependency map for the workflow. | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_list_workflow_dependencies_response import V1ListWorkflowDependenciesResponse
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1ListWorkflowDependenciesResponse from a JSON string
+ v1_list_workflow_dependencies_response_instance = V1ListWorkflowDependenciesResponse.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_list_workflow_dependencies_response_instance.to_json())
+
+ # convert the object into a dict
+ v1_list_workflow_dependencies_response_dict = v1_list_workflow_dependencies_response_instance.to_dict()
+ # create an instance of V1ListWorkflowDependenciesResponse from a dict
+ v1_list_workflow_dependencies_response_from_dict = V1ListWorkflowDependenciesResponse.from_dict(v1_list_workflow_dependencies_response_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
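The response pairs the workflow's nodes with a dependency map, though how entries in `dependencies` correspond to entries in `nodes` is not spelled out in this file. A cautious sketch that reads only the two documented fields:

```python
from eval_studio_client.api.models.v1_list_workflow_dependencies_response import V1ListWorkflowDependenciesResponse

# Parse a (here empty) payload; in practice this comes from the workflow dependencies call.
response = V1ListWorkflowDependenciesResponse.from_json("{}")

nodes = response.nodes or []
dependencies = response.dependencies or []

# The node-to-dependency mapping is not described in this document, so only report counts.
print(f"{len(nodes)} workflow node(s), {len(dependencies)} dependency list(s)")
```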
@@ -0,0 +1,29 @@
+ # V1ListWorkflowsResponse
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **workflows** | [**List[V1Workflow]**](V1Workflow.md) | The Workflows requested. | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_list_workflows_response import V1ListWorkflowsResponse
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1ListWorkflowsResponse from a JSON string
+ v1_list_workflows_response_instance = V1ListWorkflowsResponse.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_list_workflows_response_instance.to_json())
+
+ # convert the object into a dict
+ v1_list_workflows_response_dict = v1_list_workflows_response_instance.to_dict()
+ # create an instance of V1ListWorkflowsResponse from a dict
+ v1_list_workflows_response_from_dict = V1ListWorkflowsResponse.from_dict(v1_list_workflows_response_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,31 @@
+ # V1MetricScore
+
+ MetricScore represents the metric score.
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **key** | **str** | Required. Metric key. | [optional]
+ **value** | **float** | Required. Metric value; may be NaN, Infinity, or -Infinity in its float representation. | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_metric_score import V1MetricScore
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1MetricScore from a JSON string
+ v1_metric_score_instance = V1MetricScore.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_metric_score_instance.to_json())
+
+ # convert the object into a dict
+ v1_metric_score_dict = v1_metric_score_instance.to_dict()
+ # create an instance of V1MetricScore from a dict
+ v1_metric_score_from_dict = V1MetricScore.from_dict(v1_metric_score_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+ # V1MetricScores
+
+
+ ## Properties
+
+ Name | Type | Description | Notes
+ ------------ | ------------- | ------------- | -------------
+ **scores** | [**List[V1MetricScore]**](V1MetricScore.md) | Required. The metric scores. | [optional]
+
+ ## Example
+
+ ```python
+ from eval_studio_client.api.models.v1_metric_scores import V1MetricScores
+
+ # TODO update the JSON string below
+ json = "{}"
+ # create an instance of V1MetricScores from a JSON string
+ v1_metric_scores_instance = V1MetricScores.from_json(json)
+ # print the JSON string representation of the object
+ print(v1_metric_scores_instance.to_json())
+
+ # convert the object into a dict
+ v1_metric_scores_dict = v1_metric_scores_instance.to_dict()
+ # create an instance of V1MetricScores from a dict
+ v1_metric_scores_from_dict = V1MetricScores.from_dict(v1_metric_scores_dict)
+ ```
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
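Because a `V1MetricScore.value` may legitimately be NaN or ±Infinity, it is safer to drop non-finite scores before aggregating a `V1MetricScores` collection. A minimal sketch, assuming keyword construction of the generated models; the metric keys and values are illustrative only.

```python
import math

from eval_studio_client.api.models.v1_metric_score import V1MetricScore
from eval_studio_client.api.models.v1_metric_scores import V1MetricScores

# Illustrative scores; one value is deliberately NaN to mimic a failed measurement.
scores = V1MetricScores(scores=[
    V1MetricScore(key="faithfulness", value=0.91),
    V1MetricScore(key="toxicity", value=0.02),
    V1MetricScore(key="latency", value=float("nan")),
])

# Keep only finite, present values before any aggregation or reporting.
finite = [s for s in (scores.scores or []) if s.value is not None and math.isfinite(s.value)]
print({s.key: s.value for s in finite})
```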