eval-studio-client 1.2.5__py3-none-any.whl → 1.3.0a1__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (306)
  1. eval_studio_client/api/__init__.py +65 -0
  2. eval_studio_client/api/api/__init__.py +3 -0
  3. eval_studio_client/api/api/dashboard_report_service_api.py +292 -0
  4. eval_studio_client/api/api/dashboard_service_api.py +16 -16
  5. eval_studio_client/api/api/dashboard_test_case_annotation_service_api.py +611 -0
  6. eval_studio_client/api/api/document_service_api.py +16 -16
  7. eval_studio_client/api/api/evaluation_service_api.py +12 -12
  8. eval_studio_client/api/api/evaluator_service_api.py +16 -16
  9. eval_studio_client/api/api/leaderboard_report_service_api.py +304 -17
  10. eval_studio_client/api/api/leaderboard_service_api.py +554 -16
  11. eval_studio_client/api/api/leaderboard_test_case_annotation_service_api.py +611 -0
  12. eval_studio_client/api/api/model_service_api.py +16 -16
  13. eval_studio_client/api/api/operation_service_api.py +821 -17
  14. eval_studio_client/api/api/perturbator_service_api.py +22 -22
  15. eval_studio_client/api/api/test_case_service_api.py +300 -16
  16. eval_studio_client/api/api/test_class_service_api.py +16 -16
  17. eval_studio_client/api/api/test_service_api.py +285 -16
  18. eval_studio_client/api/api/workflow_node_service_api.py +16 -16
  19. eval_studio_client/api/api/workflow_service_api.py +16 -16
  20. eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +2 -1
  21. eval_studio_client/api/docs/DashboardReportServiceApi.md +75 -0
  22. eval_studio_client/api/docs/DashboardServiceApi.md +5 -5
  23. eval_studio_client/api/docs/DashboardTestCaseAnnotationServiceApi.md +149 -0
  24. eval_studio_client/api/docs/DocumentServiceApi.md +5 -5
  25. eval_studio_client/api/docs/EvaluationServiceApi.md +4 -4
  26. eval_studio_client/api/docs/EvaluatorServiceApi.md +5 -5
  27. eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -5
  28. eval_studio_client/api/docs/LeaderboardServiceApi.md +141 -5
  29. eval_studio_client/api/docs/LeaderboardTestCaseAnnotationServiceApi.md +149 -0
  30. eval_studio_client/api/docs/ModelServiceApi.md +5 -5
  31. eval_studio_client/api/docs/OperationServiceApi.md +215 -8
  32. eval_studio_client/api/docs/PerturbatorServiceApi.md +7 -7
  33. eval_studio_client/api/docs/RequiredTheDashboardTestCaseAnnotationToUpdate.md +35 -0
  34. eval_studio_client/api/docs/RequiredTheLeaderboardTestCaseAnnotationToUpdate.md +35 -0
  35. eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +1 -0
  36. eval_studio_client/api/docs/RequiredTheOperationToFinalize.md +1 -0
  37. eval_studio_client/api/docs/RequiredTheOperationToUpdate.md +1 -0
  38. eval_studio_client/api/docs/TestCaseServiceApi.md +75 -5
  39. eval_studio_client/api/docs/TestCaseServiceAppendTestCasesRequest.md +30 -0
  40. eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
  41. eval_studio_client/api/docs/TestServiceApi.md +73 -5
  42. eval_studio_client/api/docs/V1ActualOutputMeta.md +30 -0
  43. eval_studio_client/api/docs/V1ActualOutputMetaDiff.md +36 -0
  44. eval_studio_client/api/docs/V1AgentChatActivityDiagram.md +31 -0
  45. eval_studio_client/api/docs/V1AgentChatActivityDiagramEdge.md +32 -0
  46. eval_studio_client/api/docs/V1AgentChatActivityDiagramNode.md +32 -0
  47. eval_studio_client/api/docs/V1AgentChatActivityDiagramRow.md +30 -0
  48. eval_studio_client/api/docs/V1AgentChatScriptUsage.md +33 -0
  49. eval_studio_client/api/docs/V1AgentChatScriptsBarChart.md +30 -0
  50. eval_studio_client/api/docs/V1AgentChatToolUsage.md +33 -0
  51. eval_studio_client/api/docs/V1AgentChatToolsBarChart.md +30 -0
  52. eval_studio_client/api/docs/V1AllMetricScores.md +29 -0
  53. eval_studio_client/api/docs/V1AppendTestCasesResponse.md +29 -0
  54. eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheRequest.md +31 -0
  55. eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheResponse.md +29 -0
  56. eval_studio_client/api/docs/V1BatchMarkOperationSeenByCreatorResponse.md +29 -0
  57. eval_studio_client/api/docs/V1CmpLeaderboardReportsRequest.md +33 -0
  58. eval_studio_client/api/docs/V1CmpLeaderboardReportsResponse.md +29 -0
  59. eval_studio_client/api/docs/V1ComparisonItem.md +36 -0
  60. eval_studio_client/api/docs/V1ComparisonMetricScore.md +30 -0
  61. eval_studio_client/api/docs/V1ComparisonResult.md +31 -0
  62. eval_studio_client/api/docs/V1ComparisonSummary.md +31 -0
  63. eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
  64. eval_studio_client/api/docs/V1CreateTestFromTestCasesRequest.md +32 -0
  65. eval_studio_client/api/docs/V1CreateTestFromTestCasesResponse.md +29 -0
  66. eval_studio_client/api/docs/V1DashboardReport.md +31 -0
  67. eval_studio_client/api/docs/V1DashboardReportResult.md +39 -0
  68. eval_studio_client/api/docs/V1DashboardTestCaseAnnotation.md +36 -0
  69. eval_studio_client/api/docs/V1DataFragment.md +31 -0
  70. eval_studio_client/api/docs/V1DeepCompareLeaderboardsRequest.md +33 -0
  71. eval_studio_client/api/docs/V1DeepCompareLeaderboardsResponse.md +29 -0
  72. eval_studio_client/api/docs/V1DiffItem.md +36 -0
  73. eval_studio_client/api/docs/V1EvaluationType.md +12 -0
  74. eval_studio_client/api/docs/V1FlippedMetric.md +31 -0
  75. eval_studio_client/api/docs/V1GetDashboardReportResponse.md +29 -0
  76. eval_studio_client/api/docs/V1HumanDecision.md +12 -0
  77. eval_studio_client/api/docs/V1Info.md +1 -0
  78. eval_studio_client/api/docs/V1Leaderboard.md +1 -0
  79. eval_studio_client/api/docs/V1LeaderboardCmpReport.md +30 -0
  80. eval_studio_client/api/docs/V1LeaderboardComparisonItem.md +31 -0
  81. eval_studio_client/api/docs/V1LeaderboardInfo.md +30 -0
  82. eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +6 -3
  83. eval_studio_client/api/docs/V1LeaderboardReportResult.md +11 -8
  84. eval_studio_client/api/docs/V1LeaderboardReportResultView.md +12 -0
  85. eval_studio_client/api/docs/V1LeaderboardTestCaseAnnotation.md +36 -0
  86. eval_studio_client/api/docs/V1ListDashboardTestCaseAnnotationsResponse.md +29 -0
  87. eval_studio_client/api/docs/V1ListLeaderboardTestCaseAnnotationsResponse.md +29 -0
  88. eval_studio_client/api/docs/V1ListOperationsResponse.md +1 -0
  89. eval_studio_client/api/docs/V1ListUnseenOperationsResponse.md +30 -0
  90. eval_studio_client/api/docs/V1MarkOperationSeenByCreatorResponse.md +29 -0
  91. eval_studio_client/api/docs/V1Metric.md +30 -0
  92. eval_studio_client/api/docs/V1MetricAverage.md +36 -0
  93. eval_studio_client/api/docs/V1MetricMeta.md +40 -0
  94. eval_studio_client/api/docs/V1MetricScore.md +1 -1
  95. eval_studio_client/api/docs/V1MetricScores.md +1 -1
  96. eval_studio_client/api/docs/V1ModelType.md +1 -1
  97. eval_studio_client/api/docs/V1ModelsComparisons.md +32 -0
  98. eval_studio_client/api/docs/V1ModelsComparisonsMetrics.md +33 -0
  99. eval_studio_client/api/docs/V1ModelsOverview.md +34 -0
  100. eval_studio_client/api/docs/V1Operation.md +1 -0
  101. eval_studio_client/api/docs/V1OperationView.md +12 -0
  102. eval_studio_client/api/docs/V1RetrievedContextDiff.md +36 -0
  103. eval_studio_client/api/docs/V1Stats.md +2 -0
  104. eval_studio_client/api/docs/V1TechnicalMetrics.md +30 -0
  105. eval_studio_client/api/docs/V1TechnicalMetricsDetail.md +33 -0
  106. eval_studio_client/api/docs/V1TestCaseLeaderboardItem.md +31 -0
  107. eval_studio_client/api/docs/V1TestCaseRelationshipInfo.md +31 -0
  108. eval_studio_client/api/docs/V1TestCaseResult.md +48 -0
  109. eval_studio_client/api/docs/V1TextSimilarityMetric.md +12 -0
  110. eval_studio_client/api/docs/V1UpdateDashboardTestCaseAnnotationResponse.md +29 -0
  111. eval_studio_client/api/docs/V1UpdateLeaderboardTestCaseAnnotationResponse.md +29 -0
  112. eval_studio_client/api/docs/WorkflowNodeServiceApi.md +5 -5
  113. eval_studio_client/api/docs/WorkflowServiceApi.md +5 -5
  114. eval_studio_client/api/models/__init__.py +62 -0
  115. eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +17 -2
  116. eval_studio_client/api/models/required_the_dashboard_test_case_annotation_to_update.py +108 -0
  117. eval_studio_client/api/models/required_the_leaderboard_test_case_annotation_to_update.py +108 -0
  118. eval_studio_client/api/models/required_the_leaderboard_to_update.py +5 -2
  119. eval_studio_client/api/models/required_the_operation_to_finalize.py +6 -2
  120. eval_studio_client/api/models/required_the_operation_to_update.py +6 -2
  121. eval_studio_client/api/models/test_case_service_append_test_cases_request.py +89 -0
  122. eval_studio_client/api/models/v1_actual_output_meta.py +97 -0
  123. eval_studio_client/api/models/v1_actual_output_meta_diff.py +101 -0
  124. eval_studio_client/api/models/v1_agent_chat_activity_diagram.py +109 -0
  125. eval_studio_client/api/models/v1_agent_chat_activity_diagram_edge.py +97 -0
  126. eval_studio_client/api/models/v1_agent_chat_activity_diagram_node.py +97 -0
  127. eval_studio_client/api/models/v1_agent_chat_activity_diagram_row.py +97 -0
  128. eval_studio_client/api/models/v1_agent_chat_script_usage.py +101 -0
  129. eval_studio_client/api/models/v1_agent_chat_scripts_bar_chart.py +102 -0
  130. eval_studio_client/api/models/v1_agent_chat_tool_usage.py +101 -0
  131. eval_studio_client/api/models/v1_agent_chat_tools_bar_chart.py +102 -0
  132. eval_studio_client/api/models/v1_all_metric_scores.py +87 -0
  133. eval_studio_client/api/models/v1_append_test_cases_response.py +95 -0
  134. eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_request.py +99 -0
  135. eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_response.py +91 -0
  136. eval_studio_client/api/models/v1_batch_mark_operation_seen_by_creator_response.py +95 -0
  137. eval_studio_client/api/models/v1_cmp_leaderboard_reports_request.py +96 -0
  138. eval_studio_client/api/models/v1_cmp_leaderboard_reports_response.py +91 -0
  139. eval_studio_client/api/models/v1_comparison_item.py +130 -0
  140. eval_studio_client/api/models/v1_comparison_metric_score.py +89 -0
  141. eval_studio_client/api/models/v1_comparison_result.py +120 -0
  142. eval_studio_client/api/models/v1_comparison_summary.py +91 -0
  143. eval_studio_client/api/models/v1_create_evaluation_request.py +5 -2
  144. eval_studio_client/api/models/v1_create_test_from_test_cases_request.py +93 -0
  145. eval_studio_client/api/models/v1_create_test_from_test_cases_response.py +91 -0
  146. eval_studio_client/api/models/v1_dashboard_report.py +109 -0
  147. eval_studio_client/api/models/v1_dashboard_report_result.py +139 -0
  148. eval_studio_client/api/models/v1_dashboard_test_case_annotation.py +112 -0
  149. eval_studio_client/api/models/v1_data_fragment.py +91 -0
  150. eval_studio_client/api/models/v1_deep_compare_leaderboards_request.py +96 -0
  151. eval_studio_client/api/models/v1_deep_compare_leaderboards_response.py +91 -0
  152. eval_studio_client/api/models/v1_diff_item.py +137 -0
  153. eval_studio_client/api/models/v1_evaluation_type.py +39 -0
  154. eval_studio_client/api/models/v1_flipped_metric.py +91 -0
  155. eval_studio_client/api/models/v1_get_dashboard_report_response.py +91 -0
  156. eval_studio_client/api/models/v1_human_decision.py +38 -0
  157. eval_studio_client/api/models/v1_info.py +4 -2
  158. eval_studio_client/api/models/v1_leaderboard.py +5 -2
  159. eval_studio_client/api/models/v1_leaderboard_cmp_report.py +93 -0
  160. eval_studio_client/api/models/v1_leaderboard_comparison_item.py +91 -0
  161. eval_studio_client/api/models/v1_leaderboard_info.py +97 -0
  162. eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +23 -9
  163. eval_studio_client/api/models/v1_leaderboard_report_result.py +21 -10
  164. eval_studio_client/api/models/v1_leaderboard_report_result_view.py +38 -0
  165. eval_studio_client/api/models/v1_leaderboard_test_case_annotation.py +112 -0
  166. eval_studio_client/api/models/v1_list_dashboard_test_case_annotations_response.py +95 -0
  167. eval_studio_client/api/models/v1_list_leaderboard_test_case_annotations_response.py +95 -0
  168. eval_studio_client/api/models/v1_list_operations_response.py +5 -3
  169. eval_studio_client/api/models/v1_list_unseen_operations_response.py +97 -0
  170. eval_studio_client/api/models/v1_mark_operation_seen_by_creator_response.py +91 -0
  171. eval_studio_client/api/models/v1_metric.py +89 -0
  172. eval_studio_client/api/models/v1_metric_average.py +101 -0
  173. eval_studio_client/api/models/v1_metric_meta.py +109 -0
  174. eval_studio_client/api/models/v1_metric_score.py +6 -1
  175. eval_studio_client/api/models/v1_metric_scores.py +1 -1
  176. eval_studio_client/api/models/v1_model_type.py +2 -1
  177. eval_studio_client/api/models/v1_models_comparisons.py +93 -0
  178. eval_studio_client/api/models/v1_models_comparisons_metrics.py +103 -0
  179. eval_studio_client/api/models/v1_models_overview.py +97 -0
  180. eval_studio_client/api/models/v1_operation.py +6 -2
  181. eval_studio_client/api/models/v1_operation_view.py +38 -0
  182. eval_studio_client/api/models/v1_retrieved_context_diff.py +101 -0
  183. eval_studio_client/api/models/v1_stats.py +16 -2
  184. eval_studio_client/api/models/v1_technical_metrics.py +96 -0
  185. eval_studio_client/api/models/v1_technical_metrics_detail.py +95 -0
  186. eval_studio_client/api/models/v1_test_case_leaderboard_item.py +91 -0
  187. eval_studio_client/api/models/v1_test_case_relationship_info.py +91 -0
  188. eval_studio_client/api/models/v1_test_case_result.py +157 -0
  189. eval_studio_client/api/models/v1_text_similarity_metric.py +39 -0
  190. eval_studio_client/api/models/v1_update_dashboard_test_case_annotation_response.py +91 -0
  191. eval_studio_client/api/models/v1_update_leaderboard_test_case_annotation_response.py +91 -0
  192. eval_studio_client/api/models/v1_workflow_node_type.py +1 -0
  193. eval_studio_client/api/models/v1_workflow_type.py +1 -0
  194. eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +6 -0
  195. eval_studio_client/api/test/test_dashboard_report_service_api.py +37 -0
  196. eval_studio_client/api/test/test_dashboard_test_case_annotation_service_api.py +43 -0
  197. eval_studio_client/api/test/test_leaderboard_report_service_api.py +6 -0
  198. eval_studio_client/api/test/test_leaderboard_service_api.py +12 -0
  199. eval_studio_client/api/test/test_leaderboard_test_case_annotation_service_api.py +43 -0
  200. eval_studio_client/api/test/test_operation_service_api.py +18 -0
  201. eval_studio_client/api/test/test_required_the_dashboard_test_case_annotation_to_update.py +57 -0
  202. eval_studio_client/api/test/test_required_the_leaderboard_test_case_annotation_to_update.py +57 -0
  203. eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +2 -1
  204. eval_studio_client/api/test/test_required_the_operation_to_finalize.py +2 -1
  205. eval_studio_client/api/test/test_required_the_operation_to_update.py +2 -1
  206. eval_studio_client/api/test/test_test_case_service_api.py +6 -0
  207. eval_studio_client/api/test/test_test_case_service_append_test_cases_request.py +52 -0
  208. eval_studio_client/api/test/test_test_service_api.py +6 -0
  209. eval_studio_client/api/test/test_v1_abort_operation_response.py +2 -1
  210. eval_studio_client/api/test/test_v1_actual_output_meta.py +61 -0
  211. eval_studio_client/api/test/test_v1_actual_output_meta_diff.py +66 -0
  212. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram.py +65 -0
  213. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_edge.py +53 -0
  214. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_node.py +53 -0
  215. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_row.py +56 -0
  216. eval_studio_client/api/test/test_v1_agent_chat_script_usage.py +54 -0
  217. eval_studio_client/api/test/test_v1_agent_chat_scripts_bar_chart.py +57 -0
  218. eval_studio_client/api/test/test_v1_agent_chat_tool_usage.py +54 -0
  219. eval_studio_client/api/test/test_v1_agent_chat_tools_bar_chart.py +57 -0
  220. eval_studio_client/api/test/test_v1_all_metric_scores.py +53 -0
  221. eval_studio_client/api/test/test_v1_append_test_cases_response.py +74 -0
  222. eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +2 -1
  223. eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +2 -1
  224. eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_request.py +120 -0
  225. eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_response.py +72 -0
  226. eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +2 -1
  227. eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +2 -1
  228. eval_studio_client/api/test/test_v1_batch_get_operations_response.py +2 -1
  229. eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +2 -1
  230. eval_studio_client/api/test/test_v1_batch_mark_operation_seen_by_creator_response.py +74 -0
  231. eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_request.py +55 -0
  232. eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_response.py +255 -0
  233. eval_studio_client/api/test/test_v1_comparison_item.py +233 -0
  234. eval_studio_client/api/test/test_v1_comparison_metric_score.py +52 -0
  235. eval_studio_client/api/test/test_v1_comparison_result.py +258 -0
  236. eval_studio_client/api/test/test_v1_comparison_summary.py +53 -0
  237. eval_studio_client/api/test/test_v1_create_evaluation_request.py +2 -1
  238. eval_studio_client/api/test/test_v1_create_leaderboard_request.py +2 -1
  239. eval_studio_client/api/test/test_v1_create_leaderboard_response.py +2 -1
  240. eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +2 -1
  241. eval_studio_client/api/test/test_v1_create_test_from_test_cases_request.py +54 -0
  242. eval_studio_client/api/test/test_v1_create_test_from_test_cases_response.py +68 -0
  243. eval_studio_client/api/test/test_v1_dashboard_report.py +142 -0
  244. eval_studio_client/api/test/test_v1_dashboard_report_result.py +72 -0
  245. eval_studio_client/api/test/test_v1_dashboard_test_case_annotation.py +58 -0
  246. eval_studio_client/api/test/test_v1_data_fragment.py +57 -0
  247. eval_studio_client/api/test/test_v1_deep_compare_leaderboards_request.py +55 -0
  248. eval_studio_client/api/test/test_v1_deep_compare_leaderboards_response.py +255 -0
  249. eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +2 -1
  250. eval_studio_client/api/test/test_v1_diff_item.py +226 -0
  251. eval_studio_client/api/test/test_v1_evaluation_type.py +33 -0
  252. eval_studio_client/api/test/test_v1_finalize_operation_response.py +2 -1
  253. eval_studio_client/api/test/test_v1_flipped_metric.py +53 -0
  254. eval_studio_client/api/test/test_v1_generate_test_cases_response.py +2 -1
  255. eval_studio_client/api/test/test_v1_get_dashboard_report_response.py +143 -0
  256. eval_studio_client/api/test/test_v1_get_info_response.py +4 -1
  257. eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +39 -2
  258. eval_studio_client/api/test/test_v1_get_leaderboard_response.py +2 -1
  259. eval_studio_client/api/test/test_v1_get_operation_response.py +2 -1
  260. eval_studio_client/api/test/test_v1_get_stats_response.py +3 -1
  261. eval_studio_client/api/test/test_v1_human_decision.py +33 -0
  262. eval_studio_client/api/test/test_v1_import_leaderboard_response.py +2 -1
  263. eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +2 -1
  264. eval_studio_client/api/test/test_v1_info.py +4 -1
  265. eval_studio_client/api/test/test_v1_leaderboard.py +2 -1
  266. eval_studio_client/api/test/test_v1_leaderboard_cmp_report.py +254 -0
  267. eval_studio_client/api/test/test_v1_leaderboard_comparison_item.py +53 -0
  268. eval_studio_client/api/test/test_v1_leaderboard_info.py +57 -0
  269. eval_studio_client/api/test/test_v1_leaderboard_report.py +39 -2
  270. eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +33 -1
  271. eval_studio_client/api/test/test_v1_leaderboard_report_result.py +39 -2
  272. eval_studio_client/api/test/test_v1_leaderboard_report_result_view.py +33 -0
  273. eval_studio_client/api/test/test_v1_leaderboard_test_case_annotation.py +58 -0
  274. eval_studio_client/api/test/test_v1_list_dashboard_test_case_annotations_response.py +61 -0
  275. eval_studio_client/api/test/test_v1_list_leaderboard_test_case_annotations_response.py +61 -0
  276. eval_studio_client/api/test/test_v1_list_leaderboards_response.py +2 -1
  277. eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +2 -1
  278. eval_studio_client/api/test/test_v1_list_operations_response.py +4 -2
  279. eval_studio_client/api/test/test_v1_list_unseen_operations_response.py +75 -0
  280. eval_studio_client/api/test/test_v1_mark_operation_seen_by_creator_response.py +72 -0
  281. eval_studio_client/api/test/test_v1_metric.py +52 -0
  282. eval_studio_client/api/test/test_v1_metric_average.py +58 -0
  283. eval_studio_client/api/test/test_v1_metric_meta.py +66 -0
  284. eval_studio_client/api/test/test_v1_models_comparisons.py +54 -0
  285. eval_studio_client/api/test/test_v1_models_comparisons_metrics.py +65 -0
  286. eval_studio_client/api/test/test_v1_models_overview.py +60 -0
  287. eval_studio_client/api/test/test_v1_operation.py +2 -1
  288. eval_studio_client/api/test/test_v1_operation_view.py +33 -0
  289. eval_studio_client/api/test/test_v1_process_workflow_node_response.py +2 -1
  290. eval_studio_client/api/test/test_v1_retrieved_context_diff.py +66 -0
  291. eval_studio_client/api/test/test_v1_stats.py +3 -1
  292. eval_studio_client/api/test/test_v1_technical_metrics.py +62 -0
  293. eval_studio_client/api/test/test_v1_technical_metrics_detail.py +55 -0
  294. eval_studio_client/api/test/test_v1_test_case_leaderboard_item.py +53 -0
  295. eval_studio_client/api/test/test_v1_test_case_relationship_info.py +53 -0
  296. eval_studio_client/api/test/test_v1_test_case_result.py +106 -0
  297. eval_studio_client/api/test/test_v1_text_similarity_metric.py +33 -0
  298. eval_studio_client/api/test/test_v1_update_dashboard_test_case_annotation_response.py +59 -0
  299. eval_studio_client/api/test/test_v1_update_leaderboard_response.py +2 -1
  300. eval_studio_client/api/test/test_v1_update_leaderboard_test_case_annotation_response.py +59 -0
  301. eval_studio_client/api/test/test_v1_update_operation_response.py +2 -1
  302. eval_studio_client/gen/openapiv2/eval_studio.swagger.json +2340 -210
  303. eval_studio_client/models.py +18 -6
  304. {eval_studio_client-1.2.5.dist-info → eval_studio_client-1.3.0a1.dist-info}/METADATA +2 -2
  305. {eval_studio_client-1.2.5.dist-info → eval_studio_client-1.3.0a1.dist-info}/RECORD +306 -111
  306. {eval_studio_client-1.2.5.dist-info → eval_studio_client-1.3.0a1.dist-info}/WHEEL +0 -0
eval_studio_client/api/models/v1_text_similarity_metric.py
@@ -0,0 +1,39 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+from __future__ import annotations
+import json
+from enum import Enum
+from typing_extensions import Self
+
+
+class V1TextSimilarityMetric(str, Enum):
+    """
+    - TEXT_SIMILARITY_METRIC_UNSPECIFIED: Default value - must not be used - TEXT_SIMILARITY_METRIC_EXACT_MATCH: Exact string matching - sentences must be identical (default) - TEXT_SIMILARITY_METRIC_COSINE_DISTANCE: Cosine distance of sentence embeddings - semantic similarity - TEXT_SIMILARITY_METRIC_BERT_SCORE: BERTScore - contextual embeddings similarity using BERT
+    """
+
+    """
+    allowed enum values
+    """
+    TEXT_SIMILARITY_METRIC_UNSPECIFIED = 'TEXT_SIMILARITY_METRIC_UNSPECIFIED'
+    TEXT_SIMILARITY_METRIC_EXACT_MATCH = 'TEXT_SIMILARITY_METRIC_EXACT_MATCH'
+    TEXT_SIMILARITY_METRIC_COSINE_DISTANCE = 'TEXT_SIMILARITY_METRIC_COSINE_DISTANCE'
+    TEXT_SIMILARITY_METRIC_BERT_SCORE = 'TEXT_SIMILARITY_METRIC_BERT_SCORE'
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Self:
+        """Create an instance of V1TextSimilarityMetric from a JSON string"""
+        return cls(json.loads(json_str))
+
+
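For orientation, a minimal sketch of how the new enum can be consumed; it relies only on the `from_json` helper and members shown in the hunk above:

```python
import json

from eval_studio_client.api.models.v1_text_similarity_metric import V1TextSimilarityMetric

# from_json() expects a JSON-encoded string, i.e. the quoted wire value.
metric = V1TextSimilarityMetric.from_json(json.dumps("TEXT_SIMILARITY_METRIC_BERT_SCORE"))
assert metric is V1TextSimilarityMetric.TEXT_SIMILARITY_METRIC_BERT_SCORE

# As a (str, Enum) subclass, members compare equal to their raw wire values.
assert metric == "TEXT_SIMILARITY_METRIC_BERT_SCORE"
```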
eval_studio_client/api/models/v1_update_dashboard_test_case_annotation_response.py
@@ -0,0 +1,91 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re  # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field
+from typing import Any, ClassVar, Dict, List, Optional
+from eval_studio_client.api.models.v1_dashboard_test_case_annotation import V1DashboardTestCaseAnnotation
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1UpdateDashboardTestCaseAnnotationResponse(BaseModel):
+    """
+    V1UpdateDashboardTestCaseAnnotationResponse
+    """  # noqa: E501
+    dashboard_test_case_annotation: Optional[V1DashboardTestCaseAnnotation] = Field(default=None, alias="dashboardTestCaseAnnotation")
+    __properties: ClassVar[List[str]] = ["dashboardTestCaseAnnotation"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1UpdateDashboardTestCaseAnnotationResponse from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of dashboard_test_case_annotation
+        if self.dashboard_test_case_annotation:
+            _dict['dashboardTestCaseAnnotation'] = self.dashboard_test_case_annotation.to_dict()
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1UpdateDashboardTestCaseAnnotationResponse from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "dashboardTestCaseAnnotation": V1DashboardTestCaseAnnotation.from_dict(obj["dashboardTestCaseAnnotation"]) if obj.get("dashboardTestCaseAnnotation") is not None else None
+        }, strict=False)
+        return _obj
+
+
eval_studio_client/api/models/v1_update_leaderboard_test_case_annotation_response.py
@@ -0,0 +1,91 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re  # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field
+from typing import Any, ClassVar, Dict, List, Optional
+from eval_studio_client.api.models.v1_leaderboard_test_case_annotation import V1LeaderboardTestCaseAnnotation
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1UpdateLeaderboardTestCaseAnnotationResponse(BaseModel):
+    """
+    V1UpdateLeaderboardTestCaseAnnotationResponse
+    """  # noqa: E501
+    leaderboard_test_case_annotation: Optional[V1LeaderboardTestCaseAnnotation] = Field(default=None, alias="leaderboardTestCaseAnnotation")
+    __properties: ClassVar[List[str]] = ["leaderboardTestCaseAnnotation"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1UpdateLeaderboardTestCaseAnnotationResponse from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of leaderboard_test_case_annotation
+        if self.leaderboard_test_case_annotation:
+            _dict['leaderboardTestCaseAnnotation'] = self.leaderboard_test_case_annotation.to_dict()
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1UpdateLeaderboardTestCaseAnnotationResponse from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "leaderboardTestCaseAnnotation": V1LeaderboardTestCaseAnnotation.from_dict(obj["leaderboardTestCaseAnnotation"]) if obj.get("leaderboardTestCaseAnnotation") is not None else None
+        }, strict=False)
+        return _obj
+
+
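Both response wrappers follow the same generated pattern, so one hedged sketch covers them. The field names inside the nested annotation (`parent`, `key`) are assumptions drawn from the `RequiredTheLeaderboardTestCaseAnnotationToUpdate` fixture later in this diff, not a documented payload:

```python
from eval_studio_client.api.models.v1_update_leaderboard_test_case_annotation_response import (
    V1UpdateLeaderboardTestCaseAnnotationResponse,
)

# Hypothetical wire payload; the annotation's field names are assumed here.
payload = '{"leaderboardTestCaseAnnotation": {"parent": "leaderboards/123", "key": "note"}}'

resp = V1UpdateLeaderboardTestCaseAnnotationResponse.from_json(payload)

# to_dict()/to_json() re-serialize with the camelCase aliases and drop
# optional fields that were never set (see the to_dict docstring above).
print(resp.to_json())
```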
eval_studio_client/api/models/v1_workflow_node_type.py
@@ -36,6 +36,7 @@ class V1WorkflowNodeType(str, Enum):
     WORKFLOW_NODE_TYPE_VALIDATION = 'WORKFLOW_NODE_TYPE_VALIDATION'
     WORKFLOW_NODE_TYPE_ADVERSARIAL_INPUTS = 'WORKFLOW_NODE_TYPE_ADVERSARIAL_INPUTS'
     WORKFLOW_NODE_TYPE_FAILURE_CLUSTERING = 'WORKFLOW_NODE_TYPE_FAILURE_CLUSTERING'
+    WORKFLOW_NODE_TYPE_TEST_CASE_IMPORT = 'WORKFLOW_NODE_TYPE_TEST_CASE_IMPORT'
 
     @classmethod
     def from_json(cls, json_str: str) -> Self:
eval_studio_client/api/models/v1_workflow_type.py
@@ -28,6 +28,7 @@ class V1WorkflowType(str, Enum):
     """
     WORKFLOW_TYPE_UNSPECIFIED = 'WORKFLOW_TYPE_UNSPECIFIED'
     WORKFLOW_TYPE_MRM = 'WORKFLOW_TYPE_MRM'
+    WORKFLOW_TYPE_IMPORTED_TEST = 'WORKFLOW_TYPE_IMPORTED_TEST'
 
     @classmethod
     def from_json(cls, json_str: str) -> Self:
eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py
@@ -112,6 +112,12 @@ class TestAdversarialInputsServiceTestAdversarialInputsRobustnessRequest(unittest.TestCase):
                         key = '',
                         value = 1.337, )
                     ], )
+                },
+                all_baseline_metrics_scores = {
+                    'key' : eval_studio_client.api.models.v1_all_metric_scores.v1AllMetricScores(
+                        scores = [
+                            1.337
+                        ], )
                 }
             )
         else:
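The new `all_baseline_metrics_scores` field is a map from a model key to `V1AllMetricScores`. A hedged construction sketch, assuming `V1AllMetricScores` carries a plain `scores` list of floats as the generated fixture above suggests (the `"model-a"` key is made up for illustration):

```python
from eval_studio_client.api.models.v1_all_metric_scores import V1AllMetricScores

# One V1AllMetricScores entry per baseline model key.
baseline = {"model-a": V1AllMetricScores(scores=[1.337])}
print({key: value.to_dict() for key, value in baseline.items()})
```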
eval_studio_client/api/test/test_dashboard_report_service_api.py
@@ -0,0 +1,37 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.api.dashboard_report_service_api import DashboardReportServiceApi
+
+
+class TestDashboardReportServiceApi(unittest.TestCase):
+    """DashboardReportServiceApi unit test stubs"""
+
+    def setUp(self) -> None:
+        self.api = DashboardReportServiceApi()
+
+    def tearDown(self) -> None:
+        pass
+
+    def test_dashboard_report_service_get_dashboard_report(self) -> None:
+        """Test case for dashboard_report_service_get_dashboard_report
+
+        """
+        pass
+
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_dashboard_test_case_annotation_service_api.py
@@ -0,0 +1,43 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.api.dashboard_test_case_annotation_service_api import DashboardTestCaseAnnotationServiceApi
+
+
+class TestDashboardTestCaseAnnotationServiceApi(unittest.TestCase):
+    """DashboardTestCaseAnnotationServiceApi unit test stubs"""
+
+    def setUp(self) -> None:
+        self.api = DashboardTestCaseAnnotationServiceApi()
+
+    def tearDown(self) -> None:
+        pass
+
+    def test_dashboard_test_case_annotation_service_list_dashboard_test_case_annotations(self) -> None:
+        """Test case for dashboard_test_case_annotation_service_list_dashboard_test_case_annotations
+
+        """
+        pass
+
+    def test_dashboard_test_case_annotation_service_update_dashboard_test_case_annotation(self) -> None:
+        """Test case for dashboard_test_case_annotation_service_update_dashboard_test_case_annotation
+
+        """
+        pass
+
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_leaderboard_report_service_api.py
@@ -26,6 +26,12 @@ class TestLeaderboardReportServiceApi(unittest.TestCase):
     def tearDown(self) -> None:
         pass
 
+    def test_leaderboard_report_service_cmp_leaderboard_reports(self) -> None:
+        """Test case for leaderboard_report_service_cmp_leaderboard_reports
+
+        """
+        pass
+
     def test_leaderboard_report_service_get_leaderboard_report(self) -> None:
         """Test case for leaderboard_report_service_get_leaderboard_report
 
eval_studio_client/api/test/test_leaderboard_service_api.py
@@ -32,6 +32,12 @@ class TestLeaderboardServiceApi(unittest.TestCase):
         """
         pass
 
+    def test_leaderboard_service_batch_create_leaderboards_without_cache(self) -> None:
+        """Test case for leaderboard_service_batch_create_leaderboards_without_cache
+
+        """
+        pass
+
     def test_leaderboard_service_batch_delete_leaderboards(self) -> None:
         """Test case for leaderboard_service_batch_delete_leaderboards
 
@@ -62,6 +68,12 @@ class TestLeaderboardServiceApi(unittest.TestCase):
         """
         pass
 
+    def test_leaderboard_service_deep_compare_leaderboards(self) -> None:
+        """Test case for leaderboard_service_deep_compare_leaderboards
+
+        """
+        pass
+
     def test_leaderboard_service_delete_leaderboard(self) -> None:
         """Test case for leaderboard_service_delete_leaderboard
 
eval_studio_client/api/test/test_leaderboard_test_case_annotation_service_api.py
@@ -0,0 +1,43 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.api.leaderboard_test_case_annotation_service_api import LeaderboardTestCaseAnnotationServiceApi
+
+
+class TestLeaderboardTestCaseAnnotationServiceApi(unittest.TestCase):
+    """LeaderboardTestCaseAnnotationServiceApi unit test stubs"""
+
+    def setUp(self) -> None:
+        self.api = LeaderboardTestCaseAnnotationServiceApi()
+
+    def tearDown(self) -> None:
+        pass
+
+    def test_leaderboard_test_case_annotation_service_list_leaderboard_test_case_annotations(self) -> None:
+        """Test case for leaderboard_test_case_annotation_service_list_leaderboard_test_case_annotations
+
+        """
+        pass
+
+    def test_leaderboard_test_case_annotation_service_update_leaderboard_test_case_annotation(self) -> None:
+        """Test case for leaderboard_test_case_annotation_service_update_leaderboard_test_case_annotation
+
+        """
+        pass
+
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_operation_service_api.py
@@ -38,6 +38,12 @@ class TestOperationServiceApi(unittest.TestCase):
         """
         pass
 
+    def test_operation_service_batch_mark_operation_seen_by_creator(self) -> None:
+        """Test case for operation_service_batch_mark_operation_seen_by_creator
+
+        """
+        pass
+
     def test_operation_service_finalize_operation(self) -> None:
        """Test case for operation_service_finalize_operation
 
@@ -56,6 +62,18 @@ class TestOperationServiceApi(unittest.TestCase):
         """
         pass
 
+    def test_operation_service_list_unseen_operations(self) -> None:
+        """Test case for operation_service_list_unseen_operations
+
+        """
+        pass
+
+    def test_operation_service_mark_operation_seen_by_creator(self) -> None:
+        """Test case for operation_service_mark_operation_seen_by_creator
+
+        """
+        pass
+
     def test_operation_service_update_operation(self) -> None:
         """Test case for operation_service_update_operation
 
eval_studio_client/api/test/test_required_the_dashboard_test_case_annotation_to_update.py
@@ -0,0 +1,57 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.required_the_dashboard_test_case_annotation_to_update import RequiredTheDashboardTestCaseAnnotationToUpdate
+
+class TestRequiredTheDashboardTestCaseAnnotationToUpdate(unittest.TestCase):
+    """RequiredTheDashboardTestCaseAnnotationToUpdate unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> RequiredTheDashboardTestCaseAnnotationToUpdate:
+        """Test RequiredTheDashboardTestCaseAnnotationToUpdate
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `RequiredTheDashboardTestCaseAnnotationToUpdate`
+        """
+        model = RequiredTheDashboardTestCaseAnnotationToUpdate()
+        if include_optional:
+            return RequiredTheDashboardTestCaseAnnotationToUpdate(
+                create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                creator = '',
+                update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                updater = '',
+                parent = '',
+                key = '',
+                value = None
+            )
+        else:
+            return RequiredTheDashboardTestCaseAnnotationToUpdate(
+            )
+        """
+
+    def testRequiredTheDashboardTestCaseAnnotationToUpdate(self):
+        """Test RequiredTheDashboardTestCaseAnnotationToUpdate"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_required_the_leaderboard_test_case_annotation_to_update.py
@@ -0,0 +1,57 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.required_the_leaderboard_test_case_annotation_to_update import RequiredTheLeaderboardTestCaseAnnotationToUpdate
+
+class TestRequiredTheLeaderboardTestCaseAnnotationToUpdate(unittest.TestCase):
+    """RequiredTheLeaderboardTestCaseAnnotationToUpdate unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> RequiredTheLeaderboardTestCaseAnnotationToUpdate:
+        """Test RequiredTheLeaderboardTestCaseAnnotationToUpdate
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `RequiredTheLeaderboardTestCaseAnnotationToUpdate`
+        """
+        model = RequiredTheLeaderboardTestCaseAnnotationToUpdate()
+        if include_optional:
+            return RequiredTheLeaderboardTestCaseAnnotationToUpdate(
+                create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                creator = '',
+                update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                updater = '',
+                parent = '',
+                key = '',
+                value = None
+            )
+        else:
+            return RequiredTheLeaderboardTestCaseAnnotationToUpdate(
+            )
+        """
+
+    def testRequiredTheLeaderboardTestCaseAnnotationToUpdate(self):
+        """Test RequiredTheLeaderboardTestCaseAnnotationToUpdate"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_required_the_leaderboard_to_update.py
@@ -99,7 +99,8 @@ class TestRequiredTheLeaderboardToUpdate(unittest.TestCase):
                 h2ogpte_collection = '',
                 type = 'LEADERBOARD_TYPE_UNSPECIFIED',
                 demo = True,
-                test_lab = ''
+                test_lab = '',
+                evaluation_type = 'EVALUATION_TYPE_UNSPECIFIED'
             )
         else:
             return RequiredTheLeaderboardToUpdate(
eval_studio_client/api/test/test_required_the_operation_to_finalize.py
@@ -55,7 +55,8 @@ class TestRequiredTheOperationToFinalize(unittest.TestCase):
                 ], ),
                 response = {
                     'key' : None
-                }
+                },
+                seen_by_creator_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f')
             )
         else:
             return RequiredTheOperationToFinalize(
eval_studio_client/api/test/test_required_the_operation_to_update.py
@@ -55,7 +55,8 @@ class TestRequiredTheOperationToUpdate(unittest.TestCase):
                 ], ),
                 response = {
                     'key' : None
-                }
+                },
+                seen_by_creator_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f')
            )
         else:
             return RequiredTheOperationToUpdate(
eval_studio_client/api/test/test_test_case_service_api.py
@@ -26,6 +26,12 @@ class TestTestCaseServiceApi(unittest.TestCase):
     def tearDown(self) -> None:
         pass
 
+    def test_test_case_service_append_test_cases(self) -> None:
+        """Test case for test_case_service_append_test_cases
+
+        """
+        pass
+
     def test_test_case_service_batch_delete_test_cases(self) -> None:
         """Test case for test_case_service_batch_delete_test_cases
 
eval_studio_client/api/test/test_test_case_service_append_test_cases_request.py
@@ -0,0 +1,52 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.test_case_service_append_test_cases_request import TestCaseServiceAppendTestCasesRequest
+
+class TestTestCaseServiceAppendTestCasesRequest(unittest.TestCase):
+    """TestCaseServiceAppendTestCasesRequest unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> TestCaseServiceAppendTestCasesRequest:
+        """Test TestCaseServiceAppendTestCasesRequest
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `TestCaseServiceAppendTestCasesRequest`
+        """
+        model = TestCaseServiceAppendTestCasesRequest()
+        if include_optional:
+            return TestCaseServiceAppendTestCasesRequest(
+                test_cases_json = '',
+                url = ''
+            )
+        else:
+            return TestCaseServiceAppendTestCasesRequest(
+            )
+        """
+
+    def testTestCaseServiceAppendTestCasesRequest(self):
+        """Test TestCaseServiceAppendTestCasesRequest"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
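Finally, a short sketch of building the request that this new test stub covers. Per the fixture, both fields are optional; the inner JSON shape of `test_cases_json` is not specified anywhere in this diff, so the example payload is a placeholder:

```python
import json

from eval_studio_client.api.models.test_case_service_append_test_cases_request import (
    TestCaseServiceAppendTestCasesRequest,
)

# Either inline JSON or a URL may be supplied; the test-case schema used
# here is hypothetical, not the documented format.
request = TestCaseServiceAppendTestCasesRequest(
    test_cases_json=json.dumps([{"prompt": "What is Eval Studio?"}]),
)
print(request.to_json())
```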