eval-studio-client 1.2.4a2__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (306)
  1. eval_studio_client/api/__init__.py +65 -0
  2. eval_studio_client/api/api/__init__.py +3 -0
  3. eval_studio_client/api/api/dashboard_report_service_api.py +292 -0
  4. eval_studio_client/api/api/dashboard_service_api.py +16 -16
  5. eval_studio_client/api/api/dashboard_test_case_annotation_service_api.py +611 -0
  6. eval_studio_client/api/api/document_service_api.py +16 -16
  7. eval_studio_client/api/api/evaluation_service_api.py +12 -12
  8. eval_studio_client/api/api/evaluator_service_api.py +16 -16
  9. eval_studio_client/api/api/leaderboard_report_service_api.py +304 -17
  10. eval_studio_client/api/api/leaderboard_service_api.py +554 -16
  11. eval_studio_client/api/api/leaderboard_test_case_annotation_service_api.py +611 -0
  12. eval_studio_client/api/api/model_service_api.py +16 -16
  13. eval_studio_client/api/api/operation_service_api.py +821 -17
  14. eval_studio_client/api/api/perturbator_service_api.py +22 -22
  15. eval_studio_client/api/api/test_case_service_api.py +300 -16
  16. eval_studio_client/api/api/test_class_service_api.py +16 -16
  17. eval_studio_client/api/api/test_service_api.py +285 -16
  18. eval_studio_client/api/api/workflow_node_service_api.py +16 -16
  19. eval_studio_client/api/api/workflow_service_api.py +16 -16
  20. eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +2 -1
  21. eval_studio_client/api/docs/DashboardReportServiceApi.md +75 -0
  22. eval_studio_client/api/docs/DashboardServiceApi.md +5 -5
  23. eval_studio_client/api/docs/DashboardTestCaseAnnotationServiceApi.md +149 -0
  24. eval_studio_client/api/docs/DocumentServiceApi.md +5 -5
  25. eval_studio_client/api/docs/EvaluationServiceApi.md +4 -4
  26. eval_studio_client/api/docs/EvaluatorServiceApi.md +5 -5
  27. eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -5
  28. eval_studio_client/api/docs/LeaderboardServiceApi.md +141 -5
  29. eval_studio_client/api/docs/LeaderboardTestCaseAnnotationServiceApi.md +149 -0
  30. eval_studio_client/api/docs/ModelServiceApi.md +5 -5
  31. eval_studio_client/api/docs/OperationServiceApi.md +215 -8
  32. eval_studio_client/api/docs/PerturbatorServiceApi.md +7 -7
  33. eval_studio_client/api/docs/RequiredTheDashboardTestCaseAnnotationToUpdate.md +35 -0
  34. eval_studio_client/api/docs/RequiredTheLeaderboardTestCaseAnnotationToUpdate.md +35 -0
  35. eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +1 -0
  36. eval_studio_client/api/docs/RequiredTheOperationToFinalize.md +1 -0
  37. eval_studio_client/api/docs/RequiredTheOperationToUpdate.md +1 -0
  38. eval_studio_client/api/docs/TestCaseServiceApi.md +75 -5
  39. eval_studio_client/api/docs/TestCaseServiceAppendTestCasesRequest.md +30 -0
  40. eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
  41. eval_studio_client/api/docs/TestServiceApi.md +73 -5
  42. eval_studio_client/api/docs/V1ActualOutputMeta.md +30 -0
  43. eval_studio_client/api/docs/V1ActualOutputMetaDiff.md +36 -0
  44. eval_studio_client/api/docs/V1AgentChatActivityDiagram.md +31 -0
  45. eval_studio_client/api/docs/V1AgentChatActivityDiagramEdge.md +32 -0
  46. eval_studio_client/api/docs/V1AgentChatActivityDiagramNode.md +32 -0
  47. eval_studio_client/api/docs/V1AgentChatActivityDiagramRow.md +30 -0
  48. eval_studio_client/api/docs/V1AgentChatScriptUsage.md +33 -0
  49. eval_studio_client/api/docs/V1AgentChatScriptsBarChart.md +30 -0
  50. eval_studio_client/api/docs/V1AgentChatToolUsage.md +33 -0
  51. eval_studio_client/api/docs/V1AgentChatToolsBarChart.md +30 -0
  52. eval_studio_client/api/docs/V1AllMetricScores.md +29 -0
  53. eval_studio_client/api/docs/V1AppendTestCasesResponse.md +29 -0
  54. eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheRequest.md +31 -0
  55. eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheResponse.md +29 -0
  56. eval_studio_client/api/docs/V1BatchMarkOperationSeenByCreatorResponse.md +29 -0
  57. eval_studio_client/api/docs/V1CmpLeaderboardReportsRequest.md +33 -0
  58. eval_studio_client/api/docs/V1CmpLeaderboardReportsResponse.md +29 -0
  59. eval_studio_client/api/docs/V1ComparisonItem.md +36 -0
  60. eval_studio_client/api/docs/V1ComparisonMetricScore.md +30 -0
  61. eval_studio_client/api/docs/V1ComparisonResult.md +31 -0
  62. eval_studio_client/api/docs/V1ComparisonSummary.md +31 -0
  63. eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
  64. eval_studio_client/api/docs/V1CreateTestFromTestCasesRequest.md +32 -0
  65. eval_studio_client/api/docs/V1CreateTestFromTestCasesResponse.md +29 -0
  66. eval_studio_client/api/docs/V1DashboardReport.md +31 -0
  67. eval_studio_client/api/docs/V1DashboardReportResult.md +39 -0
  68. eval_studio_client/api/docs/V1DashboardTestCaseAnnotation.md +36 -0
  69. eval_studio_client/api/docs/V1DataFragment.md +31 -0
  70. eval_studio_client/api/docs/V1DeepCompareLeaderboardsRequest.md +33 -0
  71. eval_studio_client/api/docs/V1DeepCompareLeaderboardsResponse.md +29 -0
  72. eval_studio_client/api/docs/V1DiffItem.md +36 -0
  73. eval_studio_client/api/docs/V1EvaluationType.md +12 -0
  74. eval_studio_client/api/docs/V1FlippedMetric.md +31 -0
  75. eval_studio_client/api/docs/V1GetDashboardReportResponse.md +29 -0
  76. eval_studio_client/api/docs/V1HumanDecision.md +12 -0
  77. eval_studio_client/api/docs/V1Info.md +1 -0
  78. eval_studio_client/api/docs/V1Leaderboard.md +1 -0
  79. eval_studio_client/api/docs/V1LeaderboardCmpReport.md +30 -0
  80. eval_studio_client/api/docs/V1LeaderboardComparisonItem.md +31 -0
  81. eval_studio_client/api/docs/V1LeaderboardInfo.md +30 -0
  82. eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +6 -3
  83. eval_studio_client/api/docs/V1LeaderboardReportResult.md +11 -8
  84. eval_studio_client/api/docs/V1LeaderboardReportResultView.md +12 -0
  85. eval_studio_client/api/docs/V1LeaderboardTestCaseAnnotation.md +36 -0
  86. eval_studio_client/api/docs/V1ListDashboardTestCaseAnnotationsResponse.md +29 -0
  87. eval_studio_client/api/docs/V1ListLeaderboardTestCaseAnnotationsResponse.md +29 -0
  88. eval_studio_client/api/docs/V1ListOperationsResponse.md +1 -0
  89. eval_studio_client/api/docs/V1ListUnseenOperationsResponse.md +30 -0
  90. eval_studio_client/api/docs/V1MarkOperationSeenByCreatorResponse.md +29 -0
  91. eval_studio_client/api/docs/V1Metric.md +30 -0
  92. eval_studio_client/api/docs/V1MetricAverage.md +36 -0
  93. eval_studio_client/api/docs/V1MetricMeta.md +40 -0
  94. eval_studio_client/api/docs/V1MetricScore.md +1 -1
  95. eval_studio_client/api/docs/V1MetricScores.md +1 -1
  96. eval_studio_client/api/docs/V1ModelType.md +1 -1
  97. eval_studio_client/api/docs/V1ModelsComparisons.md +32 -0
  98. eval_studio_client/api/docs/V1ModelsComparisonsMetrics.md +33 -0
  99. eval_studio_client/api/docs/V1ModelsOverview.md +34 -0
  100. eval_studio_client/api/docs/V1Operation.md +1 -0
  101. eval_studio_client/api/docs/V1OperationView.md +12 -0
  102. eval_studio_client/api/docs/V1RetrievedContextDiff.md +36 -0
  103. eval_studio_client/api/docs/V1Stats.md +2 -0
  104. eval_studio_client/api/docs/V1TechnicalMetrics.md +30 -0
  105. eval_studio_client/api/docs/V1TechnicalMetricsDetail.md +33 -0
  106. eval_studio_client/api/docs/V1TestCaseLeaderboardItem.md +31 -0
  107. eval_studio_client/api/docs/V1TestCaseRelationshipInfo.md +31 -0
  108. eval_studio_client/api/docs/V1TestCaseResult.md +48 -0
  109. eval_studio_client/api/docs/V1TextSimilarityMetric.md +12 -0
  110. eval_studio_client/api/docs/V1UpdateDashboardTestCaseAnnotationResponse.md +29 -0
  111. eval_studio_client/api/docs/V1UpdateLeaderboardTestCaseAnnotationResponse.md +29 -0
  112. eval_studio_client/api/docs/WorkflowNodeServiceApi.md +5 -5
  113. eval_studio_client/api/docs/WorkflowServiceApi.md +5 -5
  114. eval_studio_client/api/models/__init__.py +62 -0
  115. eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +17 -2
  116. eval_studio_client/api/models/required_the_dashboard_test_case_annotation_to_update.py +108 -0
  117. eval_studio_client/api/models/required_the_leaderboard_test_case_annotation_to_update.py +108 -0
  118. eval_studio_client/api/models/required_the_leaderboard_to_update.py +5 -2
  119. eval_studio_client/api/models/required_the_operation_to_finalize.py +6 -2
  120. eval_studio_client/api/models/required_the_operation_to_update.py +6 -2
  121. eval_studio_client/api/models/test_case_service_append_test_cases_request.py +89 -0
  122. eval_studio_client/api/models/v1_actual_output_meta.py +97 -0
  123. eval_studio_client/api/models/v1_actual_output_meta_diff.py +101 -0
  124. eval_studio_client/api/models/v1_agent_chat_activity_diagram.py +109 -0
  125. eval_studio_client/api/models/v1_agent_chat_activity_diagram_edge.py +97 -0
  126. eval_studio_client/api/models/v1_agent_chat_activity_diagram_node.py +97 -0
  127. eval_studio_client/api/models/v1_agent_chat_activity_diagram_row.py +97 -0
  128. eval_studio_client/api/models/v1_agent_chat_script_usage.py +101 -0
  129. eval_studio_client/api/models/v1_agent_chat_scripts_bar_chart.py +102 -0
  130. eval_studio_client/api/models/v1_agent_chat_tool_usage.py +101 -0
  131. eval_studio_client/api/models/v1_agent_chat_tools_bar_chart.py +102 -0
  132. eval_studio_client/api/models/v1_all_metric_scores.py +87 -0
  133. eval_studio_client/api/models/v1_append_test_cases_response.py +95 -0
  134. eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_request.py +99 -0
  135. eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_response.py +91 -0
  136. eval_studio_client/api/models/v1_batch_mark_operation_seen_by_creator_response.py +95 -0
  137. eval_studio_client/api/models/v1_cmp_leaderboard_reports_request.py +96 -0
  138. eval_studio_client/api/models/v1_cmp_leaderboard_reports_response.py +91 -0
  139. eval_studio_client/api/models/v1_comparison_item.py +130 -0
  140. eval_studio_client/api/models/v1_comparison_metric_score.py +89 -0
  141. eval_studio_client/api/models/v1_comparison_result.py +120 -0
  142. eval_studio_client/api/models/v1_comparison_summary.py +91 -0
  143. eval_studio_client/api/models/v1_create_evaluation_request.py +5 -2
  144. eval_studio_client/api/models/v1_create_test_from_test_cases_request.py +93 -0
  145. eval_studio_client/api/models/v1_create_test_from_test_cases_response.py +91 -0
  146. eval_studio_client/api/models/v1_dashboard_report.py +109 -0
  147. eval_studio_client/api/models/v1_dashboard_report_result.py +139 -0
  148. eval_studio_client/api/models/v1_dashboard_test_case_annotation.py +112 -0
  149. eval_studio_client/api/models/v1_data_fragment.py +91 -0
  150. eval_studio_client/api/models/v1_deep_compare_leaderboards_request.py +96 -0
  151. eval_studio_client/api/models/v1_deep_compare_leaderboards_response.py +91 -0
  152. eval_studio_client/api/models/v1_diff_item.py +137 -0
  153. eval_studio_client/api/models/v1_evaluation_type.py +39 -0
  154. eval_studio_client/api/models/v1_flipped_metric.py +91 -0
  155. eval_studio_client/api/models/v1_get_dashboard_report_response.py +91 -0
  156. eval_studio_client/api/models/v1_human_decision.py +38 -0
  157. eval_studio_client/api/models/v1_info.py +4 -2
  158. eval_studio_client/api/models/v1_leaderboard.py +5 -2
  159. eval_studio_client/api/models/v1_leaderboard_cmp_report.py +93 -0
  160. eval_studio_client/api/models/v1_leaderboard_comparison_item.py +91 -0
  161. eval_studio_client/api/models/v1_leaderboard_info.py +97 -0
  162. eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +23 -9
  163. eval_studio_client/api/models/v1_leaderboard_report_result.py +21 -10
  164. eval_studio_client/api/models/v1_leaderboard_report_result_view.py +38 -0
  165. eval_studio_client/api/models/v1_leaderboard_test_case_annotation.py +112 -0
  166. eval_studio_client/api/models/v1_list_dashboard_test_case_annotations_response.py +95 -0
  167. eval_studio_client/api/models/v1_list_leaderboard_test_case_annotations_response.py +95 -0
  168. eval_studio_client/api/models/v1_list_operations_response.py +5 -3
  169. eval_studio_client/api/models/v1_list_unseen_operations_response.py +97 -0
  170. eval_studio_client/api/models/v1_mark_operation_seen_by_creator_response.py +91 -0
  171. eval_studio_client/api/models/v1_metric.py +89 -0
  172. eval_studio_client/api/models/v1_metric_average.py +101 -0
  173. eval_studio_client/api/models/v1_metric_meta.py +109 -0
  174. eval_studio_client/api/models/v1_metric_score.py +6 -1
  175. eval_studio_client/api/models/v1_metric_scores.py +1 -1
  176. eval_studio_client/api/models/v1_model_type.py +2 -1
  177. eval_studio_client/api/models/v1_models_comparisons.py +93 -0
  178. eval_studio_client/api/models/v1_models_comparisons_metrics.py +103 -0
  179. eval_studio_client/api/models/v1_models_overview.py +97 -0
  180. eval_studio_client/api/models/v1_operation.py +6 -2
  181. eval_studio_client/api/models/v1_operation_view.py +38 -0
  182. eval_studio_client/api/models/v1_retrieved_context_diff.py +101 -0
  183. eval_studio_client/api/models/v1_stats.py +16 -2
  184. eval_studio_client/api/models/v1_technical_metrics.py +96 -0
  185. eval_studio_client/api/models/v1_technical_metrics_detail.py +95 -0
  186. eval_studio_client/api/models/v1_test_case_leaderboard_item.py +91 -0
  187. eval_studio_client/api/models/v1_test_case_relationship_info.py +91 -0
  188. eval_studio_client/api/models/v1_test_case_result.py +157 -0
  189. eval_studio_client/api/models/v1_text_similarity_metric.py +39 -0
  190. eval_studio_client/api/models/v1_update_dashboard_test_case_annotation_response.py +91 -0
  191. eval_studio_client/api/models/v1_update_leaderboard_test_case_annotation_response.py +91 -0
  192. eval_studio_client/api/models/v1_workflow_node_type.py +1 -0
  193. eval_studio_client/api/models/v1_workflow_type.py +1 -0
  194. eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +6 -0
  195. eval_studio_client/api/test/test_dashboard_report_service_api.py +37 -0
  196. eval_studio_client/api/test/test_dashboard_test_case_annotation_service_api.py +43 -0
  197. eval_studio_client/api/test/test_leaderboard_report_service_api.py +6 -0
  198. eval_studio_client/api/test/test_leaderboard_service_api.py +12 -0
  199. eval_studio_client/api/test/test_leaderboard_test_case_annotation_service_api.py +43 -0
  200. eval_studio_client/api/test/test_operation_service_api.py +18 -0
  201. eval_studio_client/api/test/test_required_the_dashboard_test_case_annotation_to_update.py +57 -0
  202. eval_studio_client/api/test/test_required_the_leaderboard_test_case_annotation_to_update.py +57 -0
  203. eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +2 -1
  204. eval_studio_client/api/test/test_required_the_operation_to_finalize.py +2 -1
  205. eval_studio_client/api/test/test_required_the_operation_to_update.py +2 -1
  206. eval_studio_client/api/test/test_test_case_service_api.py +6 -0
  207. eval_studio_client/api/test/test_test_case_service_append_test_cases_request.py +52 -0
  208. eval_studio_client/api/test/test_test_service_api.py +6 -0
  209. eval_studio_client/api/test/test_v1_abort_operation_response.py +2 -1
  210. eval_studio_client/api/test/test_v1_actual_output_meta.py +61 -0
  211. eval_studio_client/api/test/test_v1_actual_output_meta_diff.py +66 -0
  212. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram.py +65 -0
  213. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_edge.py +53 -0
  214. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_node.py +53 -0
  215. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_row.py +56 -0
  216. eval_studio_client/api/test/test_v1_agent_chat_script_usage.py +54 -0
  217. eval_studio_client/api/test/test_v1_agent_chat_scripts_bar_chart.py +57 -0
  218. eval_studio_client/api/test/test_v1_agent_chat_tool_usage.py +54 -0
  219. eval_studio_client/api/test/test_v1_agent_chat_tools_bar_chart.py +57 -0
  220. eval_studio_client/api/test/test_v1_all_metric_scores.py +53 -0
  221. eval_studio_client/api/test/test_v1_append_test_cases_response.py +74 -0
  222. eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +2 -1
  223. eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +2 -1
  224. eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_request.py +120 -0
  225. eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_response.py +72 -0
  226. eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +2 -1
  227. eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +2 -1
  228. eval_studio_client/api/test/test_v1_batch_get_operations_response.py +2 -1
  229. eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +2 -1
  230. eval_studio_client/api/test/test_v1_batch_mark_operation_seen_by_creator_response.py +74 -0
  231. eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_request.py +55 -0
  232. eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_response.py +255 -0
  233. eval_studio_client/api/test/test_v1_comparison_item.py +233 -0
  234. eval_studio_client/api/test/test_v1_comparison_metric_score.py +52 -0
  235. eval_studio_client/api/test/test_v1_comparison_result.py +258 -0
  236. eval_studio_client/api/test/test_v1_comparison_summary.py +53 -0
  237. eval_studio_client/api/test/test_v1_create_evaluation_request.py +2 -1
  238. eval_studio_client/api/test/test_v1_create_leaderboard_request.py +2 -1
  239. eval_studio_client/api/test/test_v1_create_leaderboard_response.py +2 -1
  240. eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +2 -1
  241. eval_studio_client/api/test/test_v1_create_test_from_test_cases_request.py +54 -0
  242. eval_studio_client/api/test/test_v1_create_test_from_test_cases_response.py +68 -0
  243. eval_studio_client/api/test/test_v1_dashboard_report.py +142 -0
  244. eval_studio_client/api/test/test_v1_dashboard_report_result.py +72 -0
  245. eval_studio_client/api/test/test_v1_dashboard_test_case_annotation.py +58 -0
  246. eval_studio_client/api/test/test_v1_data_fragment.py +57 -0
  247. eval_studio_client/api/test/test_v1_deep_compare_leaderboards_request.py +55 -0
  248. eval_studio_client/api/test/test_v1_deep_compare_leaderboards_response.py +255 -0
  249. eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +2 -1
  250. eval_studio_client/api/test/test_v1_diff_item.py +226 -0
  251. eval_studio_client/api/test/test_v1_evaluation_type.py +33 -0
  252. eval_studio_client/api/test/test_v1_finalize_operation_response.py +2 -1
  253. eval_studio_client/api/test/test_v1_flipped_metric.py +53 -0
  254. eval_studio_client/api/test/test_v1_generate_test_cases_response.py +2 -1
  255. eval_studio_client/api/test/test_v1_get_dashboard_report_response.py +143 -0
  256. eval_studio_client/api/test/test_v1_get_info_response.py +4 -1
  257. eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +39 -2
  258. eval_studio_client/api/test/test_v1_get_leaderboard_response.py +2 -1
  259. eval_studio_client/api/test/test_v1_get_operation_response.py +2 -1
  260. eval_studio_client/api/test/test_v1_get_stats_response.py +3 -1
  261. eval_studio_client/api/test/test_v1_human_decision.py +33 -0
  262. eval_studio_client/api/test/test_v1_import_leaderboard_response.py +2 -1
  263. eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +2 -1
  264. eval_studio_client/api/test/test_v1_info.py +4 -1
  265. eval_studio_client/api/test/test_v1_leaderboard.py +2 -1
  266. eval_studio_client/api/test/test_v1_leaderboard_cmp_report.py +254 -0
  267. eval_studio_client/api/test/test_v1_leaderboard_comparison_item.py +53 -0
  268. eval_studio_client/api/test/test_v1_leaderboard_info.py +57 -0
  269. eval_studio_client/api/test/test_v1_leaderboard_report.py +39 -2
  270. eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +33 -1
  271. eval_studio_client/api/test/test_v1_leaderboard_report_result.py +39 -2
  272. eval_studio_client/api/test/test_v1_leaderboard_report_result_view.py +33 -0
  273. eval_studio_client/api/test/test_v1_leaderboard_test_case_annotation.py +58 -0
  274. eval_studio_client/api/test/test_v1_list_dashboard_test_case_annotations_response.py +61 -0
  275. eval_studio_client/api/test/test_v1_list_leaderboard_test_case_annotations_response.py +61 -0
  276. eval_studio_client/api/test/test_v1_list_leaderboards_response.py +2 -1
  277. eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +2 -1
  278. eval_studio_client/api/test/test_v1_list_operations_response.py +4 -2
  279. eval_studio_client/api/test/test_v1_list_unseen_operations_response.py +75 -0
  280. eval_studio_client/api/test/test_v1_mark_operation_seen_by_creator_response.py +72 -0
  281. eval_studio_client/api/test/test_v1_metric.py +52 -0
  282. eval_studio_client/api/test/test_v1_metric_average.py +58 -0
  283. eval_studio_client/api/test/test_v1_metric_meta.py +66 -0
  284. eval_studio_client/api/test/test_v1_models_comparisons.py +54 -0
  285. eval_studio_client/api/test/test_v1_models_comparisons_metrics.py +65 -0
  286. eval_studio_client/api/test/test_v1_models_overview.py +60 -0
  287. eval_studio_client/api/test/test_v1_operation.py +2 -1
  288. eval_studio_client/api/test/test_v1_operation_view.py +33 -0
  289. eval_studio_client/api/test/test_v1_process_workflow_node_response.py +2 -1
  290. eval_studio_client/api/test/test_v1_retrieved_context_diff.py +66 -0
  291. eval_studio_client/api/test/test_v1_stats.py +3 -1
  292. eval_studio_client/api/test/test_v1_technical_metrics.py +62 -0
  293. eval_studio_client/api/test/test_v1_technical_metrics_detail.py +55 -0
  294. eval_studio_client/api/test/test_v1_test_case_leaderboard_item.py +53 -0
  295. eval_studio_client/api/test/test_v1_test_case_relationship_info.py +53 -0
  296. eval_studio_client/api/test/test_v1_test_case_result.py +106 -0
  297. eval_studio_client/api/test/test_v1_text_similarity_metric.py +33 -0
  298. eval_studio_client/api/test/test_v1_update_dashboard_test_case_annotation_response.py +59 -0
  299. eval_studio_client/api/test/test_v1_update_leaderboard_response.py +2 -1
  300. eval_studio_client/api/test/test_v1_update_leaderboard_test_case_annotation_response.py +59 -0
  301. eval_studio_client/api/test/test_v1_update_operation_response.py +2 -1
  302. eval_studio_client/gen/openapiv2/eval_studio.swagger.json +2340 -210
  303. eval_studio_client/models.py +18 -6
  304. {eval_studio_client-1.2.4a2.dist-info → eval_studio_client-1.3.0.dist-info}/METADATA +2 -2
  305. {eval_studio_client-1.2.4a2.dist-info → eval_studio_client-1.3.0.dist-info}/RECORD +306 -111
  306. {eval_studio_client-1.2.4a2.dist-info → eval_studio_client-1.3.0.dist-info}/WHEEL +0 -0
eval_studio_client/api/models/v1_metric_average.py
@@ -0,0 +1,101 @@
+# coding: utf-8
+
+"""
+ai/h2o/eval_studio/v1/insight.proto
+
+No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+The version of the OpenAPI document: version not set
+Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional, Union
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1MetricAverage(BaseModel):
+    """
+    V1MetricAverage
+    """ # noqa: E501
+    metric_key: Optional[StrictStr] = Field(default=None, description="Metric key.", alias="metricKey")
+    baseline_avg: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Baseline average.", alias="baselineAvg")
+    current_avg: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Current average.", alias="currentAvg")
+    diff: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Difference between current and baseline.")
+    baseline_better_wins: Optional[StrictInt] = Field(default=None, description="Baseline better wins count.", alias="baselineBetterWins")
+    current_better_wins: Optional[StrictInt] = Field(default=None, description="Current better wins count.", alias="currentBetterWins")
+    baseline_rank_avg: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Baseline rank average.", alias="baselineRankAvg")
+    current_rank_avg: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Current rank average.", alias="currentRankAvg")
+    __properties: ClassVar[List[str]] = ["metricKey", "baselineAvg", "currentAvg", "diff", "baselineBetterWins", "currentBetterWins", "baselineRankAvg", "currentRankAvg"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1MetricAverage from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1MetricAverage from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "metricKey": obj.get("metricKey"),
+            "baselineAvg": obj.get("baselineAvg"),
+            "currentAvg": obj.get("currentAvg"),
+            "diff": obj.get("diff"),
+            "baselineBetterWins": obj.get("baselineBetterWins"),
+            "currentBetterWins": obj.get("currentBetterWins"),
+            "baselineRankAvg": obj.get("baselineRankAvg"),
+            "currentRankAvg": obj.get("currentRankAvg")
+        }, strict=False)
+        return _obj
+
+
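For orientation, a minimal usage sketch of the new model follows. It is not part of the package diff; it assumes eval-studio-client 1.3.0 is installed, and the metric key and numbers are illustrative.

from eval_studio_client.api.models.v1_metric_average import V1MetricAverage

# from_dict() expects wire-format (camelCase) keys, as generated by the API.
avg = V1MetricAverage.from_dict({
    "metricKey": "accuracy",   # illustrative metric key
    "baselineAvg": 0.71,
    "currentAvg": 0.78,
    "diff": 0.07,
})
print(avg.metric_key, avg.diff)   # attributes use snake_case names
print(avg.to_json())              # unset fields are dropped by to_dict()/to_json()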
eval_studio_client/api/models/v1_metric_meta.py
@@ -0,0 +1,109 @@
+# coding: utf-8
+
+"""
+ai/h2o/eval_studio/v1/insight.proto
+
+No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+The version of the OpenAPI document: version not set
+Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictFloat, StrictInt, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional, Union
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1MetricMeta(BaseModel):
+    """
+    V1MetricMeta
+    """ # noqa: E501
+    key: Optional[StrictStr] = Field(default=None, description="Metric key.")
+    display_name: Optional[StrictStr] = Field(default=None, description="Display name.", alias="displayName")
+    data_type: Optional[StrictStr] = Field(default=None, description="Data type.", alias="dataType")
+    display_value: Optional[StrictStr] = Field(default=None, description="Display value format.", alias="displayValue")
+    description: Optional[StrictStr] = Field(default=None, description="Description.")
+    value_range: Optional[List[Union[StrictFloat, StrictInt]]] = Field(default=None, description="Value range (min, max).", alias="valueRange")
+    value_enum: Optional[List[StrictStr]] = Field(default=None, description="Value enum (null if not applicable).", alias="valueEnum")
+    higher_is_better: Optional[StrictBool] = Field(default=None, description="Whether higher is better.", alias="higherIsBetter")
+    threshold: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Threshold value.")
+    is_primary_metric: Optional[StrictBool] = Field(default=None, description="Is primary metric.", alias="isPrimaryMetric")
+    parent_metric: Optional[StrictStr] = Field(default=None, description="Parent metric.", alias="parentMetric")
+    exclude: Optional[StrictBool] = Field(default=None, description="Exclude flag.")
+    __properties: ClassVar[List[str]] = ["key", "displayName", "dataType", "displayValue", "description", "valueRange", "valueEnum", "higherIsBetter", "threshold", "isPrimaryMetric", "parentMetric", "exclude"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1MetricMeta from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1MetricMeta from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "key": obj.get("key"),
+            "displayName": obj.get("displayName"),
+            "dataType": obj.get("dataType"),
+            "displayValue": obj.get("displayValue"),
+            "description": obj.get("description"),
+            "valueRange": obj.get("valueRange"),
+            "valueEnum": obj.get("valueEnum"),
+            "higherIsBetter": obj.get("higherIsBetter"),
+            "threshold": obj.get("threshold"),
+            "isPrimaryMetric": obj.get("isPrimaryMetric"),
+            "parentMetric": obj.get("parentMetric"),
+            "exclude": obj.get("exclude")
+        }, strict=False)
+        return _obj
+
+
eval_studio_client/api/models/v1_metric_score.py
@@ -27,7 +27,7 @@ class V1MetricScore(BaseModel):
     MetricScore represents the metric score.
     """ # noqa: E501
     key: Optional[StrictStr] = Field(default=None, description="Required. Metric key.")
-    value: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Required. Metric value - consider NaN, Infinity or -Infinity for float representation.")
+    value: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Optional. Metric value. May be omitted if the metric could not be computed. Valid values include normal floats, as well as special values: NaN, Infinity, or -Infinity.")
     __properties: ClassVar[List[str]] = ["key", "value"]
 
     model_config = ConfigDict(
@@ -69,6 +69,11 @@ class V1MetricScore(BaseModel):
             exclude=excluded_fields,
             exclude_none=True,
         )
+        # set to None if value (nullable) is None
+        # and model_fields_set contains the field
+        if self.value is None and "value" in self.model_fields_set:
+            _dict['value'] = None
+
        return _dict
 
     @classmethod
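A short sketch of how the now-nullable `value` field serializes after this change (not part of the diff; assumes version 1.3.0, with an illustrative metric key):

from eval_studio_client.api.models.v1_metric_score import V1MetricScore

# A value explicitly set to None is kept in the output dict...
print(V1MetricScore(key="faithfulness", value=None).to_dict())
# -> {'key': 'faithfulness', 'value': None}

# ...while a value that was never set is omitted entirely.
print(V1MetricScore(key="faithfulness").to_dict())
# -> {'key': 'faithfulness'}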
eval_studio_client/api/models/v1_metric_scores.py
@@ -27,7 +27,7 @@ class V1MetricScores(BaseModel):
     """
     V1MetricScores
     """ # noqa: E501
-    scores: Optional[List[V1MetricScore]] = Field(default=None, description="Required. The metric scores.")
+    scores: Optional[List[V1MetricScore]] = Field(default=None, description="Repeated. List of metric scores.")
     __properties: ClassVar[List[str]] = ["scores"]
 
     model_config = ConfigDict(
eval_studio_client/api/models/v1_model_type.py
@@ -20,7 +20,7 @@ from typing_extensions import Self
 
 class V1ModelType(str, Enum):
     """
-    - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock.
+    - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock. - MODEL_TYPE_ANTHROPIC_CLAUDE: Anthropic Claude chat.
     """
 
     """
@@ -37,6 +37,7 @@ class V1ModelType(str, Enum):
     MODEL_TYPE_H2_OLLMOPS = 'MODEL_TYPE_H2OLLMOPS'
     MODEL_TYPE_OLLAMA = 'MODEL_TYPE_OLLAMA'
     MODEL_TYPE_AMAZON_BEDROCK = 'MODEL_TYPE_AMAZON_BEDROCK'
+    MODEL_TYPE_ANTHROPIC_CLAUDE = 'MODEL_TYPE_ANTHROPIC_CLAUDE'
 
     @classmethod
     def from_json(cls, json_str: str) -> Self:
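The new enum member is used like any other model type; a minimal sketch (not part of the diff; assumes 1.3.0):

from eval_studio_client.api.models.v1_model_type import V1ModelType

claude = V1ModelType.MODEL_TYPE_ANTHROPIC_CLAUDE
print(claude.value)  # 'MODEL_TYPE_ANTHROPIC_CLAUDE'

# from_json() round-trips the JSON string form of the enum value.
assert V1ModelType.from_json('"MODEL_TYPE_ANTHROPIC_CLAUDE"') is claude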
eval_studio_client/api/models/v1_models_comparisons.py
@@ -0,0 +1,93 @@
+# coding: utf-8
+
+"""
+ai/h2o/eval_studio/v1/insight.proto
+
+No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+The version of the OpenAPI document: version not set
+Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictInt
+from typing import Any, ClassVar, Dict, List, Optional
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1ModelsComparisons(BaseModel):
+    """
+    V1ModelsComparisons
+    """ # noqa: E501
+    test_case_ranks_baseline: Optional[StrictInt] = Field(default=None, description="Test case ranks for baseline.", alias="testCaseRanksBaseline")
+    test_case_ranks_current: Optional[StrictInt] = Field(default=None, description="Test case ranks for current.", alias="testCaseRanksCurrent")
+    test_case_wins_baseline: Optional[StrictInt] = Field(default=None, description="Test case wins for baseline.", alias="testCaseWinsBaseline")
+    test_case_wins_current: Optional[StrictInt] = Field(default=None, description="Test case wins for current.", alias="testCaseWinsCurrent")
+    __properties: ClassVar[List[str]] = ["testCaseRanksBaseline", "testCaseRanksCurrent", "testCaseWinsBaseline", "testCaseWinsCurrent"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1ModelsComparisons from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1ModelsComparisons from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "testCaseRanksBaseline": obj.get("testCaseRanksBaseline"),
+            "testCaseRanksCurrent": obj.get("testCaseRanksCurrent"),
+            "testCaseWinsBaseline": obj.get("testCaseWinsBaseline"),
+            "testCaseWinsCurrent": obj.get("testCaseWinsCurrent")
+        }, strict=False)
+        return _obj
+
+
eval_studio_client/api/models/v1_models_comparisons_metrics.py
@@ -0,0 +1,103 @@
+# coding: utf-8
+
+"""
+ai/h2o/eval_studio/v1/insight.proto
+
+No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+The version of the OpenAPI document: version not set
+Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt
+from typing import Any, ClassVar, Dict, List, Optional, Union
+from eval_studio_client.api.models.v1_metric_average import V1MetricAverage
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1ModelsComparisonsMetrics(BaseModel):
+    """
+    V1ModelsComparisonsMetrics
+    """ # noqa: E501
+    metrics_ranks_baseline: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Metrics ranks for baseline.", alias="metricsRanksBaseline")
+    metrics_ranks_current: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Metrics ranks for current.", alias="metricsRanksCurrent")
+    metrics_wins_baseline: Optional[StrictInt] = Field(default=None, description="Metrics wins for baseline.", alias="metricsWinsBaseline")
+    metrics_wins_current: Optional[StrictInt] = Field(default=None, description="Metrics wins for current.", alias="metricsWinsCurrent")
+    metrics_averages: Optional[List[V1MetricAverage]] = Field(default=None, description="Metrics averages.", alias="metricsAverages")
+    __properties: ClassVar[List[str]] = ["metricsRanksBaseline", "metricsRanksCurrent", "metricsWinsBaseline", "metricsWinsCurrent", "metricsAverages"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1ModelsComparisonsMetrics from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of each item in metrics_averages (list)
+        _items = []
+        if self.metrics_averages:
+            for _item_metrics_averages in self.metrics_averages:
+                if _item_metrics_averages:
+                    _items.append(_item_metrics_averages.to_dict())
+            _dict['metricsAverages'] = _items
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1ModelsComparisonsMetrics from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "metricsRanksBaseline": obj.get("metricsRanksBaseline"),
+            "metricsRanksCurrent": obj.get("metricsRanksCurrent"),
+            "metricsWinsBaseline": obj.get("metricsWinsBaseline"),
+            "metricsWinsCurrent": obj.get("metricsWinsCurrent"),
+            "metricsAverages": [V1MetricAverage.from_dict(_item) for _item in obj["metricsAverages"]] if obj.get("metricsAverages") is not None else None
+        }, strict=False)
+        return _obj
+
+
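A sketch of how the nested `metricsAverages` list serializes (not part of the diff; assumes 1.3.0, with illustrative numbers):

from eval_studio_client.api.models.v1_metric_average import V1MetricAverage
from eval_studio_client.api.models.v1_models_comparisons_metrics import V1ModelsComparisonsMetrics

metrics = V1ModelsComparisonsMetrics(
    metrics_wins_baseline=3,
    metrics_wins_current=5,
    metrics_averages=[
        V1MetricAverage(metric_key="accuracy", baseline_avg=0.71, current_avg=0.78, diff=0.07),
    ],
)
# Nested V1MetricAverage items are serialized through their own to_dict(),
# so the output uses camelCase aliases throughout.
print(metrics.to_dict())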
eval_studio_client/api/models/v1_models_overview.py
@@ -0,0 +1,97 @@
+# coding: utf-8
+
+"""
+ai/h2o/eval_studio/v1/insight.proto
+
+No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+The version of the OpenAPI document: version not set
+Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1ModelsOverview(BaseModel):
+    """
+    V1ModelsOverview
+    """ # noqa: E501
+    baseline_model_key: Optional[StrictStr] = Field(default=None, description="Baseline model key.", alias="baselineModelKey")
+    current_model_key: Optional[StrictStr] = Field(default=None, description="Current model key.", alias="currentModelKey")
+    baseline_model_name: Optional[StrictStr] = Field(default=None, description="Baseline model name.", alias="baselineModelName")
+    baseline_collection_id: Optional[List[StrictStr]] = Field(default=None, description="Baseline collection IDs.", alias="baselineCollectionId")
+    current_model_name: Optional[StrictStr] = Field(default=None, description="Current model name.", alias="currentModelName")
+    current_collection_id: Optional[List[StrictStr]] = Field(default=None, description="Current collection IDs.", alias="currentCollectionId")
+    __properties: ClassVar[List[str]] = ["baselineModelKey", "currentModelKey", "baselineModelName", "baselineCollectionId", "currentModelName", "currentCollectionId"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1ModelsOverview from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1ModelsOverview from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "baselineModelKey": obj.get("baselineModelKey"),
+            "currentModelKey": obj.get("currentModelKey"),
+            "baselineModelName": obj.get("baselineModelName"),
+            "baselineCollectionId": obj.get("baselineCollectionId"),
+            "currentModelName": obj.get("currentModelName"),
+            "currentCollectionId": obj.get("currentCollectionId")
+        }, strict=False)
+        return _obj
+
+
eval_studio_client/api/models/v1_operation.py
@@ -40,7 +40,8 @@ class V1Operation(BaseModel):
     done: Optional[StrictBool] = Field(default=None, description="If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.")
     error: Optional[RpcStatus] = None
     response: Optional[ProtobufAny] = None
-    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "metadata", "done", "error", "response"]
+    seen_by_creator_time: Optional[datetime] = Field(default=None, description="Output only. Optional. Timestamp when the creator marked the Operation as seen. Once set, this field cannot be changed. Set via MarkOperationSeenByCreator method.", alias="seenByCreatorTime")
+    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "metadata", "done", "error", "response", "seenByCreatorTime"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -79,6 +80,7 @@ class V1Operation(BaseModel):
         * OpenAPI `readOnly` fields are excluded.
         * OpenAPI `readOnly` fields are excluded.
         * OpenAPI `readOnly` fields are excluded.
+        * OpenAPI `readOnly` fields are excluded.
         """
         excluded_fields: Set[str] = set([
             "name",
@@ -88,6 +90,7 @@ class V1Operation(BaseModel):
             "updater",
             "delete_time",
             "deleter",
+            "seen_by_creator_time",
         ])
 
         _dict = self.model_dump(
@@ -126,7 +129,8 @@ class V1Operation(BaseModel):
             "metadata": ProtobufAny.from_dict(obj["metadata"]) if obj.get("metadata") is not None else None,
             "done": obj.get("done"),
             "error": RpcStatus.from_dict(obj["error"]) if obj.get("error") is not None else None,
-            "response": ProtobufAny.from_dict(obj["response"]) if obj.get("response") is not None else None
+            "response": ProtobufAny.from_dict(obj["response"]) if obj.get("response") is not None else None,
+            "seenByCreatorTime": obj.get("seenByCreatorTime")
         }, strict=False)
         return _obj
 
eval_studio_client/api/models/v1_operation_view.py
@@ -0,0 +1,38 @@
+# coding: utf-8
+
+"""
+ai/h2o/eval_studio/v1/insight.proto
+
+No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+The version of the OpenAPI document: version not set
+Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import json
+from enum import Enum
+from typing_extensions import Self
+
+
+class V1OperationView(str, Enum):
+    """
+    - OPERATION_VIEW_UNSPECIFIED: Default / unset value. The API will default to the OPERATION_VIEW_FULL. - OPERATION_VIEW_BASIC: Include basic metadata about the Operation, but not the response. - OPERATION_VIEW_FULL: Include everything.
+    """
+
+    """
+    allowed enum values
+    """
+    OPERATION_VIEW_UNSPECIFIED = 'OPERATION_VIEW_UNSPECIFIED'
+    OPERATION_VIEW_BASIC = 'OPERATION_VIEW_BASIC'
+    OPERATION_VIEW_FULL = 'OPERATION_VIEW_FULL'
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Self:
+        """Create an instance of V1OperationView from a JSON string"""
+        return cls(json.loads(json_str))
+
+
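A short sketch covering the two operation-related additions above, the output-only `seenByCreatorTime` field and the new view enum (not part of the diff; assumes 1.3.0, with an illustrative operation name and timestamp):

from eval_studio_client.api.models.v1_operation import V1Operation
from eval_studio_client.api.models.v1_operation_view import V1OperationView

op = V1Operation.from_dict({
    "name": "operations/123",                  # illustrative resource name
    "done": True,
    "seenByCreatorTime": "2024-01-01T00:00:00Z",
})
print(op.seen_by_creator_time)                 # parsed into a datetime on the model
print("seenByCreatorTime" in op.to_dict())     # False: readOnly fields are excluded on output

# OPERATION_VIEW_BASIC requests operation metadata without the response payload.
view = V1OperationView.OPERATION_VIEW_BASIC
assert V1OperationView.from_json('"OPERATION_VIEW_BASIC"') is view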