eval-studio-client 1.2.4a2__py3-none-any.whl → 1.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/api/__init__.py +65 -0
- eval_studio_client/api/api/__init__.py +3 -0
- eval_studio_client/api/api/dashboard_report_service_api.py +292 -0
- eval_studio_client/api/api/dashboard_service_api.py +16 -16
- eval_studio_client/api/api/dashboard_test_case_annotation_service_api.py +611 -0
- eval_studio_client/api/api/document_service_api.py +16 -16
- eval_studio_client/api/api/evaluation_service_api.py +12 -12
- eval_studio_client/api/api/evaluator_service_api.py +16 -16
- eval_studio_client/api/api/leaderboard_report_service_api.py +304 -17
- eval_studio_client/api/api/leaderboard_service_api.py +554 -16
- eval_studio_client/api/api/leaderboard_test_case_annotation_service_api.py +611 -0
- eval_studio_client/api/api/model_service_api.py +16 -16
- eval_studio_client/api/api/operation_service_api.py +821 -17
- eval_studio_client/api/api/perturbator_service_api.py +22 -22
- eval_studio_client/api/api/test_case_service_api.py +300 -16
- eval_studio_client/api/api/test_class_service_api.py +16 -16
- eval_studio_client/api/api/test_service_api.py +285 -16
- eval_studio_client/api/api/workflow_node_service_api.py +16 -16
- eval_studio_client/api/api/workflow_service_api.py +16 -16
- eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +2 -1
- eval_studio_client/api/docs/DashboardReportServiceApi.md +75 -0
- eval_studio_client/api/docs/DashboardServiceApi.md +5 -5
- eval_studio_client/api/docs/DashboardTestCaseAnnotationServiceApi.md +149 -0
- eval_studio_client/api/docs/DocumentServiceApi.md +5 -5
- eval_studio_client/api/docs/EvaluationServiceApi.md +4 -4
- eval_studio_client/api/docs/EvaluatorServiceApi.md +5 -5
- eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -5
- eval_studio_client/api/docs/LeaderboardServiceApi.md +141 -5
- eval_studio_client/api/docs/LeaderboardTestCaseAnnotationServiceApi.md +149 -0
- eval_studio_client/api/docs/ModelServiceApi.md +5 -5
- eval_studio_client/api/docs/OperationServiceApi.md +215 -8
- eval_studio_client/api/docs/PerturbatorServiceApi.md +7 -7
- eval_studio_client/api/docs/RequiredTheDashboardTestCaseAnnotationToUpdate.md +35 -0
- eval_studio_client/api/docs/RequiredTheLeaderboardTestCaseAnnotationToUpdate.md +35 -0
- eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +1 -0
- eval_studio_client/api/docs/RequiredTheOperationToFinalize.md +1 -0
- eval_studio_client/api/docs/RequiredTheOperationToUpdate.md +1 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +75 -5
- eval_studio_client/api/docs/TestCaseServiceAppendTestCasesRequest.md +30 -0
- eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
- eval_studio_client/api/docs/TestServiceApi.md +73 -5
- eval_studio_client/api/docs/V1ActualOutputMeta.md +30 -0
- eval_studio_client/api/docs/V1ActualOutputMetaDiff.md +36 -0
- eval_studio_client/api/docs/V1AgentChatActivityDiagram.md +31 -0
- eval_studio_client/api/docs/V1AgentChatActivityDiagramEdge.md +32 -0
- eval_studio_client/api/docs/V1AgentChatActivityDiagramNode.md +32 -0
- eval_studio_client/api/docs/V1AgentChatActivityDiagramRow.md +30 -0
- eval_studio_client/api/docs/V1AgentChatScriptUsage.md +33 -0
- eval_studio_client/api/docs/V1AgentChatScriptsBarChart.md +30 -0
- eval_studio_client/api/docs/V1AgentChatToolUsage.md +33 -0
- eval_studio_client/api/docs/V1AgentChatToolsBarChart.md +30 -0
- eval_studio_client/api/docs/V1AllMetricScores.md +29 -0
- eval_studio_client/api/docs/V1AppendTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheRequest.md +31 -0
- eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchMarkOperationSeenByCreatorResponse.md +29 -0
- eval_studio_client/api/docs/V1CmpLeaderboardReportsRequest.md +33 -0
- eval_studio_client/api/docs/V1CmpLeaderboardReportsResponse.md +29 -0
- eval_studio_client/api/docs/V1ComparisonItem.md +36 -0
- eval_studio_client/api/docs/V1ComparisonMetricScore.md +30 -0
- eval_studio_client/api/docs/V1ComparisonResult.md +31 -0
- eval_studio_client/api/docs/V1ComparisonSummary.md +31 -0
- eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1CreateTestFromTestCasesRequest.md +32 -0
- eval_studio_client/api/docs/V1CreateTestFromTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1DashboardReport.md +31 -0
- eval_studio_client/api/docs/V1DashboardReportResult.md +39 -0
- eval_studio_client/api/docs/V1DashboardTestCaseAnnotation.md +36 -0
- eval_studio_client/api/docs/V1DataFragment.md +31 -0
- eval_studio_client/api/docs/V1DeepCompareLeaderboardsRequest.md +33 -0
- eval_studio_client/api/docs/V1DeepCompareLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1DiffItem.md +36 -0
- eval_studio_client/api/docs/V1EvaluationType.md +12 -0
- eval_studio_client/api/docs/V1FlippedMetric.md +31 -0
- eval_studio_client/api/docs/V1GetDashboardReportResponse.md +29 -0
- eval_studio_client/api/docs/V1HumanDecision.md +12 -0
- eval_studio_client/api/docs/V1Info.md +1 -0
- eval_studio_client/api/docs/V1Leaderboard.md +1 -0
- eval_studio_client/api/docs/V1LeaderboardCmpReport.md +30 -0
- eval_studio_client/api/docs/V1LeaderboardComparisonItem.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardInfo.md +30 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +6 -3
- eval_studio_client/api/docs/V1LeaderboardReportResult.md +11 -8
- eval_studio_client/api/docs/V1LeaderboardReportResultView.md +12 -0
- eval_studio_client/api/docs/V1LeaderboardTestCaseAnnotation.md +36 -0
- eval_studio_client/api/docs/V1ListDashboardTestCaseAnnotationsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListLeaderboardTestCaseAnnotationsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListOperationsResponse.md +1 -0
- eval_studio_client/api/docs/V1ListUnseenOperationsResponse.md +30 -0
- eval_studio_client/api/docs/V1MarkOperationSeenByCreatorResponse.md +29 -0
- eval_studio_client/api/docs/V1Metric.md +30 -0
- eval_studio_client/api/docs/V1MetricAverage.md +36 -0
- eval_studio_client/api/docs/V1MetricMeta.md +40 -0
- eval_studio_client/api/docs/V1MetricScore.md +1 -1
- eval_studio_client/api/docs/V1MetricScores.md +1 -1
- eval_studio_client/api/docs/V1ModelType.md +1 -1
- eval_studio_client/api/docs/V1ModelsComparisons.md +32 -0
- eval_studio_client/api/docs/V1ModelsComparisonsMetrics.md +33 -0
- eval_studio_client/api/docs/V1ModelsOverview.md +34 -0
- eval_studio_client/api/docs/V1Operation.md +1 -0
- eval_studio_client/api/docs/V1OperationView.md +12 -0
- eval_studio_client/api/docs/V1RetrievedContextDiff.md +36 -0
- eval_studio_client/api/docs/V1Stats.md +2 -0
- eval_studio_client/api/docs/V1TechnicalMetrics.md +30 -0
- eval_studio_client/api/docs/V1TechnicalMetricsDetail.md +33 -0
- eval_studio_client/api/docs/V1TestCaseLeaderboardItem.md +31 -0
- eval_studio_client/api/docs/V1TestCaseRelationshipInfo.md +31 -0
- eval_studio_client/api/docs/V1TestCaseResult.md +48 -0
- eval_studio_client/api/docs/V1TextSimilarityMetric.md +12 -0
- eval_studio_client/api/docs/V1UpdateDashboardTestCaseAnnotationResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateLeaderboardTestCaseAnnotationResponse.md +29 -0
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +5 -5
- eval_studio_client/api/docs/WorkflowServiceApi.md +5 -5
- eval_studio_client/api/models/__init__.py +62 -0
- eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +17 -2
- eval_studio_client/api/models/required_the_dashboard_test_case_annotation_to_update.py +108 -0
- eval_studio_client/api/models/required_the_leaderboard_test_case_annotation_to_update.py +108 -0
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +5 -2
- eval_studio_client/api/models/required_the_operation_to_finalize.py +6 -2
- eval_studio_client/api/models/required_the_operation_to_update.py +6 -2
- eval_studio_client/api/models/test_case_service_append_test_cases_request.py +89 -0
- eval_studio_client/api/models/v1_actual_output_meta.py +97 -0
- eval_studio_client/api/models/v1_actual_output_meta_diff.py +101 -0
- eval_studio_client/api/models/v1_agent_chat_activity_diagram.py +109 -0
- eval_studio_client/api/models/v1_agent_chat_activity_diagram_edge.py +97 -0
- eval_studio_client/api/models/v1_agent_chat_activity_diagram_node.py +97 -0
- eval_studio_client/api/models/v1_agent_chat_activity_diagram_row.py +97 -0
- eval_studio_client/api/models/v1_agent_chat_script_usage.py +101 -0
- eval_studio_client/api/models/v1_agent_chat_scripts_bar_chart.py +102 -0
- eval_studio_client/api/models/v1_agent_chat_tool_usage.py +101 -0
- eval_studio_client/api/models/v1_agent_chat_tools_bar_chart.py +102 -0
- eval_studio_client/api/models/v1_all_metric_scores.py +87 -0
- eval_studio_client/api/models/v1_append_test_cases_response.py +95 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_request.py +99 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_response.py +91 -0
- eval_studio_client/api/models/v1_batch_mark_operation_seen_by_creator_response.py +95 -0
- eval_studio_client/api/models/v1_cmp_leaderboard_reports_request.py +96 -0
- eval_studio_client/api/models/v1_cmp_leaderboard_reports_response.py +91 -0
- eval_studio_client/api/models/v1_comparison_item.py +130 -0
- eval_studio_client/api/models/v1_comparison_metric_score.py +89 -0
- eval_studio_client/api/models/v1_comparison_result.py +120 -0
- eval_studio_client/api/models/v1_comparison_summary.py +91 -0
- eval_studio_client/api/models/v1_create_evaluation_request.py +5 -2
- eval_studio_client/api/models/v1_create_test_from_test_cases_request.py +93 -0
- eval_studio_client/api/models/v1_create_test_from_test_cases_response.py +91 -0
- eval_studio_client/api/models/v1_dashboard_report.py +109 -0
- eval_studio_client/api/models/v1_dashboard_report_result.py +139 -0
- eval_studio_client/api/models/v1_dashboard_test_case_annotation.py +112 -0
- eval_studio_client/api/models/v1_data_fragment.py +91 -0
- eval_studio_client/api/models/v1_deep_compare_leaderboards_request.py +96 -0
- eval_studio_client/api/models/v1_deep_compare_leaderboards_response.py +91 -0
- eval_studio_client/api/models/v1_diff_item.py +137 -0
- eval_studio_client/api/models/v1_evaluation_type.py +39 -0
- eval_studio_client/api/models/v1_flipped_metric.py +91 -0
- eval_studio_client/api/models/v1_get_dashboard_report_response.py +91 -0
- eval_studio_client/api/models/v1_human_decision.py +38 -0
- eval_studio_client/api/models/v1_info.py +4 -2
- eval_studio_client/api/models/v1_leaderboard.py +5 -2
- eval_studio_client/api/models/v1_leaderboard_cmp_report.py +93 -0
- eval_studio_client/api/models/v1_leaderboard_comparison_item.py +91 -0
- eval_studio_client/api/models/v1_leaderboard_info.py +97 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +23 -9
- eval_studio_client/api/models/v1_leaderboard_report_result.py +21 -10
- eval_studio_client/api/models/v1_leaderboard_report_result_view.py +38 -0
- eval_studio_client/api/models/v1_leaderboard_test_case_annotation.py +112 -0
- eval_studio_client/api/models/v1_list_dashboard_test_case_annotations_response.py +95 -0
- eval_studio_client/api/models/v1_list_leaderboard_test_case_annotations_response.py +95 -0
- eval_studio_client/api/models/v1_list_operations_response.py +5 -3
- eval_studio_client/api/models/v1_list_unseen_operations_response.py +97 -0
- eval_studio_client/api/models/v1_mark_operation_seen_by_creator_response.py +91 -0
- eval_studio_client/api/models/v1_metric.py +89 -0
- eval_studio_client/api/models/v1_metric_average.py +101 -0
- eval_studio_client/api/models/v1_metric_meta.py +109 -0
- eval_studio_client/api/models/v1_metric_score.py +6 -1
- eval_studio_client/api/models/v1_metric_scores.py +1 -1
- eval_studio_client/api/models/v1_model_type.py +2 -1
- eval_studio_client/api/models/v1_models_comparisons.py +93 -0
- eval_studio_client/api/models/v1_models_comparisons_metrics.py +103 -0
- eval_studio_client/api/models/v1_models_overview.py +97 -0
- eval_studio_client/api/models/v1_operation.py +6 -2
- eval_studio_client/api/models/v1_operation_view.py +38 -0
- eval_studio_client/api/models/v1_retrieved_context_diff.py +101 -0
- eval_studio_client/api/models/v1_stats.py +16 -2
- eval_studio_client/api/models/v1_technical_metrics.py +96 -0
- eval_studio_client/api/models/v1_technical_metrics_detail.py +95 -0
- eval_studio_client/api/models/v1_test_case_leaderboard_item.py +91 -0
- eval_studio_client/api/models/v1_test_case_relationship_info.py +91 -0
- eval_studio_client/api/models/v1_test_case_result.py +157 -0
- eval_studio_client/api/models/v1_text_similarity_metric.py +39 -0
- eval_studio_client/api/models/v1_update_dashboard_test_case_annotation_response.py +91 -0
- eval_studio_client/api/models/v1_update_leaderboard_test_case_annotation_response.py +91 -0
- eval_studio_client/api/models/v1_workflow_node_type.py +1 -0
- eval_studio_client/api/models/v1_workflow_type.py +1 -0
- eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +6 -0
- eval_studio_client/api/test/test_dashboard_report_service_api.py +37 -0
- eval_studio_client/api/test/test_dashboard_test_case_annotation_service_api.py +43 -0
- eval_studio_client/api/test/test_leaderboard_report_service_api.py +6 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +12 -0
- eval_studio_client/api/test/test_leaderboard_test_case_annotation_service_api.py +43 -0
- eval_studio_client/api/test/test_operation_service_api.py +18 -0
- eval_studio_client/api/test/test_required_the_dashboard_test_case_annotation_to_update.py +57 -0
- eval_studio_client/api/test/test_required_the_leaderboard_test_case_annotation_to_update.py +57 -0
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +2 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +2 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +2 -1
- eval_studio_client/api/test/test_test_case_service_api.py +6 -0
- eval_studio_client/api/test/test_test_case_service_append_test_cases_request.py +52 -0
- eval_studio_client/api/test/test_test_service_api.py +6 -0
- eval_studio_client/api/test/test_v1_abort_operation_response.py +2 -1
- eval_studio_client/api/test/test_v1_actual_output_meta.py +61 -0
- eval_studio_client/api/test/test_v1_actual_output_meta_diff.py +66 -0
- eval_studio_client/api/test/test_v1_agent_chat_activity_diagram.py +65 -0
- eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_edge.py +53 -0
- eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_node.py +53 -0
- eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_row.py +56 -0
- eval_studio_client/api/test/test_v1_agent_chat_script_usage.py +54 -0
- eval_studio_client/api/test/test_v1_agent_chat_scripts_bar_chart.py +57 -0
- eval_studio_client/api/test/test_v1_agent_chat_tool_usage.py +54 -0
- eval_studio_client/api/test/test_v1_agent_chat_tools_bar_chart.py +57 -0
- eval_studio_client/api/test/test_v1_all_metric_scores.py +53 -0
- eval_studio_client/api/test/test_v1_append_test_cases_response.py +74 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +2 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_request.py +120 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_response.py +72 -0
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_get_operations_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_mark_operation_seen_by_creator_response.py +74 -0
- eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_request.py +55 -0
- eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_response.py +255 -0
- eval_studio_client/api/test/test_v1_comparison_item.py +233 -0
- eval_studio_client/api/test/test_v1_comparison_metric_score.py +52 -0
- eval_studio_client/api/test/test_v1_comparison_result.py +258 -0
- eval_studio_client/api/test/test_v1_comparison_summary.py +53 -0
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +2 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_request.py +2 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +2 -1
- eval_studio_client/api/test/test_v1_create_test_from_test_cases_request.py +54 -0
- eval_studio_client/api/test/test_v1_create_test_from_test_cases_response.py +68 -0
- eval_studio_client/api/test/test_v1_dashboard_report.py +142 -0
- eval_studio_client/api/test/test_v1_dashboard_report_result.py +72 -0
- eval_studio_client/api/test/test_v1_dashboard_test_case_annotation.py +58 -0
- eval_studio_client/api/test/test_v1_data_fragment.py +57 -0
- eval_studio_client/api/test/test_v1_deep_compare_leaderboards_request.py +55 -0
- eval_studio_client/api/test/test_v1_deep_compare_leaderboards_response.py +255 -0
- eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_diff_item.py +226 -0
- eval_studio_client/api/test/test_v1_evaluation_type.py +33 -0
- eval_studio_client/api/test/test_v1_finalize_operation_response.py +2 -1
- eval_studio_client/api/test/test_v1_flipped_metric.py +53 -0
- eval_studio_client/api/test/test_v1_generate_test_cases_response.py +2 -1
- eval_studio_client/api/test/test_v1_get_dashboard_report_response.py +143 -0
- eval_studio_client/api/test/test_v1_get_info_response.py +4 -1
- eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +39 -2
- eval_studio_client/api/test/test_v1_get_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_get_operation_response.py +2 -1
- eval_studio_client/api/test/test_v1_get_stats_response.py +3 -1
- eval_studio_client/api/test/test_v1_human_decision.py +33 -0
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +2 -1
- eval_studio_client/api/test/test_v1_info.py +4 -1
- eval_studio_client/api/test/test_v1_leaderboard.py +2 -1
- eval_studio_client/api/test/test_v1_leaderboard_cmp_report.py +254 -0
- eval_studio_client/api/test/test_v1_leaderboard_comparison_item.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard_info.py +57 -0
- eval_studio_client/api/test/test_v1_leaderboard_report.py +39 -2
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +33 -1
- eval_studio_client/api/test/test_v1_leaderboard_report_result.py +39 -2
- eval_studio_client/api/test/test_v1_leaderboard_report_result_view.py +33 -0
- eval_studio_client/api/test/test_v1_leaderboard_test_case_annotation.py +58 -0
- eval_studio_client/api/test/test_v1_list_dashboard_test_case_annotations_response.py +61 -0
- eval_studio_client/api/test/test_v1_list_leaderboard_test_case_annotations_response.py +61 -0
- eval_studio_client/api/test/test_v1_list_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_list_operations_response.py +4 -2
- eval_studio_client/api/test/test_v1_list_unseen_operations_response.py +75 -0
- eval_studio_client/api/test/test_v1_mark_operation_seen_by_creator_response.py +72 -0
- eval_studio_client/api/test/test_v1_metric.py +52 -0
- eval_studio_client/api/test/test_v1_metric_average.py +58 -0
- eval_studio_client/api/test/test_v1_metric_meta.py +66 -0
- eval_studio_client/api/test/test_v1_models_comparisons.py +54 -0
- eval_studio_client/api/test/test_v1_models_comparisons_metrics.py +65 -0
- eval_studio_client/api/test/test_v1_models_overview.py +60 -0
- eval_studio_client/api/test/test_v1_operation.py +2 -1
- eval_studio_client/api/test/test_v1_operation_view.py +33 -0
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +2 -1
- eval_studio_client/api/test/test_v1_retrieved_context_diff.py +66 -0
- eval_studio_client/api/test/test_v1_stats.py +3 -1
- eval_studio_client/api/test/test_v1_technical_metrics.py +62 -0
- eval_studio_client/api/test/test_v1_technical_metrics_detail.py +55 -0
- eval_studio_client/api/test/test_v1_test_case_leaderboard_item.py +53 -0
- eval_studio_client/api/test/test_v1_test_case_relationship_info.py +53 -0
- eval_studio_client/api/test/test_v1_test_case_result.py +106 -0
- eval_studio_client/api/test/test_v1_text_similarity_metric.py +33 -0
- eval_studio_client/api/test/test_v1_update_dashboard_test_case_annotation_response.py +59 -0
- eval_studio_client/api/test/test_v1_update_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_update_leaderboard_test_case_annotation_response.py +59 -0
- eval_studio_client/api/test/test_v1_update_operation_response.py +2 -1
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +2340 -210
- eval_studio_client/models.py +18 -6
- {eval_studio_client-1.2.4a2.dist-info → eval_studio_client-1.3.0.dist-info}/METADATA +2 -2
- {eval_studio_client-1.2.4a2.dist-info → eval_studio_client-1.3.0.dist-info}/RECORD +306 -111
- {eval_studio_client-1.2.4a2.dist-info → eval_studio_client-1.3.0.dist-info}/WHEEL +0 -0
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_list_leaderboard_test_case_annotations_response.py
@@ -0,0 +1,61 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_list_leaderboard_test_case_annotations_response import V1ListLeaderboardTestCaseAnnotationsResponse
+
+class TestV1ListLeaderboardTestCaseAnnotationsResponse(unittest.TestCase):
+    """V1ListLeaderboardTestCaseAnnotationsResponse unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1ListLeaderboardTestCaseAnnotationsResponse:
+        """Test V1ListLeaderboardTestCaseAnnotationsResponse
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1ListLeaderboardTestCaseAnnotationsResponse`
+        """
+        model = V1ListLeaderboardTestCaseAnnotationsResponse()
+        if include_optional:
+            return V1ListLeaderboardTestCaseAnnotationsResponse(
+                leaderboard_test_case_annotations = [
+                    eval_studio_client.api.models.v1_leaderboard_test_case_annotation.v1LeaderboardTestCaseAnnotation(
+                        name = '',
+                        create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                        creator = '',
+                        update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                        updater = '',
+                        parent = '',
+                        key = '',
+                        value = eval_studio_client.api.models.value.value(), )
+                    ]
+            )
+        else:
+            return V1ListLeaderboardTestCaseAnnotationsResponse(
+        )
+        """
+
+    def testV1ListLeaderboardTestCaseAnnotationsResponse(self):
+        """Test V1ListLeaderboardTestCaseAnnotationsResponse"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
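
The stub above pins down the field names of the new `V1LeaderboardTestCaseAnnotation` model (the dashboard variant mirrors it). For orientation, a minimal sketch of building one by hand, assuming the module-level import path that the generated stubs themselves use; the `parent` resource name, annotation `key`, and `value` payload are illustrative placeholders, not values taken from this diff:

```python
# Import path follows the generated package layout listed above
# (eval_studio_client/api/models/v1_leaderboard_test_case_annotation.py).
from eval_studio_client.api.models.v1_leaderboard_test_case_annotation import (
    V1LeaderboardTestCaseAnnotation,
)

# All fields are optional in the generated model (the stub's else-branch
# constructs it with no arguments); values below are illustrative assumptions.
annotation = V1LeaderboardTestCaseAnnotation(
    parent="leaderboards/123",        # hypothetical parent resource name
    key="human_decision",             # hypothetical annotation key
    value={"decision": "APPROVED"},   # free-form value (protobuf Value in the schema)
)
print(annotation.key, annotation.value)
```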
--- a/eval_studio_client/api/test/test_v1_list_leaderboards_response.py
+++ b/eval_studio_client/api/test/test_v1_list_leaderboards_response.py
@@ -99,7 +99,8 @@ class TestV1ListLeaderboardsResponse(unittest.TestCase):
                         h2ogpte_collection = '',
                         type = 'LEADERBOARD_TYPE_UNSPECIFIED',
                         demo = True,
-                        test_lab = '', )
+                        test_lab = '',
+                        evaluation_type = 'EVALUATION_TYPE_UNSPECIFIED', )
                     ],
                 next_page_token = ''
             )
--- a/eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py
+++ b/eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py
@@ -99,7 +99,8 @@ class TestV1ListMostRecentLeaderboardsResponse(unittest.TestCase):
                         h2ogpte_collection = '',
                         type = 'LEADERBOARD_TYPE_UNSPECIFIED',
                         demo = True,
-                        test_lab = '', )
+                        test_lab = '',
+                        evaluation_type = 'EVALUATION_TYPE_UNSPECIFIED', )
                     ]
             )
         else:
--- a/eval_studio_client/api/test/test_v1_list_operations_response.py
+++ b/eval_studio_client/api/test/test_v1_list_operations_response.py
@@ -56,8 +56,10 @@ class TestV1ListOperationsResponse(unittest.TestCase):
                             'key' : None
                             }
                         ], ),
-                    response = , )
-                ]
+                    response = ,
+                    seen_by_creator_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
+                ],
+            total_size = 56
             )
         else:
             return V1ListOperationsResponse(
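
This hunk reflects two 1.3.0 additions: a `seen_by_creator_time` timestamp on `v1Operation` and a `total_size` count on `V1ListOperationsResponse`. A hedged sketch of how client code might consume the new fields; the helper itself is hypothetical, and the assumption that the timestamp stays unset until an operation is marked seen is inferred from the new MarkOperationSeenByCreator responses in this release, not documented in the diff:

```python
from eval_studio_client.api.models.v1_list_operations_response import V1ListOperationsResponse

def summarize_unseen(resp: V1ListOperationsResponse) -> str:
    """Hypothetical helper: count operations the creator has not viewed yet.

    Assumes seen_by_creator_time is None until the operation is marked seen
    via the new MarkOperationSeenByCreator / BatchMarkOperationSeenByCreator
    endpoints added in this release.
    """
    ops = resp.operations or []                # operations list, per the stub above
    unseen = sum(1 for op in ops if op.seen_by_creator_time is None)
    return f"{unseen} unseen of {resp.total_size} total operations"
```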
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_list_unseen_operations_response.py
@@ -0,0 +1,75 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_list_unseen_operations_response import V1ListUnseenOperationsResponse
+
+class TestV1ListUnseenOperationsResponse(unittest.TestCase):
+    """V1ListUnseenOperationsResponse unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1ListUnseenOperationsResponse:
+        """Test V1ListUnseenOperationsResponse
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1ListUnseenOperationsResponse`
+        """
+        model = V1ListUnseenOperationsResponse()
+        if include_optional:
+            return V1ListUnseenOperationsResponse(
+                operations = [
+                    eval_studio_client.api.models.v1_operation.v1Operation(
+                        name = '',
+                        create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                        creator = '',
+                        update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                        updater = '',
+                        delete_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                        deleter = '',
+                        metadata = {
+                            'key' : None
+                            },
+                        done = True,
+                        error = eval_studio_client.api.models.rpc_status.rpcStatus(
+                            code = 56,
+                            message = '',
+                            details = [
+                                {
+                                    'key' : None
+                                    }
+                                ], ),
+                        response = ,
+                        seen_by_creator_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
+                    ],
+                total_size = 56
+            )
+        else:
+            return V1ListUnseenOperationsResponse(
+        )
+        """
+
+    def testV1ListUnseenOperationsResponse(self):
+        """Test V1ListUnseenOperationsResponse"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_mark_operation_seen_by_creator_response.py
@@ -0,0 +1,72 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_mark_operation_seen_by_creator_response import V1MarkOperationSeenByCreatorResponse
+
+class TestV1MarkOperationSeenByCreatorResponse(unittest.TestCase):
+    """V1MarkOperationSeenByCreatorResponse unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1MarkOperationSeenByCreatorResponse:
+        """Test V1MarkOperationSeenByCreatorResponse
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1MarkOperationSeenByCreatorResponse`
+        """
+        model = V1MarkOperationSeenByCreatorResponse()
+        if include_optional:
+            return V1MarkOperationSeenByCreatorResponse(
+                operation = eval_studio_client.api.models.v1_operation.v1Operation(
+                    name = '',
+                    create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                    creator = '',
+                    update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                    updater = '',
+                    delete_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                    deleter = '',
+                    metadata = {
+                        'key' : None
+                        },
+                    done = True,
+                    error = eval_studio_client.api.models.rpc_status.rpcStatus(
+                        code = 56,
+                        message = '',
+                        details = [
+                            {
+                                'key' : None
+                                }
+                            ], ),
+                    response = ,
+                    seen_by_creator_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
+            )
+        else:
+            return V1MarkOperationSeenByCreatorResponse(
+        )
+        """
+
+    def testV1MarkOperationSeenByCreatorResponse(self):
+        """Test V1MarkOperationSeenByCreatorResponse"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_metric.py
@@ -0,0 +1,52 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_metric import V1Metric
+
+class TestV1Metric(unittest.TestCase):
+    """V1Metric unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1Metric:
+        """Test V1Metric
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1Metric`
+        """
+        model = V1Metric()
+        if include_optional:
+            return V1Metric(
+                key = '',
+                value = 1.337
+            )
+        else:
+            return V1Metric(
+        )
+        """
+
+    def testV1Metric(self):
+        """Test V1Metric"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_metric_average.py
@@ -0,0 +1,58 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_metric_average import V1MetricAverage
+
+class TestV1MetricAverage(unittest.TestCase):
+    """V1MetricAverage unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1MetricAverage:
+        """Test V1MetricAverage
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1MetricAverage`
+        """
+        model = V1MetricAverage()
+        if include_optional:
+            return V1MetricAverage(
+                metric_key = '',
+                baseline_avg = 1.337,
+                current_avg = 1.337,
+                diff = 1.337,
+                baseline_better_wins = 56,
+                current_better_wins = 56,
+                baseline_rank_avg = 1.337,
+                current_rank_avg = 1.337
+            )
+        else:
+            return V1MetricAverage(
+        )
+        """
+
+    def testV1MetricAverage(self):
+        """Test V1MetricAverage"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
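
The two stubs above fix the field names of the new scoring models. A small usage sketch, assuming the obvious reading that `diff` is `current_avg - baseline_avg`; the diff itself does not document that convention, and the concrete keys and values below are illustrative:

```python
from eval_studio_client.api.models.v1_metric import V1Metric
from eval_studio_client.api.models.v1_metric_average import V1MetricAverage

# Every field is optional per the stubs' else-branches; values are illustrative.
metric = V1Metric(key="accuracy", value=0.87)

average = V1MetricAverage(
    metric_key=metric.key,
    baseline_avg=0.84,
    current_avg=metric.value,
    diff=round(metric.value - 0.84, 6),  # assumed convention: current minus baseline
    baseline_better_wins=3,
    current_better_wins=5,
)
print(average.metric_key, average.diff)
```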
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_metric_meta.py
@@ -0,0 +1,66 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_metric_meta import V1MetricMeta
+
+class TestV1MetricMeta(unittest.TestCase):
+    """V1MetricMeta unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1MetricMeta:
+        """Test V1MetricMeta
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1MetricMeta`
+        """
+        model = V1MetricMeta()
+        if include_optional:
+            return V1MetricMeta(
+                key = '',
+                display_name = '',
+                data_type = '',
+                display_value = '',
+                description = '',
+                value_range = [
+                    1.337
+                    ],
+                value_enum = [
+                    ''
+                    ],
+                higher_is_better = True,
+                threshold = 1.337,
+                is_primary_metric = True,
+                parent_metric = '',
+                exclude = True
+            )
+        else:
+            return V1MetricMeta(
+        )
+        """
+
+    def testV1MetricMeta(self):
+        """Test V1MetricMeta"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
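
`V1MetricMeta` carries display metadata plus `higher_is_better` and `threshold`. A sketch of one plausible interpretation, a pass/fail check against the threshold; this reading and the `passes` helper are assumptions, not behavior documented by the diff:

```python
from eval_studio_client.api.models.v1_metric_meta import V1MetricMeta

# Illustrative metadata entry; field names come from the stub above.
meta = V1MetricMeta(
    key="faithfulness",
    display_name="Faithfulness",
    higher_is_better=True,
    threshold=0.75,
)

def passes(score: float, meta: V1MetricMeta) -> bool:
    """Hypothetical check: does a score clear the metric's threshold,
    respecting the direction given by higher_is_better?"""
    if meta.higher_is_better:
        return score >= meta.threshold
    return score <= meta.threshold

print(passes(0.8, meta))  # True under the assumed interpretation
```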
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_models_comparisons.py
@@ -0,0 +1,54 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_models_comparisons import V1ModelsComparisons
+
+class TestV1ModelsComparisons(unittest.TestCase):
+    """V1ModelsComparisons unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1ModelsComparisons:
+        """Test V1ModelsComparisons
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1ModelsComparisons`
+        """
+        model = V1ModelsComparisons()
+        if include_optional:
+            return V1ModelsComparisons(
+                test_case_ranks_baseline = 56,
+                test_case_ranks_current = 56,
+                test_case_wins_baseline = 56,
+                test_case_wins_current = 56
+            )
+        else:
+            return V1ModelsComparisons(
+        )
+        """
+
+    def testV1ModelsComparisons(self):
+        """Test V1ModelsComparisons"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_models_comparisons_metrics.py
@@ -0,0 +1,65 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_models_comparisons_metrics import V1ModelsComparisonsMetrics
+
+class TestV1ModelsComparisonsMetrics(unittest.TestCase):
+    """V1ModelsComparisonsMetrics unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1ModelsComparisonsMetrics:
+        """Test V1ModelsComparisonsMetrics
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1ModelsComparisonsMetrics`
+        """
+        model = V1ModelsComparisonsMetrics()
+        if include_optional:
+            return V1ModelsComparisonsMetrics(
+                metrics_ranks_baseline = 1.337,
+                metrics_ranks_current = 1.337,
+                metrics_wins_baseline = 56,
+                metrics_wins_current = 56,
+                metrics_averages = [
+                    eval_studio_client.api.models.metric_average_comparison.Metric average comparison(
+                        metric_key = '',
+                        baseline_avg = 1.337,
+                        current_avg = 1.337,
+                        diff = 1.337,
+                        baseline_better_wins = 56,
+                        current_better_wins = 56,
+                        baseline_rank_avg = 1.337,
+                        current_rank_avg = 1.337, )
+                    ]
+            )
+        else:
+            return V1ModelsComparisonsMetrics(
+        )
+        """
+
+    def testV1ModelsComparisonsMetrics(self):
+        """Test V1ModelsComparisonsMetrics"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_models_overview.py
@@ -0,0 +1,60 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_models_overview import V1ModelsOverview
+
+class TestV1ModelsOverview(unittest.TestCase):
+    """V1ModelsOverview unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1ModelsOverview:
+        """Test V1ModelsOverview
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1ModelsOverview`
+        """
+        model = V1ModelsOverview()
+        if include_optional:
+            return V1ModelsOverview(
+                baseline_model_key = '',
+                current_model_key = '',
+                baseline_model_name = '',
+                baseline_collection_id = [
+                    ''
+                    ],
+                current_model_name = '',
+                current_collection_id = [
+                    ''
+                    ]
+            )
+        else:
+            return V1ModelsOverview(
+        )
+        """
+
+    def testV1ModelsOverview(self):
+        """Test V1ModelsOverview"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
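
Together, `V1ModelsOverview`, `V1ModelsComparisons`, and `V1ModelsComparisonsMetrics` describe the baseline-vs-current data behind the new leaderboard comparison endpoints (`CmpLeaderboardReports`, `DeepCompareLeaderboards`). A sketch of assembling them by hand; the element type of `metrics_averages` is taken to be `V1MetricAverage` because its fields match that stub exactly, which is an inference, and all keys and names below are illustrative:

```python
from eval_studio_client.api.models.v1_metric_average import V1MetricAverage
from eval_studio_client.api.models.v1_models_comparisons_metrics import V1ModelsComparisonsMetrics
from eval_studio_client.api.models.v1_models_overview import V1ModelsOverview

# Which two models are being compared (illustrative keys/names).
overview = V1ModelsOverview(
    baseline_model_key="baseline-model",
    current_model_key="candidate-model",
    baseline_model_name="Baseline",
    current_model_name="Candidate",
)

# Aggregate metric comparison between the two models.
metrics = V1ModelsComparisonsMetrics(
    metrics_wins_baseline=2,
    metrics_wins_current=4,
    metrics_averages=[
        V1MetricAverage(metric_key="accuracy", baseline_avg=0.84, current_avg=0.87),
    ],
)
print(overview.current_model_name, metrics.metrics_wins_current)
```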
--- /dev/null
+++ b/eval_studio_client/api/test/test_v1_operation_view.py
@@ -0,0 +1,33 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_operation_view import V1OperationView
+
+class TestV1OperationView(unittest.TestCase):
+    """V1OperationView unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def testV1OperationView(self):
+        """Test V1OperationView"""
+        # inst = V1OperationView()
+
+if __name__ == '__main__':
+    unittest.main()
--- a/eval_studio_client/api/test/test_v1_process_workflow_node_response.py
+++ b/eval_studio_client/api/test/test_v1_process_workflow_node_response.py
@@ -55,7 +55,8 @@ class TestV1ProcessWorkflowNodeResponse(unittest.TestCase):
                             'key' : None
                             }
                         ], ),
-                    response = , )
+                    response = ,
+                    seen_by_creator_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
             )
         else:
             return V1ProcessWorkflowNodeResponse(
|