eval-studio-client: 1.2.5 → 1.3.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/api/__init__.py +65 -0
- eval_studio_client/api/api/__init__.py +3 -0
- eval_studio_client/api/api/dashboard_report_service_api.py +292 -0
- eval_studio_client/api/api/dashboard_service_api.py +16 -16
- eval_studio_client/api/api/dashboard_test_case_annotation_service_api.py +611 -0
- eval_studio_client/api/api/document_service_api.py +16 -16
- eval_studio_client/api/api/evaluation_service_api.py +12 -12
- eval_studio_client/api/api/evaluator_service_api.py +16 -16
- eval_studio_client/api/api/leaderboard_report_service_api.py +304 -17
- eval_studio_client/api/api/leaderboard_service_api.py +554 -16
- eval_studio_client/api/api/leaderboard_test_case_annotation_service_api.py +611 -0
- eval_studio_client/api/api/model_service_api.py +16 -16
- eval_studio_client/api/api/operation_service_api.py +821 -17
- eval_studio_client/api/api/perturbator_service_api.py +22 -22
- eval_studio_client/api/api/test_case_service_api.py +300 -16
- eval_studio_client/api/api/test_class_service_api.py +16 -16
- eval_studio_client/api/api/test_service_api.py +285 -16
- eval_studio_client/api/api/workflow_node_service_api.py +16 -16
- eval_studio_client/api/api/workflow_service_api.py +16 -16
- eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +2 -1
- eval_studio_client/api/docs/DashboardReportServiceApi.md +75 -0
- eval_studio_client/api/docs/DashboardServiceApi.md +5 -5
- eval_studio_client/api/docs/DashboardTestCaseAnnotationServiceApi.md +149 -0
- eval_studio_client/api/docs/DocumentServiceApi.md +5 -5
- eval_studio_client/api/docs/EvaluationServiceApi.md +4 -4
- eval_studio_client/api/docs/EvaluatorServiceApi.md +5 -5
- eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -5
- eval_studio_client/api/docs/LeaderboardServiceApi.md +141 -5
- eval_studio_client/api/docs/LeaderboardTestCaseAnnotationServiceApi.md +149 -0
- eval_studio_client/api/docs/ModelServiceApi.md +5 -5
- eval_studio_client/api/docs/OperationServiceApi.md +215 -8
- eval_studio_client/api/docs/PerturbatorServiceApi.md +7 -7
- eval_studio_client/api/docs/RequiredTheDashboardTestCaseAnnotationToUpdate.md +35 -0
- eval_studio_client/api/docs/RequiredTheLeaderboardTestCaseAnnotationToUpdate.md +35 -0
- eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +1 -0
- eval_studio_client/api/docs/RequiredTheOperationToFinalize.md +1 -0
- eval_studio_client/api/docs/RequiredTheOperationToUpdate.md +1 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +75 -5
- eval_studio_client/api/docs/TestCaseServiceAppendTestCasesRequest.md +30 -0
- eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
- eval_studio_client/api/docs/TestServiceApi.md +73 -5
- eval_studio_client/api/docs/V1ActualOutputMeta.md +30 -0
- eval_studio_client/api/docs/V1ActualOutputMetaDiff.md +36 -0
- eval_studio_client/api/docs/V1AgentChatActivityDiagram.md +31 -0
- eval_studio_client/api/docs/V1AgentChatActivityDiagramEdge.md +32 -0
- eval_studio_client/api/docs/V1AgentChatActivityDiagramNode.md +32 -0
- eval_studio_client/api/docs/V1AgentChatActivityDiagramRow.md +30 -0
- eval_studio_client/api/docs/V1AgentChatScriptUsage.md +33 -0
- eval_studio_client/api/docs/V1AgentChatScriptsBarChart.md +30 -0
- eval_studio_client/api/docs/V1AgentChatToolUsage.md +33 -0
- eval_studio_client/api/docs/V1AgentChatToolsBarChart.md +30 -0
- eval_studio_client/api/docs/V1AllMetricScores.md +29 -0
- eval_studio_client/api/docs/V1AppendTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheRequest.md +31 -0
- eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchMarkOperationSeenByCreatorResponse.md +29 -0
- eval_studio_client/api/docs/V1CmpLeaderboardReportsRequest.md +33 -0
- eval_studio_client/api/docs/V1CmpLeaderboardReportsResponse.md +29 -0
- eval_studio_client/api/docs/V1ComparisonItem.md +36 -0
- eval_studio_client/api/docs/V1ComparisonMetricScore.md +30 -0
- eval_studio_client/api/docs/V1ComparisonResult.md +31 -0
- eval_studio_client/api/docs/V1ComparisonSummary.md +31 -0
- eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1CreateTestFromTestCasesRequest.md +32 -0
- eval_studio_client/api/docs/V1CreateTestFromTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1DashboardReport.md +31 -0
- eval_studio_client/api/docs/V1DashboardReportResult.md +39 -0
- eval_studio_client/api/docs/V1DashboardTestCaseAnnotation.md +36 -0
- eval_studio_client/api/docs/V1DataFragment.md +31 -0
- eval_studio_client/api/docs/V1DeepCompareLeaderboardsRequest.md +33 -0
- eval_studio_client/api/docs/V1DeepCompareLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1DiffItem.md +36 -0
- eval_studio_client/api/docs/V1EvaluationType.md +12 -0
- eval_studio_client/api/docs/V1FlippedMetric.md +31 -0
- eval_studio_client/api/docs/V1GetDashboardReportResponse.md +29 -0
- eval_studio_client/api/docs/V1HumanDecision.md +12 -0
- eval_studio_client/api/docs/V1Info.md +1 -0
- eval_studio_client/api/docs/V1Leaderboard.md +1 -0
- eval_studio_client/api/docs/V1LeaderboardCmpReport.md +30 -0
- eval_studio_client/api/docs/V1LeaderboardComparisonItem.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardInfo.md +30 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +6 -3
- eval_studio_client/api/docs/V1LeaderboardReportResult.md +11 -8
- eval_studio_client/api/docs/V1LeaderboardReportResultView.md +12 -0
- eval_studio_client/api/docs/V1LeaderboardTestCaseAnnotation.md +36 -0
- eval_studio_client/api/docs/V1ListDashboardTestCaseAnnotationsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListLeaderboardTestCaseAnnotationsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListOperationsResponse.md +1 -0
- eval_studio_client/api/docs/V1ListUnseenOperationsResponse.md +30 -0
- eval_studio_client/api/docs/V1MarkOperationSeenByCreatorResponse.md +29 -0
- eval_studio_client/api/docs/V1Metric.md +30 -0
- eval_studio_client/api/docs/V1MetricAverage.md +36 -0
- eval_studio_client/api/docs/V1MetricMeta.md +40 -0
- eval_studio_client/api/docs/V1MetricScore.md +1 -1
- eval_studio_client/api/docs/V1MetricScores.md +1 -1
- eval_studio_client/api/docs/V1ModelType.md +1 -1
- eval_studio_client/api/docs/V1ModelsComparisons.md +32 -0
- eval_studio_client/api/docs/V1ModelsComparisonsMetrics.md +33 -0
- eval_studio_client/api/docs/V1ModelsOverview.md +34 -0
- eval_studio_client/api/docs/V1Operation.md +1 -0
- eval_studio_client/api/docs/V1OperationView.md +12 -0
- eval_studio_client/api/docs/V1RetrievedContextDiff.md +36 -0
- eval_studio_client/api/docs/V1Stats.md +2 -0
- eval_studio_client/api/docs/V1TechnicalMetrics.md +30 -0
- eval_studio_client/api/docs/V1TechnicalMetricsDetail.md +33 -0
- eval_studio_client/api/docs/V1TestCaseLeaderboardItem.md +31 -0
- eval_studio_client/api/docs/V1TestCaseRelationshipInfo.md +31 -0
- eval_studio_client/api/docs/V1TestCaseResult.md +48 -0
- eval_studio_client/api/docs/V1TextSimilarityMetric.md +12 -0
- eval_studio_client/api/docs/V1UpdateDashboardTestCaseAnnotationResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateLeaderboardTestCaseAnnotationResponse.md +29 -0
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +5 -5
- eval_studio_client/api/docs/WorkflowServiceApi.md +5 -5
- eval_studio_client/api/models/__init__.py +62 -0
- eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +17 -2
- eval_studio_client/api/models/required_the_dashboard_test_case_annotation_to_update.py +108 -0
- eval_studio_client/api/models/required_the_leaderboard_test_case_annotation_to_update.py +108 -0
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +5 -2
- eval_studio_client/api/models/required_the_operation_to_finalize.py +6 -2
- eval_studio_client/api/models/required_the_operation_to_update.py +6 -2
- eval_studio_client/api/models/test_case_service_append_test_cases_request.py +89 -0
- eval_studio_client/api/models/v1_actual_output_meta.py +97 -0
- eval_studio_client/api/models/v1_actual_output_meta_diff.py +101 -0
- eval_studio_client/api/models/v1_agent_chat_activity_diagram.py +109 -0
- eval_studio_client/api/models/v1_agent_chat_activity_diagram_edge.py +97 -0
- eval_studio_client/api/models/v1_agent_chat_activity_diagram_node.py +97 -0
- eval_studio_client/api/models/v1_agent_chat_activity_diagram_row.py +97 -0
- eval_studio_client/api/models/v1_agent_chat_script_usage.py +101 -0
- eval_studio_client/api/models/v1_agent_chat_scripts_bar_chart.py +102 -0
- eval_studio_client/api/models/v1_agent_chat_tool_usage.py +101 -0
- eval_studio_client/api/models/v1_agent_chat_tools_bar_chart.py +102 -0
- eval_studio_client/api/models/v1_all_metric_scores.py +87 -0
- eval_studio_client/api/models/v1_append_test_cases_response.py +95 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_request.py +99 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_response.py +91 -0
- eval_studio_client/api/models/v1_batch_mark_operation_seen_by_creator_response.py +95 -0
- eval_studio_client/api/models/v1_cmp_leaderboard_reports_request.py +96 -0
- eval_studio_client/api/models/v1_cmp_leaderboard_reports_response.py +91 -0
- eval_studio_client/api/models/v1_comparison_item.py +130 -0
- eval_studio_client/api/models/v1_comparison_metric_score.py +89 -0
- eval_studio_client/api/models/v1_comparison_result.py +120 -0
- eval_studio_client/api/models/v1_comparison_summary.py +91 -0
- eval_studio_client/api/models/v1_create_evaluation_request.py +5 -2
- eval_studio_client/api/models/v1_create_test_from_test_cases_request.py +93 -0
- eval_studio_client/api/models/v1_create_test_from_test_cases_response.py +91 -0
- eval_studio_client/api/models/v1_dashboard_report.py +109 -0
- eval_studio_client/api/models/v1_dashboard_report_result.py +139 -0
- eval_studio_client/api/models/v1_dashboard_test_case_annotation.py +112 -0
- eval_studio_client/api/models/v1_data_fragment.py +91 -0
- eval_studio_client/api/models/v1_deep_compare_leaderboards_request.py +96 -0
- eval_studio_client/api/models/v1_deep_compare_leaderboards_response.py +91 -0
- eval_studio_client/api/models/v1_diff_item.py +137 -0
- eval_studio_client/api/models/v1_evaluation_type.py +39 -0
- eval_studio_client/api/models/v1_flipped_metric.py +91 -0
- eval_studio_client/api/models/v1_get_dashboard_report_response.py +91 -0
- eval_studio_client/api/models/v1_human_decision.py +38 -0
- eval_studio_client/api/models/v1_info.py +4 -2
- eval_studio_client/api/models/v1_leaderboard.py +5 -2
- eval_studio_client/api/models/v1_leaderboard_cmp_report.py +93 -0
- eval_studio_client/api/models/v1_leaderboard_comparison_item.py +91 -0
- eval_studio_client/api/models/v1_leaderboard_info.py +97 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +23 -9
- eval_studio_client/api/models/v1_leaderboard_report_result.py +21 -10
- eval_studio_client/api/models/v1_leaderboard_report_result_view.py +38 -0
- eval_studio_client/api/models/v1_leaderboard_test_case_annotation.py +112 -0
- eval_studio_client/api/models/v1_list_dashboard_test_case_annotations_response.py +95 -0
- eval_studio_client/api/models/v1_list_leaderboard_test_case_annotations_response.py +95 -0
- eval_studio_client/api/models/v1_list_operations_response.py +5 -3
- eval_studio_client/api/models/v1_list_unseen_operations_response.py +97 -0
- eval_studio_client/api/models/v1_mark_operation_seen_by_creator_response.py +91 -0
- eval_studio_client/api/models/v1_metric.py +89 -0
- eval_studio_client/api/models/v1_metric_average.py +101 -0
- eval_studio_client/api/models/v1_metric_meta.py +109 -0
- eval_studio_client/api/models/v1_metric_score.py +6 -1
- eval_studio_client/api/models/v1_metric_scores.py +1 -1
- eval_studio_client/api/models/v1_model_type.py +2 -1
- eval_studio_client/api/models/v1_models_comparisons.py +93 -0
- eval_studio_client/api/models/v1_models_comparisons_metrics.py +103 -0
- eval_studio_client/api/models/v1_models_overview.py +97 -0
- eval_studio_client/api/models/v1_operation.py +6 -2
- eval_studio_client/api/models/v1_operation_view.py +38 -0
- eval_studio_client/api/models/v1_retrieved_context_diff.py +101 -0
- eval_studio_client/api/models/v1_stats.py +16 -2
- eval_studio_client/api/models/v1_technical_metrics.py +96 -0
- eval_studio_client/api/models/v1_technical_metrics_detail.py +95 -0
- eval_studio_client/api/models/v1_test_case_leaderboard_item.py +91 -0
- eval_studio_client/api/models/v1_test_case_relationship_info.py +91 -0
- eval_studio_client/api/models/v1_test_case_result.py +157 -0
- eval_studio_client/api/models/v1_text_similarity_metric.py +39 -0
- eval_studio_client/api/models/v1_update_dashboard_test_case_annotation_response.py +91 -0
- eval_studio_client/api/models/v1_update_leaderboard_test_case_annotation_response.py +91 -0
- eval_studio_client/api/models/v1_workflow_node_type.py +1 -0
- eval_studio_client/api/models/v1_workflow_type.py +1 -0
- eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +6 -0
- eval_studio_client/api/test/test_dashboard_report_service_api.py +37 -0
- eval_studio_client/api/test/test_dashboard_test_case_annotation_service_api.py +43 -0
- eval_studio_client/api/test/test_leaderboard_report_service_api.py +6 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +12 -0
- eval_studio_client/api/test/test_leaderboard_test_case_annotation_service_api.py +43 -0
- eval_studio_client/api/test/test_operation_service_api.py +18 -0
- eval_studio_client/api/test/test_required_the_dashboard_test_case_annotation_to_update.py +57 -0
- eval_studio_client/api/test/test_required_the_leaderboard_test_case_annotation_to_update.py +57 -0
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +2 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +2 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +2 -1
- eval_studio_client/api/test/test_test_case_service_api.py +6 -0
- eval_studio_client/api/test/test_test_case_service_append_test_cases_request.py +52 -0
- eval_studio_client/api/test/test_test_service_api.py +6 -0
- eval_studio_client/api/test/test_v1_abort_operation_response.py +2 -1
- eval_studio_client/api/test/test_v1_actual_output_meta.py +61 -0
- eval_studio_client/api/test/test_v1_actual_output_meta_diff.py +66 -0
- eval_studio_client/api/test/test_v1_agent_chat_activity_diagram.py +65 -0
- eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_edge.py +53 -0
- eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_node.py +53 -0
- eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_row.py +56 -0
- eval_studio_client/api/test/test_v1_agent_chat_script_usage.py +54 -0
- eval_studio_client/api/test/test_v1_agent_chat_scripts_bar_chart.py +57 -0
- eval_studio_client/api/test/test_v1_agent_chat_tool_usage.py +54 -0
- eval_studio_client/api/test/test_v1_agent_chat_tools_bar_chart.py +57 -0
- eval_studio_client/api/test/test_v1_all_metric_scores.py +53 -0
- eval_studio_client/api/test/test_v1_append_test_cases_response.py +74 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +2 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_request.py +120 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_response.py +72 -0
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_get_operations_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_batch_mark_operation_seen_by_creator_response.py +74 -0
- eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_request.py +55 -0
- eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_response.py +255 -0
- eval_studio_client/api/test/test_v1_comparison_item.py +233 -0
- eval_studio_client/api/test/test_v1_comparison_metric_score.py +52 -0
- eval_studio_client/api/test/test_v1_comparison_result.py +258 -0
- eval_studio_client/api/test/test_v1_comparison_summary.py +53 -0
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +2 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_request.py +2 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +2 -1
- eval_studio_client/api/test/test_v1_create_test_from_test_cases_request.py +54 -0
- eval_studio_client/api/test/test_v1_create_test_from_test_cases_response.py +68 -0
- eval_studio_client/api/test/test_v1_dashboard_report.py +142 -0
- eval_studio_client/api/test/test_v1_dashboard_report_result.py +72 -0
- eval_studio_client/api/test/test_v1_dashboard_test_case_annotation.py +58 -0
- eval_studio_client/api/test/test_v1_data_fragment.py +57 -0
- eval_studio_client/api/test/test_v1_deep_compare_leaderboards_request.py +55 -0
- eval_studio_client/api/test/test_v1_deep_compare_leaderboards_response.py +255 -0
- eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_diff_item.py +226 -0
- eval_studio_client/api/test/test_v1_evaluation_type.py +33 -0
- eval_studio_client/api/test/test_v1_finalize_operation_response.py +2 -1
- eval_studio_client/api/test/test_v1_flipped_metric.py +53 -0
- eval_studio_client/api/test/test_v1_generate_test_cases_response.py +2 -1
- eval_studio_client/api/test/test_v1_get_dashboard_report_response.py +143 -0
- eval_studio_client/api/test/test_v1_get_info_response.py +4 -1
- eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +39 -2
- eval_studio_client/api/test/test_v1_get_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_get_operation_response.py +2 -1
- eval_studio_client/api/test/test_v1_get_stats_response.py +3 -1
- eval_studio_client/api/test/test_v1_human_decision.py +33 -0
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +2 -1
- eval_studio_client/api/test/test_v1_info.py +4 -1
- eval_studio_client/api/test/test_v1_leaderboard.py +2 -1
- eval_studio_client/api/test/test_v1_leaderboard_cmp_report.py +254 -0
- eval_studio_client/api/test/test_v1_leaderboard_comparison_item.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard_info.py +57 -0
- eval_studio_client/api/test/test_v1_leaderboard_report.py +39 -2
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +33 -1
- eval_studio_client/api/test/test_v1_leaderboard_report_result.py +39 -2
- eval_studio_client/api/test/test_v1_leaderboard_report_result_view.py +33 -0
- eval_studio_client/api/test/test_v1_leaderboard_test_case_annotation.py +58 -0
- eval_studio_client/api/test/test_v1_list_dashboard_test_case_annotations_response.py +61 -0
- eval_studio_client/api/test/test_v1_list_leaderboard_test_case_annotations_response.py +61 -0
- eval_studio_client/api/test/test_v1_list_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +2 -1
- eval_studio_client/api/test/test_v1_list_operations_response.py +4 -2
- eval_studio_client/api/test/test_v1_list_unseen_operations_response.py +75 -0
- eval_studio_client/api/test/test_v1_mark_operation_seen_by_creator_response.py +72 -0
- eval_studio_client/api/test/test_v1_metric.py +52 -0
- eval_studio_client/api/test/test_v1_metric_average.py +58 -0
- eval_studio_client/api/test/test_v1_metric_meta.py +66 -0
- eval_studio_client/api/test/test_v1_models_comparisons.py +54 -0
- eval_studio_client/api/test/test_v1_models_comparisons_metrics.py +65 -0
- eval_studio_client/api/test/test_v1_models_overview.py +60 -0
- eval_studio_client/api/test/test_v1_operation.py +2 -1
- eval_studio_client/api/test/test_v1_operation_view.py +33 -0
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +2 -1
- eval_studio_client/api/test/test_v1_retrieved_context_diff.py +66 -0
- eval_studio_client/api/test/test_v1_stats.py +3 -1
- eval_studio_client/api/test/test_v1_technical_metrics.py +62 -0
- eval_studio_client/api/test/test_v1_technical_metrics_detail.py +55 -0
- eval_studio_client/api/test/test_v1_test_case_leaderboard_item.py +53 -0
- eval_studio_client/api/test/test_v1_test_case_relationship_info.py +53 -0
- eval_studio_client/api/test/test_v1_test_case_result.py +106 -0
- eval_studio_client/api/test/test_v1_text_similarity_metric.py +33 -0
- eval_studio_client/api/test/test_v1_update_dashboard_test_case_annotation_response.py +59 -0
- eval_studio_client/api/test/test_v1_update_leaderboard_response.py +2 -1
- eval_studio_client/api/test/test_v1_update_leaderboard_test_case_annotation_response.py +59 -0
- eval_studio_client/api/test/test_v1_update_operation_response.py +2 -1
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +2340 -210
- eval_studio_client/models.py +18 -6
- {eval_studio_client-1.2.5.dist-info → eval_studio_client-1.3.0.dist-info}/METADATA +2 -2
- {eval_studio_client-1.2.5.dist-info → eval_studio_client-1.3.0.dist-info}/RECORD +306 -111
- {eval_studio_client-1.2.5.dist-info → eval_studio_client-1.3.0.dist-info}/WHEEL +0 -0

eval_studio_client/api/models/v1_agent_chat_script_usage.py
@@ -0,0 +1,101 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1AgentChatScriptUsage(BaseModel):
+    """
+    AgentChatScriptUsage represents usage statistics for a single script.
+    """ # noqa: E501
+    name: Optional[StrictStr] = Field(default=None, description="Output only. Name of the script.")
+    success_count: Optional[StrictInt] = Field(default=None, description="Output only. Number of successful executions.", alias="successCount")
+    failure_count: Optional[StrictInt] = Field(default=None, description="Output only. Number of failed executions.", alias="failureCount")
+    total_count: Optional[StrictInt] = Field(default=None, description="Output only. Total number of executions.", alias="totalCount")
+    __properties: ClassVar[List[str]] = ["name", "successCount", "failureCount", "totalCount"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1AgentChatScriptUsage from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        * OpenAPI `readOnly` fields are excluded.
+        * OpenAPI `readOnly` fields are excluded.
+        * OpenAPI `readOnly` fields are excluded.
+        * OpenAPI `readOnly` fields are excluded.
+        """
+        excluded_fields: Set[str] = set([
+            "name",
+            "success_count",
+            "failure_count",
+            "total_count",
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1AgentChatScriptUsage from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "name": obj.get("name"),
+            "successCount": obj.get("successCount"),
+            "failureCount": obj.get("failureCount"),
+            "totalCount": obj.get("totalCount")
+        }, strict=False)
+        return _obj
+
+
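
(Not part of the diff above — a hedged usage sketch.) The new V1AgentChatScriptUsage model follows the conventions visible in the hunk: camelCase wire aliases on each Field, from_json/from_dict for parsing, and a to_dict that drops the "Output only" (readOnly) fields. The payload values below are invented for illustration:

    from eval_studio_client.api.models.v1_agent_chat_script_usage import V1AgentChatScriptUsage

    # Wire payloads use the camelCase aliases declared on each Field(...).
    payload = '{"name": "summarize.py", "successCount": 7, "failureCount": 1, "totalCount": 8}'

    usage = V1AgentChatScriptUsage.from_json(payload)
    print(usage.success_count)   # snake_case attribute access -> 7
    print(usage.to_str())        # pprint of the model with camelCase aliases
    # to_dict()/to_json() exclude the readOnly fields listed in excluded_fields,
    # so for this all-readOnly model they yield an empty dict/object.
    print(usage.to_dict())       # -> {}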

eval_studio_client/api/models/v1_agent_chat_scripts_bar_chart.py
@@ -0,0 +1,102 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field
+from typing import Any, ClassVar, Dict, List, Optional
+from eval_studio_client.api.models.v1_agent_chat_script_usage import V1AgentChatScriptUsage
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1AgentChatScriptsBarChart(BaseModel):
+    """
+    AgentChatScriptsBarChart represents the bar chart for agent chat scripts usage.
+    """ # noqa: E501
+    scripts: Optional[Dict[str, V1AgentChatScriptUsage]] = Field(default=None, description="Output only. Map of script name to script usage statistics.")
+    __properties: ClassVar[List[str]] = ["scripts"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1AgentChatScriptsBarChart from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        * OpenAPI `readOnly` fields are excluded.
+        """
+        excluded_fields: Set[str] = set([
+            "scripts",
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of each value in scripts (dict)
+        _field_dict = {}
+        if self.scripts:
+            for _key_scripts in self.scripts:
+                if self.scripts[_key_scripts]:
+                    _field_dict[_key_scripts] = self.scripts[_key_scripts].to_dict()
+            _dict['scripts'] = _field_dict
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1AgentChatScriptsBarChart from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "scripts": dict(
+                (_k, V1AgentChatScriptUsage.from_dict(_v))
+                for _k, _v in obj["scripts"].items()
+            )
+            if obj.get("scripts") is not None
+            else None
+        }, strict=False)
+        return _obj
+
+
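
(Again illustrative, not part of the diff.) V1AgentChatScriptsBarChart nests the usage model in a map, and its from_dict routes every map value through V1AgentChatScriptUsage.from_dict, so consumers receive typed entries. A minimal sketch with made-up data:

    from eval_studio_client.api.models.v1_agent_chat_scripts_bar_chart import V1AgentChatScriptsBarChart

    # Hypothetical response fragment; each map value matches the V1AgentChatScriptUsage shape.
    chart = V1AgentChatScriptsBarChart.from_dict({
        "scripts": {
            "summarize.py": {"name": "summarize.py", "successCount": 7, "failureCount": 1, "totalCount": 8},
            "fetch_docs.py": {"name": "fetch_docs.py", "successCount": 3, "failureCount": 0, "totalCount": 3},
        }
    })

    for name, usage in (chart.scripts or {}).items():
        print(name, usage.total_count)   # nested values are typed V1AgentChatScriptUsage instances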

eval_studio_client/api/models/v1_agent_chat_tool_usage.py
@@ -0,0 +1,101 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1AgentChatToolUsage(BaseModel):
+    """
+    AgentChatToolUsage represents usage statistics for a single tool.
+    """ # noqa: E501
+    name: Optional[StrictStr] = Field(default=None, description="Output only. Name of the tool.")
+    success_count: Optional[StrictInt] = Field(default=None, description="Output only. Number of successful invocations.", alias="successCount")
+    failure_count: Optional[StrictInt] = Field(default=None, description="Output only. Number of failed invocations.", alias="failureCount")
+    total_count: Optional[StrictInt] = Field(default=None, description="Output only. Total number of invocations.", alias="totalCount")
+    __properties: ClassVar[List[str]] = ["name", "successCount", "failureCount", "totalCount"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1AgentChatToolUsage from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        * OpenAPI `readOnly` fields are excluded.
+        * OpenAPI `readOnly` fields are excluded.
+        * OpenAPI `readOnly` fields are excluded.
+        * OpenAPI `readOnly` fields are excluded.
+        """
+        excluded_fields: Set[str] = set([
+            "name",
+            "success_count",
+            "failure_count",
+            "total_count",
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1AgentChatToolUsage from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "name": obj.get("name"),
+            "successCount": obj.get("successCount"),
+            "failureCount": obj.get("failureCount"),
+            "totalCount": obj.get("totalCount")
+        }, strict=False)
+        return _obj
+
+

eval_studio_client/api/models/v1_agent_chat_tools_bar_chart.py
@@ -0,0 +1,102 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field
+from typing import Any, ClassVar, Dict, List, Optional
+from eval_studio_client.api.models.v1_agent_chat_tool_usage import V1AgentChatToolUsage
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1AgentChatToolsBarChart(BaseModel):
+    """
+    AgentChatToolsBarChart represents the bar chart for agent chat tools usage.
+    """ # noqa: E501
+    tools: Optional[Dict[str, V1AgentChatToolUsage]] = Field(default=None, description="Output only. Map of tool name to tool usage statistics.")
+    __properties: ClassVar[List[str]] = ["tools"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1AgentChatToolsBarChart from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        * OpenAPI `readOnly` fields are excluded.
+        """
+        excluded_fields: Set[str] = set([
+            "tools",
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of each value in tools (dict)
+        _field_dict = {}
+        if self.tools:
+            for _key_tools in self.tools:
+                if self.tools[_key_tools]:
+                    _field_dict[_key_tools] = self.tools[_key_tools].to_dict()
+            _dict['tools'] = _field_dict
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1AgentChatToolsBarChart from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "tools": dict(
+                (_k, V1AgentChatToolUsage.from_dict(_v))
+                for _k, _v in obj["tools"].items()
+            )
+            if obj.get("tools") is not None
+            else None
+        }, strict=False)
+        return _obj
+
+

eval_studio_client/api/models/v1_all_metric_scores.py
@@ -0,0 +1,87 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt
+from typing import Any, ClassVar, Dict, List, Optional, Union
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1AllMetricScores(BaseModel):
+    """
+    V1AllMetricScores
+    """ # noqa: E501
+    scores: Optional[List[Union[StrictFloat, StrictInt]]] = Field(default=None, description="Required. Metric scores for all baseline test cases.")
+    __properties: ClassVar[List[str]] = ["scores"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1AllMetricScores from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1AllMetricScores from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "scores": obj.get("scores")
+        }, strict=False)
+        return _obj
+
+

eval_studio_client/api/models/v1_append_test_cases_response.py
@@ -0,0 +1,95 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field
+from typing import Any, ClassVar, Dict, List, Optional
+from eval_studio_client.api.models.v1_test_case import V1TestCase
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1AppendTestCasesResponse(BaseModel):
+    """
+    V1AppendTestCasesResponse
+    """ # noqa: E501
+    test_cases: Optional[List[V1TestCase]] = Field(default=None, description="The list of added TestCases.", alias="testCases")
+    __properties: ClassVar[List[str]] = ["testCases"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1AppendTestCasesResponse from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of each item in test_cases (list)
+        _items = []
+        if self.test_cases:
+            for _item_test_cases in self.test_cases:
+                if _item_test_cases:
+                    _items.append(_item_test_cases.to_dict())
+            _dict['testCases'] = _items
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1AppendTestCasesResponse from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "testCases": [V1TestCase.from_dict(_item) for _item in obj["testCases"]] if obj.get("testCases") is not None else None
+        }, strict=False)
+        return _obj
+
+

eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_request.py
@@ -0,0 +1,99 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from eval_studio_client.api.models.v1_create_leaderboard_request import V1CreateLeaderboardRequest
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1BatchCreateLeaderboardsWithoutCacheRequest(BaseModel):
+    """
+    V1BatchCreateLeaderboardsWithoutCacheRequest
+    """ # noqa: E501
+    requests: Optional[List[V1CreateLeaderboardRequest]] = Field(default=None, description="Required. Contains list of requests for leaderboards to be created.")
+    dashboard_display_name: Optional[StrictStr] = Field(default=None, description="Optional. Display name for the dashboard that will group the leaderboards.", alias="dashboardDisplayName")
+    dashboard_description: Optional[StrictStr] = Field(default=None, description="Optional. Description for the dashboard that will group the leaderboards.", alias="dashboardDescription")
+    __properties: ClassVar[List[str]] = ["requests", "dashboardDisplayName", "dashboardDescription"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1BatchCreateLeaderboardsWithoutCacheRequest from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of each item in requests (list)
+        _items = []
+        if self.requests:
+            for _item_requests in self.requests:
+                if _item_requests:
+                    _items.append(_item_requests.to_dict())
+            _dict['requests'] = _items
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1BatchCreateLeaderboardsWithoutCacheRequest from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "requests": [V1CreateLeaderboardRequest.from_dict(_item) for _item in obj["requests"]] if obj.get("requests") is not None else None,
+            "dashboardDisplayName": obj.get("dashboardDisplayName"),
+            "dashboardDescription": obj.get("dashboardDescription")
+        }, strict=False)
+        return _obj
+
+
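
(Illustrative sketch, not part of the diff.) The new V1BatchCreateLeaderboardsWithoutCacheRequest adds optional dashboard grouping fields alongside the per-leaderboard requests list. Since the V1CreateLeaderboardRequest schema is outside this excerpt, the sketch leaves that list empty and only fills the grouping fields:

    from eval_studio_client.api.models.v1_batch_create_leaderboards_without_cache_request import (
        V1BatchCreateLeaderboardsWithoutCacheRequest,
    )

    # All fields are Optional; snake_case names work because populate_by_name=True,
    # and the camelCase aliases are equally accepted.
    req = V1BatchCreateLeaderboardsWithoutCacheRequest(
        requests=[],  # would hold V1CreateLeaderboardRequest items in a real call
        dashboard_display_name="Nightly regression run",
        dashboard_description="Grouping dashboard for leaderboards created without cache",
    )
    print(req.to_json())  # emits camelCase keys: requests, dashboardDisplayName, dashboardDescription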