eval-studio-client 1.0.0a1__py3-none-any.whl → 1.1.0a5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/api/__init__.py +79 -1
- eval_studio_client/api/api/__init__.py +9 -0
- eval_studio_client/api/api/adversarial_inputs_service_api.py +321 -0
- eval_studio_client/api/api/dashboard_service_api.py +1 -1
- eval_studio_client/api/api/document_service_api.py +1 -1
- eval_studio_client/api/api/evaluation_service_api.py +1 -1
- eval_studio_client/api/api/evaluator_service_api.py +1 -1
- eval_studio_client/api/api/generated_questions_validation_service_api.py +321 -0
- eval_studio_client/api/api/human_calibration_service_api.py +304 -0
- eval_studio_client/api/api/info_service_api.py +1 -1
- eval_studio_client/api/api/leaderboard_report_service_api.py +292 -0
- eval_studio_client/api/api/leaderboard_service_api.py +17 -17
- eval_studio_client/api/api/model_service_api.py +17 -17
- eval_studio_client/api/api/operation_progress_service_api.py +1 -1
- eval_studio_client/api/api/operation_service_api.py +272 -17
- eval_studio_client/api/api/perturbation_service_api.py +1 -1
- eval_studio_client/api/api/perturbator_service_api.py +285 -18
- eval_studio_client/api/api/prompt_generation_service_api.py +1 -1
- eval_studio_client/api/api/prompt_library_service_api.py +669 -0
- eval_studio_client/api/api/test_case_relationship_service_api.py +292 -0
- eval_studio_client/api/api/test_case_service_api.py +17 -17
- eval_studio_client/api/api/test_class_service_api.py +17 -17
- eval_studio_client/api/api/test_lab_service_api.py +1 -1
- eval_studio_client/api/api/test_service_api.py +1238 -102
- eval_studio_client/api/api/who_am_i_service_api.py +1 -1
- eval_studio_client/api/api/workflow_edge_service_api.py +835 -0
- eval_studio_client/api/api/workflow_node_service_api.py +2431 -0
- eval_studio_client/api/api/workflow_service_api.py +1893 -0
- eval_studio_client/api/api_client.py +1 -1
- eval_studio_client/api/configuration.py +1 -1
- eval_studio_client/api/docs/AdversarialInputsServiceApi.md +78 -0
- eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +45 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceApi.md +78 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.md +30 -0
- eval_studio_client/api/docs/HumanCalibrationServiceApi.md +77 -0
- eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +5 -5
- eval_studio_client/api/docs/ModelServiceApi.md +5 -5
- eval_studio_client/api/docs/OperationServiceApi.md +72 -5
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +1 -0
- eval_studio_client/api/docs/PerturbatorServiceApi.md +38 -8
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +4 -2
- eval_studio_client/api/docs/PromptLibraryServiceApi.md +155 -0
- eval_studio_client/api/docs/ProtobufNullValue.md +12 -0
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +3 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +47 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflowNode.md +44 -0
- eval_studio_client/api/docs/TestCaseRelationshipServiceApi.md +75 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +5 -5
- eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
- eval_studio_client/api/docs/TestServiceApi.md +285 -5
- eval_studio_client/api/docs/TestServiceCloneTestRequest.md +30 -0
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +3 -1
- eval_studio_client/api/docs/TestServiceImportTestCasesFromLibraryRequest.md +32 -0
- eval_studio_client/api/docs/TestServiceListTestCaseLibraryItemsRequest.md +35 -0
- eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md +30 -0
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +1 -0
- eval_studio_client/api/docs/V1AbortOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetWorkflowEdgesResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetWorkflowNodesResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneTestResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Context.md +32 -0
- eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1CreateWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1EstimateThresholdRequest.md +33 -0
- eval_studio_client/api/docs/V1GeneratedTestCase.md +30 -0
- eval_studio_client/api/docs/V1GetLeaderboardReportResponse.md +29 -0
- eval_studio_client/api/docs/V1GetWorkflowNodePrerequisitesResponse.md +30 -0
- eval_studio_client/api/docs/V1GetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1GetWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1ImportEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1ImportTestCasesFromLibraryResponse.md +29 -0
- eval_studio_client/api/docs/V1ImportTestCasesRequest.md +33 -0
- eval_studio_client/api/docs/V1Info.md +3 -0
- eval_studio_client/api/docs/V1InitWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1LabeledTestCase.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReport.md +32 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputData.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluator.md +42 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluatorParameter.md +38 -0
- eval_studio_client/api/docs/V1LeaderboardReportExplanation.md +34 -0
- eval_studio_client/api/docs/V1LeaderboardReportMetricsMetaEntry.md +41 -0
- eval_studio_client/api/docs/V1LeaderboardReportModel.md +39 -0
- eval_studio_client/api/docs/V1LeaderboardReportResult.md +45 -0
- eval_studio_client/api/docs/V1LeaderboardReportResultRelationship.md +32 -0
- eval_studio_client/api/docs/V1ListPromptLibraryItemsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCaseLibraryItemsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCaseRelationshipsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListWorkflowsResponse.md +29 -0
- eval_studio_client/api/docs/V1MetricScore.md +31 -0
- eval_studio_client/api/docs/V1MetricScores.md +29 -0
- eval_studio_client/api/docs/V1PerturbTestInPlaceResponse.md +29 -0
- eval_studio_client/api/docs/V1ProcessWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1PromptLibraryItem.md +42 -0
- eval_studio_client/api/docs/V1RepeatedString.md +29 -0
- eval_studio_client/api/docs/V1ResetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1TestCase.md +3 -0
- eval_studio_client/api/docs/V1TestSuiteEvaluates.md +11 -0
- eval_studio_client/api/docs/V1UpdateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Workflow.md +49 -0
- eval_studio_client/api/docs/V1WorkflowEdge.md +40 -0
- eval_studio_client/api/docs/V1WorkflowEdgeType.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNode.md +46 -0
- eval_studio_client/api/docs/V1WorkflowNodeArtifact.md +40 -0
- eval_studio_client/api/docs/V1WorkflowNodeArtifacts.md +29 -0
- eval_studio_client/api/docs/V1WorkflowNodeAttributes.md +30 -0
- eval_studio_client/api/docs/V1WorkflowNodeStatus.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNodeType.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNodeView.md +12 -0
- eval_studio_client/api/docs/V1WorkflowType.md +12 -0
- eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +215 -0
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +632 -0
- eval_studio_client/api/docs/WorkflowServiceApi.md +488 -0
- eval_studio_client/api/docs/WorkflowServiceCloneWorkflowRequest.md +33 -0
- eval_studio_client/api/exceptions.py +1 -1
- eval_studio_client/api/models/__init__.py +70 -1
- eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +143 -0
- eval_studio_client/api/models/generated_questions_validation_service_validate_generated_questions_request.py +97 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +9 -3
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +17 -6
- eval_studio_client/api/models/protobuf_any.py +1 -1
- eval_studio_client/api/models/protobuf_null_value.py +36 -0
- eval_studio_client/api/models/required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_document_to_update.py +1 -1
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_model_to_update.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_case_to_update.py +14 -3
- eval_studio_client/api/models/required_the_test_to_update.py +1 -1
- eval_studio_client/api/models/required_the_updated_workflow.py +160 -0
- eval_studio_client/api/models/required_the_updated_workflow_node.py +152 -0
- eval_studio_client/api/models/rpc_status.py +1 -1
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/models/test_service_clone_test_request.py +89 -0
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +8 -4
- eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +93 -0
- eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +99 -0
- eval_studio_client/api/models/test_service_perturb_test_in_place_request.py +97 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +5 -3
- eval_studio_client/api/models/v1_abort_operation_response.py +91 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_request.py +87 -0
- eval_studio_client/api/models/v1_batch_delete_workflows_response.py +95 -0
- eval_studio_client/api/models/v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +95 -0
- eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +95 -0
- eval_studio_client/api/models/v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/models/v1_check_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_clone_test_response.py +91 -0
- eval_studio_client/api/models/v1_clone_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_collection_info.py +1 -1
- eval_studio_client/api/models/v1_context.py +93 -0
- eval_studio_client/api/models/v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_document_response.py +1 -1
- eval_studio_client/api/models/v1_create_evaluation_request.py +8 -3
- eval_studio_client/api/models/v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/models/v1_create_model_response.py +1 -1
- eval_studio_client/api/models/v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_response.py +1 -1
- eval_studio_client/api/models/v1_create_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_dashboard.py +1 -1
- eval_studio_client/api/models/v1_dashboard_status.py +1 -1
- eval_studio_client/api/models/v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_document_response.py +1 -1
- eval_studio_client/api/models/v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_model_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_document.py +1 -1
- eval_studio_client/api/models/v1_estimate_threshold_request.py +103 -0
- eval_studio_client/api/models/v1_evaluation_test.py +1 -1
- eval_studio_client/api/models/v1_evaluator.py +1 -1
- eval_studio_client/api/models/v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/models/v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/models/v1_evaluator_view.py +1 -1
- eval_studio_client/api/models/v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/models/v1_find_all_test_cases_by_id_response.py +1 -1
- eval_studio_client/api/models/v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_generated_test_case.py +101 -0
- eval_studio_client/api/models/v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_document_response.py +1 -1
- eval_studio_client/api/models/v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_get_info_response.py +1 -1
- eval_studio_client/api/models/v1_get_leaderboard_report_response.py +91 -0
- eval_studio_client/api/models/v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_model_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_response.py +1 -1
- eval_studio_client/api/models/v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_class_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +89 -0
- eval_studio_client/api/models/v1_get_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_get_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_import_evaluation_request.py +8 -3
- eval_studio_client/api/models/v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +91 -0
- eval_studio_client/api/models/v1_import_test_cases_request.py +95 -0
- eval_studio_client/api/models/v1_info.py +10 -4
- eval_studio_client/api/models/v1_init_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_insight.py +1 -1
- eval_studio_client/api/models/v1_labeled_test_case.py +91 -0
- eval_studio_client/api/models/v1_leaderboard.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_report.py +115 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_data.py +93 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +101 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator.py +155 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator_parameter.py +109 -0
- eval_studio_client/api/models/v1_leaderboard_report_explanation.py +103 -0
- eval_studio_client/api/models/v1_leaderboard_report_metrics_meta_entry.py +129 -0
- eval_studio_client/api/models/v1_leaderboard_report_model.py +121 -0
- eval_studio_client/api/models/v1_leaderboard_report_result.py +175 -0
- eval_studio_client/api/models/v1_leaderboard_report_result_relationship.py +97 -0
- eval_studio_client/api/models/v1_leaderboard_status.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_type.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_view.py +1 -1
- eval_studio_client/api/models/v1_list_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_documents_response.py +1 -1
- eval_studio_client/api/models/v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_operations_response.py +1 -1
- eval_studio_client/api/models/v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/models/v1_list_prompt_library_items_response.py +95 -0
- eval_studio_client/api/models/v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_library_items_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_case_relationships_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/models/v1_list_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_workflows_response.py +95 -0
- eval_studio_client/api/models/v1_metric_score.py +89 -0
- eval_studio_client/api/models/v1_metric_scores.py +95 -0
- eval_studio_client/api/models/v1_model.py +1 -1
- eval_studio_client/api/models/v1_model_type.py +1 -1
- eval_studio_client/api/models/v1_operation.py +1 -1
- eval_studio_client/api/models/v1_operation_progress.py +1 -1
- eval_studio_client/api/models/v1_perturb_test_in_place_response.py +91 -0
- eval_studio_client/api/models/v1_perturb_test_response.py +1 -1
- eval_studio_client/api/models/v1_perturbator.py +1 -1
- eval_studio_client/api/models/v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/models/v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/models/v1_problem_and_action.py +1 -1
- eval_studio_client/api/models/v1_process_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_prompt_library_item.py +129 -0
- eval_studio_client/api/models/v1_repeated_string.py +87 -0
- eval_studio_client/api/models/v1_reset_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_test.py +1 -1
- eval_studio_client/api/models/v1_test_case.py +14 -3
- eval_studio_client/api/models/v1_test_case_relationship.py +1 -1
- eval_studio_client/api/models/v1_test_cases_generator.py +1 -1
- eval_studio_client/api/models/v1_test_class.py +1 -1
- eval_studio_client/api/models/v1_test_class_type.py +1 -1
- eval_studio_client/api/models/v1_test_lab.py +1 -1
- eval_studio_client/api/models/v1_test_suite_evaluates.py +39 -0
- eval_studio_client/api/models/v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_document_response.py +1 -1
- eval_studio_client/api/models/v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_model_response.py +1 -1
- eval_studio_client/api/models/v1_update_operation_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_update_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_who_am_i_response.py +1 -1
- eval_studio_client/api/models/v1_workflow.py +164 -0
- eval_studio_client/api/models/v1_workflow_edge.py +123 -0
- eval_studio_client/api/models/v1_workflow_edge_type.py +37 -0
- eval_studio_client/api/models/v1_workflow_node.py +156 -0
- eval_studio_client/api/models/v1_workflow_node_artifact.py +122 -0
- eval_studio_client/api/models/v1_workflow_node_artifacts.py +97 -0
- eval_studio_client/api/models/v1_workflow_node_attributes.py +87 -0
- eval_studio_client/api/models/v1_workflow_node_status.py +40 -0
- eval_studio_client/api/models/v1_workflow_node_type.py +44 -0
- eval_studio_client/api/models/v1_workflow_node_view.py +38 -0
- eval_studio_client/api/models/v1_workflow_type.py +37 -0
- eval_studio_client/api/models/workflow_service_clone_workflow_request.py +95 -0
- eval_studio_client/api/rest.py +1 -1
- eval_studio_client/api/test/test_adversarial_inputs_service_api.py +37 -0
- eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +128 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
- eval_studio_client/api/test/test_document_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
- eval_studio_client/api/test/test_generated_questions_validation_service_api.py +37 -0
- eval_studio_client/api/test/test_generated_questions_validation_service_validate_generated_questions_request.py +83 -0
- eval_studio_client/api/test/test_human_calibration_service_api.py +38 -0
- eval_studio_client/api/test/test_info_service_api.py +1 -1
- eval_studio_client/api/test/test_leaderboard_report_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
- eval_studio_client/api/test/test_model_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_service_api.py +7 -1
- eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +25 -3
- eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +13 -5
- eval_studio_client/api/test/test_prompt_library_service_api.py +43 -0
- eval_studio_client/api/test/test_protobuf_any.py +1 -1
- eval_studio_client/api/test/test_protobuf_null_value.py +33 -0
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +9 -2
- eval_studio_client/api/test/test_required_the_test_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_updated_workflow.py +91 -0
- eval_studio_client/api/test/test_required_the_updated_workflow_node.py +80 -0
- eval_studio_client/api/test/test_rpc_status.py +1 -1
- eval_studio_client/api/test/test_test_case_relationship_service_api.py +37 -0
- eval_studio_client/api/test/test_test_case_service_api.py +1 -1
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_test_class_service_api.py +1 -1
- eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
- eval_studio_client/api/test/test_test_service_api.py +25 -1
- eval_studio_client/api/test/test_test_service_clone_test_request.py +52 -0
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +8 -2
- eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +56 -0
- eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +63 -0
- eval_studio_client/api/test/test_test_service_perturb_test_in_place_request.py +59 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +5 -2
- eval_studio_client/api/test/test_v1_abort_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +9 -2
- eval_studio_client/api/test/test_v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +53 -0
- eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +95 -0
- eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +64 -0
- eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +84 -0
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_check_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_clone_test_response.py +67 -0
- eval_studio_client/api/test/test_v1_clone_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_collection_info.py +1 -1
- eval_studio_client/api/test/test_v1_context.py +54 -0
- eval_studio_client/api/test/test_v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +25 -3
- eval_studio_client/api/test/test_v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_create_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_create_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_dashboard.py +1 -1
- eval_studio_client/api/test/test_v1_dashboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_delete_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_delete_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_document.py +1 -1
- eval_studio_client/api/test/test_v1_estimate_threshold_request.py +60 -0
- eval_studio_client/api/test/test_v1_evaluation_test.py +9 -2
- eval_studio_client/api/test/test_v1_evaluator.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_view.py +1 -1
- eval_studio_client/api/test/test_v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +9 -2
- eval_studio_client/api/test/test_v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/test/test_v1_generated_test_case.py +79 -0
- eval_studio_client/api/test/test_v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_info_response.py +7 -2
- eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +175 -0
- eval_studio_client/api/test/test_v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_get_test_class_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +56 -0
- eval_studio_client/api/test/test_v1_get_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_get_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_import_evaluation_request.py +17 -2
- eval_studio_client/api/test/test_v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +71 -0
- eval_studio_client/api/test/test_v1_import_test_cases_request.py +57 -0
- eval_studio_client/api/test/test_v1_info.py +7 -2
- eval_studio_client/api/test/test_v1_init_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_insight.py +1 -1
- eval_studio_client/api/test/test_v1_labeled_test_case.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_report.py +174 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py +52 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +56 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py +114 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py +63 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py +58 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py +66 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_model.py +62 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result.py +92 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_type.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_view.py +1 -1
- eval_studio_client/api/test/test_v1_list_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +71 -0
- eval_studio_client/api/test/test_v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +71 -0
- eval_studio_client/api/test/test_v1_list_test_case_relationships_response.py +56 -0
- eval_studio_client/api/test/test_v1_list_test_cases_response.py +9 -2
- eval_studio_client/api/test/test_v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_workflows_response.py +95 -0
- eval_studio_client/api/test/test_v1_metric_score.py +52 -0
- eval_studio_client/api/test/test_v1_metric_scores.py +55 -0
- eval_studio_client/api/test/test_v1_model.py +1 -1
- eval_studio_client/api/test/test_v1_model_type.py +1 -1
- eval_studio_client/api/test/test_v1_operation.py +1 -1
- eval_studio_client/api/test/test_v1_operation_progress.py +1 -1
- eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +67 -0
- eval_studio_client/api/test/test_v1_perturb_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/test/test_v1_problem_and_action.py +1 -1
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +71 -0
- eval_studio_client/api/test/test_v1_prompt_library_item.py +68 -0
- eval_studio_client/api/test/test_v1_repeated_string.py +53 -0
- eval_studio_client/api/test/test_v1_reset_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_test.py +1 -1
- eval_studio_client/api/test/test_v1_test_case.py +9 -2
- eval_studio_client/api/test/test_v1_test_case_relationship.py +1 -1
- eval_studio_client/api/test/test_v1_test_cases_generator.py +1 -1
- eval_studio_client/api/test/test_v1_test_class.py +1 -1
- eval_studio_client/api/test/test_v1_test_class_type.py +1 -1
- eval_studio_client/api/test/test_v1_test_lab.py +1 -1
- eval_studio_client/api/test/test_v1_test_suite_evaluates.py +33 -0
- eval_studio_client/api/test/test_v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_update_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_update_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_who_am_i_response.py +1 -1
- eval_studio_client/api/test/test_v1_workflow.py +92 -0
- eval_studio_client/api/test/test_v1_workflow_edge.py +61 -0
- eval_studio_client/api/test/test_v1_workflow_edge_type.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node.py +81 -0
- eval_studio_client/api/test/test_v1_workflow_node_artifact.py +61 -0
- eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +64 -0
- eval_studio_client/api/test/test_v1_workflow_node_attributes.py +51 -0
- eval_studio_client/api/test/test_v1_workflow_node_status.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node_type.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node_view.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_type.py +33 -0
- eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
- eval_studio_client/api/test/test_workflow_edge_service_api.py +52 -0
- eval_studio_client/api/test/test_workflow_node_service_api.py +94 -0
- eval_studio_client/api/test/test_workflow_service_api.py +80 -0
- eval_studio_client/api/test/test_workflow_service_clone_workflow_request.py +55 -0
- eval_studio_client/client.py +7 -0
- eval_studio_client/dashboards.py +66 -18
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +5132 -1847
- eval_studio_client/leaderboards.py +125 -0
- eval_studio_client/models.py +3 -42
- eval_studio_client/test_labs.py +49 -21
- eval_studio_client/tests.py +323 -58
- eval_studio_client/utils.py +26 -0
- {eval_studio_client-1.0.0a1.dist-info → eval_studio_client-1.1.0a5.dist-info}/METADATA +2 -3
- eval_studio_client-1.1.0a5.dist-info/RECORD +720 -0
- {eval_studio_client-1.0.0a1.dist-info → eval_studio_client-1.1.0a5.dist-info}/WHEEL +1 -1
- eval_studio_client-1.0.0a1.dist-info/RECORD +0 -485
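Most of the new surface area is generated code: workflow, workflow node/edge, prompt library, leaderboard report, human calibration, adversarial inputs, and test-case relationship services, plus their request/response models. The diff only contains the generated classes themselves, so the snippet below is a rough sketch of how clients produced by openapi-generator are conventionally wired up; the re-exported ApiClient/Configuration names and the placeholder host are assumptions, and the individual service method names are not shown in this diff.

# Rough usage sketch (assumptions, not taken from the package diff): standard
# openapi-generator Python wiring, assuming eval_studio_client.api re-exports
# Configuration and ApiClient as such generated packages usually do.
from eval_studio_client.api import ApiClient, Configuration
from eval_studio_client.api.api import LeaderboardReportServiceApi, WorkflowServiceApi

config = Configuration(host="https://eval-studio.example.com")  # placeholder host
client = ApiClient(configuration=config)

workflow_api = WorkflowServiceApi(client)          # service added in 1.1.0a5
report_api = LeaderboardReportServiceApi(client)   # service added in 1.1.0a5
# The concrete service methods are generated per RPC; their names are not
# visible in this diff, so none are called here.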
eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py
@@ -0,0 +1,52 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_leaderboard_report_actual_output_data import V1LeaderboardReportActualOutputData
+
+class TestV1LeaderboardReportActualOutputData(unittest.TestCase):
+    """V1LeaderboardReportActualOutputData unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1LeaderboardReportActualOutputData:
+        """Test V1LeaderboardReportActualOutputData
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1LeaderboardReportActualOutputData`
+        """
+        model = V1LeaderboardReportActualOutputData()
+        if include_optional:
+            return V1LeaderboardReportActualOutputData(
+                text = '',
+                metrics = None
+            )
+        else:
+            return V1LeaderboardReportActualOutputData(
+        )
+        """
+
+    def testV1LeaderboardReportActualOutputData(self):
+        """Test V1LeaderboardReportActualOutputData"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py
@@ -0,0 +1,56 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_leaderboard_report_actual_output_meta import V1LeaderboardReportActualOutputMeta
+
+class TestV1LeaderboardReportActualOutputMeta(unittest.TestCase):
+    """V1LeaderboardReportActualOutputMeta unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1LeaderboardReportActualOutputMeta:
+        """Test V1LeaderboardReportActualOutputMeta
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1LeaderboardReportActualOutputMeta`
+        """
+        model = V1LeaderboardReportActualOutputMeta()
+        if include_optional:
+            return V1LeaderboardReportActualOutputMeta(
+                tokenization = '',
+                data = [
+                    eval_studio_client.api.models.v1_leaderboard_report_actual_output_data.v1LeaderboardReportActualOutputData(
+                        text = '',
+                        metrics = eval_studio_client.api.models.metrics.metrics(), )
+                    ]
+            )
+        else:
+            return V1LeaderboardReportActualOutputMeta(
+        )
+        """
+
+    def testV1LeaderboardReportActualOutputMeta(self):
+        """Test V1LeaderboardReportActualOutputMeta"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py
@@ -0,0 +1,114 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_leaderboard_report_evaluator import V1LeaderboardReportEvaluator
+
+class TestV1LeaderboardReportEvaluator(unittest.TestCase):
+    """V1LeaderboardReportEvaluator unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1LeaderboardReportEvaluator:
+        """Test V1LeaderboardReportEvaluator
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1LeaderboardReportEvaluator`
+        """
+        model = V1LeaderboardReportEvaluator()
+        if include_optional:
+            return V1LeaderboardReportEvaluator(
+                id = '',
+                name = '',
+                display_name = '',
+                tagline = '',
+                description = '',
+                brief_description = '',
+                model_types = [
+                    ''
+                    ],
+                can_explain = [
+                    ''
+                    ],
+                explanation_scopes = [
+                    ''
+                    ],
+                explanations = [
+                    eval_studio_client.api.models.v1_leaderboard_report_explanation.v1LeaderboardReportExplanation(
+                        explanation_type = '',
+                        name = '',
+                        category = '',
+                        scope = '',
+                        has_local = '',
+                        formats = [
+                            ''
+                            ], )
+                    ],
+                parameters = [
+                    eval_studio_client.api.models.v1_leaderboard_report_evaluator_parameter.v1LeaderboardReportEvaluatorParameter(
+                        name = '',
+                        description = '',
+                        comment = '',
+                        type = '',
+                        predefined = [
+                            None
+                            ],
+                        tags = [
+                            ''
+                            ],
+                        min = 1.337,
+                        max = 1.337,
+                        category = '', )
+                    ],
+                keywords = [
+                    ''
+                    ],
+                metrics_meta = [
+                    eval_studio_client.api.models.v1_leaderboard_report_metrics_meta_entry.v1LeaderboardReportMetricsMetaEntry(
+                        key = '',
+                        display_name = '',
+                        data_type = '',
+                        display_value = '',
+                        description = '',
+                        value_range = [
+                            1.337
+                            ],
+                        value_enum = [
+                            ''
+                            ],
+                        higher_is_better = True,
+                        threshold = 1.337,
+                        is_primary_metric = True,
+                        parent_metric = '',
+                        exclude = True, )
+                    ]
+            )
+        else:
+            return V1LeaderboardReportEvaluator(
+        )
+        """
+
+    def testV1LeaderboardReportEvaluator(self):
+        """Test V1LeaderboardReportEvaluator"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py
@@ -0,0 +1,63 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_leaderboard_report_evaluator_parameter import V1LeaderboardReportEvaluatorParameter
+
+class TestV1LeaderboardReportEvaluatorParameter(unittest.TestCase):
+    """V1LeaderboardReportEvaluatorParameter unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1LeaderboardReportEvaluatorParameter:
+        """Test V1LeaderboardReportEvaluatorParameter
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1LeaderboardReportEvaluatorParameter`
+        """
+        model = V1LeaderboardReportEvaluatorParameter()
+        if include_optional:
+            return V1LeaderboardReportEvaluatorParameter(
+                name = '',
+                description = '',
+                comment = '',
+                type = '',
+                predefined = [
+                    None
+                    ],
+                tags = [
+                    ''
+                    ],
+                min = 1.337,
+                max = 1.337,
+                category = ''
+            )
+        else:
+            return V1LeaderboardReportEvaluatorParameter(
+        )
+        """
+
+    def testV1LeaderboardReportEvaluatorParameter(self):
+        """Test V1LeaderboardReportEvaluatorParameter"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py
@@ -0,0 +1,58 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_leaderboard_report_explanation import V1LeaderboardReportExplanation
+
+class TestV1LeaderboardReportExplanation(unittest.TestCase):
+    """V1LeaderboardReportExplanation unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1LeaderboardReportExplanation:
+        """Test V1LeaderboardReportExplanation
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1LeaderboardReportExplanation`
+        """
+        model = V1LeaderboardReportExplanation()
+        if include_optional:
+            return V1LeaderboardReportExplanation(
+                explanation_type = '',
+                name = '',
+                category = '',
+                scope = '',
+                has_local = '',
+                formats = [
+                    ''
+                    ]
+            )
+        else:
+            return V1LeaderboardReportExplanation(
+        )
+        """
+
+    def testV1LeaderboardReportExplanation(self):
+        """Test V1LeaderboardReportExplanation"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py
@@ -0,0 +1,66 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_leaderboard_report_metrics_meta_entry import V1LeaderboardReportMetricsMetaEntry
+
+class TestV1LeaderboardReportMetricsMetaEntry(unittest.TestCase):
+    """V1LeaderboardReportMetricsMetaEntry unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1LeaderboardReportMetricsMetaEntry:
+        """Test V1LeaderboardReportMetricsMetaEntry
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1LeaderboardReportMetricsMetaEntry`
+        """
+        model = V1LeaderboardReportMetricsMetaEntry()
+        if include_optional:
+            return V1LeaderboardReportMetricsMetaEntry(
+                key = '',
+                display_name = '',
+                data_type = '',
+                display_value = '',
+                description = '',
+                value_range = [
+                    1.337
+                    ],
+                value_enum = [
+                    ''
+                    ],
+                higher_is_better = True,
+                threshold = 1.337,
+                is_primary_metric = True,
+                parent_metric = '',
+                exclude = True
+            )
+        else:
+            return V1LeaderboardReportMetricsMetaEntry(
+        )
+        """
+
+    def testV1LeaderboardReportMetricsMetaEntry(self):
+        """Test V1LeaderboardReportMetricsMetaEntry"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_leaderboard_report_model.py
@@ -0,0 +1,62 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_leaderboard_report_model import V1LeaderboardReportModel
+
+class TestV1LeaderboardReportModel(unittest.TestCase):
+    """V1LeaderboardReportModel unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1LeaderboardReportModel:
+        """Test V1LeaderboardReportModel
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1LeaderboardReportModel`
+        """
+        model = V1LeaderboardReportModel()
+        if include_optional:
+            return V1LeaderboardReportModel(
+                connection = '',
+                model_type = '',
+                name = '',
+                collection_id = '',
+                collection_name = '',
+                llm_model_name = '',
+                documents = [
+                    ''
+                    ],
+                model_cfg = eval_studio_client.api.models.model_cfg.modelCfg(),
+                key = '',
+                llm_model_meta = eval_studio_client.api.models.llm_model_meta.llmModelMeta()
+            )
+        else:
+            return V1LeaderboardReportModel(
+        )
+        """
+
+    def testV1LeaderboardReportModel(self):
+        """Test V1LeaderboardReportModel"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_leaderboard_report_result.py
@@ -0,0 +1,92 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_leaderboard_report_result import V1LeaderboardReportResult
+
+class TestV1LeaderboardReportResult(unittest.TestCase):
+    """V1LeaderboardReportResult unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1LeaderboardReportResult:
+        """Test V1LeaderboardReportResult
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1LeaderboardReportResult`
+        """
+        model = V1LeaderboardReportResult()
+        if include_optional:
+            return V1LeaderboardReportResult(
+                key = '',
+                input = '',
+                corpus = [
+                    ''
+                    ],
+                context = [
+                    ''
+                    ],
+                categories = [
+                    ''
+                    ],
+                relationships = [
+                    eval_studio_client.api.models.v1_leaderboard_report_result_relationship.v1LeaderboardReportResultRelationship(
+                        type = '',
+                        target = '',
+                        target_type = '', )
+                    ],
+                expected_output = '',
+                output_constraints = [
+                    ''
+                    ],
+                output_condition = '',
+                actual_output = '',
+                actual_duration = 1.337,
+                cost = 1.337,
+                model_key = '',
+                test_case_key = '',
+                metrics = [
+                    eval_studio_client.api.models.v1_metric_score.v1MetricScore(
+                        key = '',
+                        value = 1.337, )
+                    ],
+                result_error_message = '',
+                actual_output_meta = [
+                    eval_studio_client.api.models.v1_leaderboard_report_actual_output_meta.v1LeaderboardReportActualOutputMeta(
+                        tokenization = '',
+                        data = [
+                            eval_studio_client.api.models.v1_leaderboard_report_actual_output_data.v1LeaderboardReportActualOutputData(
+                                text = '',
+                                metrics = eval_studio_client.api.models.metrics.metrics(), )
+                            ], )
+                    ]
+            )
+        else:
+            return V1LeaderboardReportResult(
+        )
+        """
+
+    def testV1LeaderboardReportResult(self):
+        """Test V1LeaderboardReportResult"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py
@@ -0,0 +1,53 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+"""  # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_leaderboard_report_result_relationship import V1LeaderboardReportResultRelationship
+
+class TestV1LeaderboardReportResultRelationship(unittest.TestCase):
+    """V1LeaderboardReportResultRelationship unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1LeaderboardReportResultRelationship:
+        """Test V1LeaderboardReportResultRelationship
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1LeaderboardReportResultRelationship`
+        """
+        model = V1LeaderboardReportResultRelationship()
+        if include_optional:
+            return V1LeaderboardReportResultRelationship(
+                type = '',
+                target = '',
+                target_type = ''
+            )
+        else:
+            return V1LeaderboardReportResultRelationship(
+        )
+        """
+
+    def testV1LeaderboardReportResultRelationship(self):
+        """Test V1LeaderboardReportResultRelationship"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
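Each of the stubs above follows the same generated pattern: make_instance carries a commented-out constructor with placeholder values, and the test method bodies are left for a maintainer to enable. Purely as an illustrative sketch (not part of the diff), enabling the first stub might look roughly like the following, assuming the generated pydantic model exposes its fields as plain attributes, which is the usual behaviour of openapi-generator output:

import unittest

from eval_studio_client.api.models.v1_leaderboard_report_actual_output_data import (
    V1LeaderboardReportActualOutputData,
)


class TestV1LeaderboardReportActualOutputDataEnabled(unittest.TestCase):
    """Hypothetical enabled version of the generated stub above."""

    def test_optional_fields(self):
        # Mirrors the include_optional=True branch of make_instance; the field
        # names `text` and `metrics` come from the commented stub itself.
        instance = V1LeaderboardReportActualOutputData(text="sample output", metrics=None)
        self.assertEqual(instance.text, "sample output")
        self.assertIsNone(instance.metrics)

    def test_required_only(self):
        # Mirrors the include_optional=False branch: every field is optional,
        # so a bare constructor call is expected to succeed.
        instance = V1LeaderboardReportActualOutputData()
        self.assertIsNone(instance.text)


if __name__ == "__main__":
    unittest.main()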