eval-studio-client 0.7.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/__about__.py +1 -0
- eval_studio_client/__init__.py +4 -0
- eval_studio_client/api/__init__.py +180 -0
- eval_studio_client/api/api/__init__.py +20 -0
- eval_studio_client/api/api/dashboard_service_api.py +2142 -0
- eval_studio_client/api/api/document_service_api.py +1868 -0
- eval_studio_client/api/api/evaluation_service_api.py +1603 -0
- eval_studio_client/api/api/evaluator_service_api.py +1343 -0
- eval_studio_client/api/api/info_service_api.py +275 -0
- eval_studio_client/api/api/leaderboard_service_api.py +3336 -0
- eval_studio_client/api/api/model_service_api.py +2913 -0
- eval_studio_client/api/api/operation_progress_service_api.py +292 -0
- eval_studio_client/api/api/operation_service_api.py +1359 -0
- eval_studio_client/api/api/perturbation_service_api.py +321 -0
- eval_studio_client/api/api/perturbator_service_api.py +532 -0
- eval_studio_client/api/api/test_case_service_api.py +1913 -0
- eval_studio_client/api/api/test_class_service_api.py +532 -0
- eval_studio_client/api/api/test_lab_service_api.py +634 -0
- eval_studio_client/api/api/test_service_api.py +2712 -0
- eval_studio_client/api/api/who_am_i_service_api.py +275 -0
- eval_studio_client/api/api_client.py +770 -0
- eval_studio_client/api/api_response.py +21 -0
- eval_studio_client/api/configuration.py +436 -0
- eval_studio_client/api/docs/DashboardServiceApi.md +549 -0
- eval_studio_client/api/docs/DocumentServiceApi.md +478 -0
- eval_studio_client/api/docs/EvaluationServiceApi.md +332 -0
- eval_studio_client/api/docs/EvaluatorServiceApi.md +345 -0
- eval_studio_client/api/docs/InfoServiceApi.md +71 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +835 -0
- eval_studio_client/api/docs/ModelServiceApi.md +750 -0
- eval_studio_client/api/docs/OperationProgressServiceApi.md +75 -0
- eval_studio_client/api/docs/OperationServiceApi.md +345 -0
- eval_studio_client/api/docs/PerturbationServiceApi.md +78 -0
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +31 -0
- eval_studio_client/api/docs/PerturbatorServiceApi.md +138 -0
- eval_studio_client/api/docs/ProtobufAny.md +30 -0
- eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +41 -0
- eval_studio_client/api/docs/RequiredTheDocumentToUpdate.md +38 -0
- eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +54 -0
- eval_studio_client/api/docs/RequiredTheModelToUpdate.md +41 -0
- eval_studio_client/api/docs/RequiredTheOperationToFinalize.md +39 -0
- eval_studio_client/api/docs/RequiredTheOperationToUpdate.md +39 -0
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +39 -0
- eval_studio_client/api/docs/RequiredTheTestToUpdate.md +39 -0
- eval_studio_client/api/docs/RpcStatus.md +32 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +486 -0
- eval_studio_client/api/docs/TestCaseServiceBatchDeleteTestCasesRequest.md +29 -0
- eval_studio_client/api/docs/TestClassServiceApi.md +138 -0
- eval_studio_client/api/docs/TestLabServiceApi.md +151 -0
- eval_studio_client/api/docs/TestServiceApi.md +689 -0
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +31 -0
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsRequest.md +31 -0
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsRequest.md +30 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteTestsRequest.md +30 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchImportLeaderboardRequest.md +37 -0
- eval_studio_client/api/docs/V1alphaBatchImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchImportTestsRequest.md +32 -0
- eval_studio_client/api/docs/V1alphaBatchImportTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCheckBaseModelsResponse.md +30 -0
- eval_studio_client/api/docs/V1alphaCollectionInfo.md +33 -0
- eval_studio_client/api/docs/V1alphaCreateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateEvaluationRequest.md +37 -0
- eval_studio_client/api/docs/V1alphaCreateEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateLeaderboardRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateLeaderboardWithoutCacheResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreatePerturbationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDashboard.md +41 -0
- eval_studio_client/api/docs/V1alphaDashboardStatus.md +12 -0
- eval_studio_client/api/docs/V1alphaDeleteDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDocument.md +38 -0
- eval_studio_client/api/docs/V1alphaEvaluationTest.md +32 -0
- eval_studio_client/api/docs/V1alphaEvaluator.md +45 -0
- eval_studio_client/api/docs/V1alphaEvaluatorParamType.md +12 -0
- eval_studio_client/api/docs/V1alphaEvaluatorParameter.md +40 -0
- eval_studio_client/api/docs/V1alphaEvaluatorView.md +12 -0
- eval_studio_client/api/docs/V1alphaFinalizeOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaFindAllTestCasesByIDResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaFindTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetInfoResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetOperationProgressByParentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetPerturbatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetTestClassResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaImportEvaluationRequest.md +33 -0
- eval_studio_client/api/docs/V1alphaImportLeaderboardRequest.md +37 -0
- eval_studio_client/api/docs/V1alphaImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaInfo.md +35 -0
- eval_studio_client/api/docs/V1alphaInsight.md +40 -0
- eval_studio_client/api/docs/V1alphaLeaderboard.md +54 -0
- eval_studio_client/api/docs/V1alphaLeaderboardStatus.md +12 -0
- eval_studio_client/api/docs/V1alphaLeaderboardType.md +12 -0
- eval_studio_client/api/docs/V1alphaLeaderboardView.md +12 -0
- eval_studio_client/api/docs/V1alphaListBaseModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListLLMModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListLeaderboardsResponse.md +30 -0
- eval_studio_client/api/docs/V1alphaListModelCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListPerturbatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListRAGCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListTestClassesResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaModel.md +42 -0
- eval_studio_client/api/docs/V1alphaModelType.md +12 -0
- eval_studio_client/api/docs/V1alphaOperation.md +40 -0
- eval_studio_client/api/docs/V1alphaOperationProgress.md +32 -0
- eval_studio_client/api/docs/V1alphaPerturbTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaPerturbator.md +39 -0
- eval_studio_client/api/docs/V1alphaPerturbatorConfiguration.md +32 -0
- eval_studio_client/api/docs/V1alphaPerturbatorIntensity.md +11 -0
- eval_studio_client/api/docs/V1alphaProblemAndAction.md +39 -0
- eval_studio_client/api/docs/V1alphaTest.md +40 -0
- eval_studio_client/api/docs/V1alphaTestCase.md +40 -0
- eval_studio_client/api/docs/V1alphaTestCaseRelationship.md +31 -0
- eval_studio_client/api/docs/V1alphaTestClass.md +41 -0
- eval_studio_client/api/docs/V1alphaTestClassType.md +12 -0
- eval_studio_client/api/docs/V1alphaTestLab.md +41 -0
- eval_studio_client/api/docs/V1alphaUpdateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaWhoAmIResponse.md +31 -0
- eval_studio_client/api/docs/WhoAmIServiceApi.md +72 -0
- eval_studio_client/api/exceptions.py +199 -0
- eval_studio_client/api/models/__init__.py +148 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +115 -0
- eval_studio_client/api/models/protobuf_any.py +100 -0
- eval_studio_client/api/models/required_the_dashboard_to_update.py +127 -0
- eval_studio_client/api/models/required_the_document_to_update.py +116 -0
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +178 -0
- eval_studio_client/api/models/required_the_model_to_update.py +127 -0
- eval_studio_client/api/models/required_the_operation_to_finalize.py +129 -0
- eval_studio_client/api/models/required_the_operation_to_update.py +129 -0
- eval_studio_client/api/models/required_the_test_case_to_update.py +120 -0
- eval_studio_client/api/models/required_the_test_to_update.py +122 -0
- eval_studio_client/api/models/rpc_status.py +99 -0
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +87 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +99 -0
- eval_studio_client/api/models/v1alpha_batch_create_leaderboards_request.py +99 -0
- eval_studio_client/api/models/v1alpha_batch_create_leaderboards_response.py +91 -0
- eval_studio_client/api/models/v1alpha_batch_delete_dashboards_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_documents_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_documents_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_evaluators_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_evaluators_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_leaderboards_request.py +90 -0
- eval_studio_client/api/models/v1alpha_batch_delete_leaderboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_models_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_test_cases_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_tests_request.py +89 -0
- eval_studio_client/api/models/v1alpha_batch_delete_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_documents_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_leaderboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_operations_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_import_leaderboard_request.py +104 -0
- eval_studio_client/api/models/v1alpha_batch_import_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_batch_import_tests_request.py +93 -0
- eval_studio_client/api/models/v1alpha_batch_import_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_check_base_models_response.py +89 -0
- eval_studio_client/api/models/v1alpha_collection_info.py +93 -0
- eval_studio_client/api/models/v1alpha_create_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_evaluation_request.py +115 -0
- eval_studio_client/api/models/v1alpha_create_evaluator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_leaderboard_request.py +91 -0
- eval_studio_client/api/models/v1alpha_create_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_leaderboard_without_cache_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_perturbation_response.py +87 -0
- eval_studio_client/api/models/v1alpha_create_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_test_lab_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_dashboard.py +131 -0
- eval_studio_client/api/models/v1alpha_dashboard_status.py +39 -0
- eval_studio_client/api/models/v1alpha_delete_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_evaluator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_document.py +120 -0
- eval_studio_client/api/models/v1alpha_evaluation_test.py +107 -0
- eval_studio_client/api/models/v1alpha_evaluator.py +155 -0
- eval_studio_client/api/models/v1alpha_evaluator_param_type.py +42 -0
- eval_studio_client/api/models/v1alpha_evaluator_parameter.py +126 -0
- eval_studio_client/api/models/v1alpha_evaluator_view.py +38 -0
- eval_studio_client/api/models/v1alpha_finalize_operation_response.py +91 -0
- eval_studio_client/api/models/v1alpha_find_all_test_cases_by_id_response.py +95 -0
- eval_studio_client/api/models/v1alpha_find_test_lab_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_evaluator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_info_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_operation_progress_by_parent_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_operation_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_perturbator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_test_class_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_import_evaluation_request.py +99 -0
- eval_studio_client/api/models/v1alpha_import_leaderboard_request.py +104 -0
- eval_studio_client/api/models/v1alpha_import_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_info.py +99 -0
- eval_studio_client/api/models/v1alpha_insight.py +107 -0
- eval_studio_client/api/models/v1alpha_leaderboard.py +182 -0
- eval_studio_client/api/models/v1alpha_leaderboard_status.py +39 -0
- eval_studio_client/api/models/v1alpha_leaderboard_type.py +39 -0
- eval_studio_client/api/models/v1alpha_leaderboard_view.py +39 -0
- eval_studio_client/api/models/v1alpha_list_base_models_response.py +87 -0
- eval_studio_client/api/models/v1alpha_list_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_documents_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_evaluators_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_leaderboards_response.py +97 -0
- eval_studio_client/api/models/v1alpha_list_llm_models_response.py +87 -0
- eval_studio_client/api/models/v1alpha_list_model_collections_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_leaderboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_operations_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_perturbators_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_rag_collections_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_test_cases_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_test_classes_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_model.py +131 -0
- eval_studio_client/api/models/v1alpha_model_type.py +46 -0
- eval_studio_client/api/models/v1alpha_operation.py +133 -0
- eval_studio_client/api/models/v1alpha_operation_progress.py +99 -0
- eval_studio_client/api/models/v1alpha_perturb_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_perturbator.py +122 -0
- eval_studio_client/api/models/v1alpha_perturbator_configuration.py +92 -0
- eval_studio_client/api/models/v1alpha_perturbator_intensity.py +39 -0
- eval_studio_client/api/models/v1alpha_problem_and_action.py +129 -0
- eval_studio_client/api/models/v1alpha_test.py +126 -0
- eval_studio_client/api/models/v1alpha_test_case.py +124 -0
- eval_studio_client/api/models/v1alpha_test_case_relationship.py +91 -0
- eval_studio_client/api/models/v1alpha_test_class.py +127 -0
- eval_studio_client/api/models/v1alpha_test_class_type.py +42 -0
- eval_studio_client/api/models/v1alpha_test_lab.py +137 -0
- eval_studio_client/api/models/v1alpha_update_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_operation_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_who_am_i_response.py +91 -0
- eval_studio_client/api/rest.py +257 -0
- eval_studio_client/api/test/__init__.py +0 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +79 -0
- eval_studio_client/api/test/test_document_service_api.py +73 -0
- eval_studio_client/api/test/test_evaluation_service_api.py +55 -0
- eval_studio_client/api/test/test_evaluator_service_api.py +61 -0
- eval_studio_client/api/test/test_info_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +103 -0
- eval_studio_client/api/test/test_model_service_api.py +97 -0
- eval_studio_client/api/test/test_operation_progress_service_api.py +37 -0
- eval_studio_client/api/test/test_operation_service_api.py +61 -0
- eval_studio_client/api/test/test_perturbation_service_api.py +37 -0
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +79 -0
- eval_studio_client/api/test/test_perturbator_service_api.py +43 -0
- eval_studio_client/api/test/test_protobuf_any.py +51 -0
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +64 -0
- eval_studio_client/api/test/test_required_the_document_to_update.py +59 -0
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +115 -0
- eval_studio_client/api/test/test_required_the_model_to_update.py +63 -0
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +71 -0
- eval_studio_client/api/test/test_required_the_operation_to_update.py +71 -0
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +63 -0
- eval_studio_client/api/test/test_required_the_test_to_update.py +65 -0
- eval_studio_client/api/test/test_rpc_status.py +57 -0
- eval_studio_client/api/test/test_test_case_service_api.py +73 -0
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +53 -0
- eval_studio_client/api/test/test_test_class_service_api.py +43 -0
- eval_studio_client/api/test/test_test_lab_service_api.py +43 -0
- eval_studio_client/api/test/test_test_service_api.py +91 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +58 -0
- eval_studio_client/api/test/test_v1alpha_batch_create_leaderboards_request.py +119 -0
- eval_studio_client/api/test/test_v1alpha_batch_create_leaderboards_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_dashboards_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_documents_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_documents_response.py +63 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_evaluators_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_evaluators_response.py +91 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_leaderboards_request.py +54 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_leaderboards_response.py +116 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_models_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_test_cases_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_tests_request.py +54 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_documents_response.py +63 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_leaderboards_response.py +116 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_operations_response.py +73 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_leaderboard_request.py +61 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_leaderboard_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_tests_request.py +54 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_check_base_models_response.py +52 -0
- eval_studio_client/api/test/test_v1alpha_collection_info.py +54 -0
- eval_studio_client/api/test/test_v1alpha_create_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_create_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_create_evaluation_request.py +107 -0
- eval_studio_client/api/test/test_v1alpha_create_evaluator_response.py +89 -0
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_request.py +114 -0
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_without_cache_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_create_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_create_perturbation_response.py +51 -0
- eval_studio_client/api/test/test_v1alpha_create_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_create_test_lab_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_create_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_dashboard.py +65 -0
- eval_studio_client/api/test/test_v1alpha_dashboard_status.py +33 -0
- eval_studio_client/api/test/test_v1alpha_delete_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_delete_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_delete_evaluator_response.py +89 -0
- eval_studio_client/api/test/test_v1alpha_delete_leaderboard_response.py +114 -0
- eval_studio_client/api/test/test_v1alpha_delete_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_delete_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_delete_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_document.py +60 -0
- eval_studio_client/api/test/test_v1alpha_evaluation_test.py +76 -0
- eval_studio_client/api/test/test_v1alpha_evaluator.py +91 -0
- eval_studio_client/api/test/test_v1alpha_evaluator_param_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_evaluator_parameter.py +68 -0
- eval_studio_client/api/test/test_v1alpha_evaluator_view.py +33 -0
- eval_studio_client/api/test/test_v1alpha_finalize_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_find_all_test_cases_by_id_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_find_test_lab_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_get_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_get_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_get_evaluator_response.py +89 -0
- eval_studio_client/api/test/test_v1alpha_get_info_response.py +60 -0
- eval_studio_client/api/test/test_v1alpha_get_leaderboard_response.py +114 -0
- eval_studio_client/api/test/test_v1alpha_get_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_get_operation_progress_by_parent_response.py +55 -0
- eval_studio_client/api/test/test_v1alpha_get_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_get_perturbator_response.py +64 -0
- eval_studio_client/api/test/test_v1alpha_get_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_get_test_class_response.py +70 -0
- eval_studio_client/api/test/test_v1alpha_get_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_import_evaluation_request.py +73 -0
- eval_studio_client/api/test/test_v1alpha_import_leaderboard_request.py +59 -0
- eval_studio_client/api/test/test_v1alpha_import_leaderboard_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_info.py +59 -0
- eval_studio_client/api/test/test_v1alpha_insight.py +67 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard.py +116 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard_status.py +33 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard_view.py +33 -0
- eval_studio_client/api/test/test_v1alpha_list_base_models_response.py +53 -0
- eval_studio_client/api/test/test_v1alpha_list_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_list_documents_response.py +63 -0
- eval_studio_client/api/test/test_v1alpha_list_evaluators_response.py +91 -0
- eval_studio_client/api/test/test_v1alpha_list_leaderboards_response.py +117 -0
- eval_studio_client/api/test/test_v1alpha_list_llm_models_response.py +53 -0
- eval_studio_client/api/test/test_v1alpha_list_model_collections_response.py +57 -0
- eval_studio_client/api/test/test_v1alpha_list_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_leaderboards_response.py +116 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_list_operations_response.py +73 -0
- eval_studio_client/api/test/test_v1alpha_list_perturbators_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_list_rag_collections_response.py +57 -0
- eval_studio_client/api/test/test_v1alpha_list_test_cases_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_list_test_classes_response.py +72 -0
- eval_studio_client/api/test/test_v1alpha_list_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_model.py +64 -0
- eval_studio_client/api/test/test_v1alpha_model_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_operation.py +72 -0
- eval_studio_client/api/test/test_v1alpha_operation_progress.py +54 -0
- eval_studio_client/api/test/test_v1alpha_perturb_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_perturbator.py +63 -0
- eval_studio_client/api/test/test_v1alpha_perturbator_configuration.py +53 -0
- eval_studio_client/api/test/test_v1alpha_perturbator_intensity.py +33 -0
- eval_studio_client/api/test/test_v1alpha_problem_and_action.py +65 -0
- eval_studio_client/api/test/test_v1alpha_test.py +66 -0
- eval_studio_client/api/test/test_v1alpha_test_case.py +64 -0
- eval_studio_client/api/test/test_v1alpha_test_case_relationship.py +53 -0
- eval_studio_client/api/test/test_v1alpha_test_class.py +69 -0
- eval_studio_client/api/test/test_v1alpha_test_class_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_test_lab.py +67 -0
- eval_studio_client/api/test/test_v1alpha_update_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_update_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_update_leaderboard_response.py +114 -0
- eval_studio_client/api/test/test_v1alpha_update_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_update_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_update_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_update_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_who_am_i_response.py +53 -0
- eval_studio_client/api/test/test_who_am_i_service_api.py +38 -0
- eval_studio_client/client.py +98 -0
- eval_studio_client/dashboards.py +187 -0
- eval_studio_client/documents.py +95 -0
- eval_studio_client/evaluators.py +65 -0
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +6043 -0
- eval_studio_client/insights.py +35 -0
- eval_studio_client/leaderboards.py +207 -0
- eval_studio_client/models.py +522 -0
- eval_studio_client/perturbators.py +101 -0
- eval_studio_client/problems.py +50 -0
- eval_studio_client/test_labs.py +319 -0
- eval_studio_client/tests.py +369 -0
- eval_studio_client-0.7.0.dist-info/METADATA +18 -0
- eval_studio_client-0.7.0.dist-info/RECORD +470 -0
- eval_studio_client-0.7.0.dist-info/WHEEL +4 -0
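
The hunks that follow contain the generated Pydantic models for documents, evaluation tests, evaluators, and evaluator parameters. As a minimal orientation sketch (not part of the diff; it assumes the wheel has been installed, e.g. with pip), the models are importable directly from their module paths listed above:

    # Sketch only; assumes the eval-studio-client 0.7.0 wheel is installed.
    from eval_studio_client.api.models.v1alpha_document import V1alphaDocument
    from eval_studio_client.api.models.v1alpha_evaluator import V1alphaEvaluator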
eval_studio_client/api/models/v1alpha_document.py
@@ -0,0 +1,120 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import pprint
import re  # noqa: F401
import json

from datetime import datetime
from pydantic import BaseModel, ConfigDict, Field, StrictStr
from typing import Any, ClassVar, Dict, List, Optional
from typing import Optional, Set
from typing_extensions import Self

class V1alphaDocument(BaseModel):
    """
    V1alphaDocument
    """ # noqa: E501
    name: Optional[StrictStr] = None
    create_time: Optional[datetime] = Field(default=None, description="Output only. Timestamp when the Document was created.", alias="createTime")
    creator: Optional[StrictStr] = Field(default=None, description="Output only. Name of the user or service that requested creation of the Document.")
    update_time: Optional[datetime] = Field(default=None, description="Output only. Optional. Timestamp when the Document was last updated.", alias="updateTime")
    updater: Optional[StrictStr] = Field(default=None, description="Output only. Optional. Name of the user or service that requested update of the Document.")
    delete_time: Optional[datetime] = Field(default=None, description="Output only. Optional. Set when the Document is deleted. When set Document should be considered as deleted.", alias="deleteTime")
    deleter: Optional[StrictStr] = Field(default=None, description="Output only. Optional. Name of the user or service that requested deletion of the Document.")
    display_name: Optional[StrictStr] = Field(default=None, description="Human readable name of the Document.", alias="displayName")
    description: Optional[StrictStr] = Field(default=None, description="Optional. Arbitrary description of the Document.")
    url: Optional[StrictStr] = Field(default=None, description="Required. Immutable. Absolute URL where the document can be downloaded.")
    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "url"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of V1alphaDocument from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        """
        excluded_fields: Set[str] = set([
            "name",
            "create_time",
            "creator",
            "update_time",
            "updater",
            "delete_time",
            "deleter",
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of V1alphaDocument from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "name": obj.get("name"),
            "createTime": obj.get("createTime"),
            "creator": obj.get("creator"),
            "updateTime": obj.get("updateTime"),
            "updater": obj.get("updater"),
            "deleteTime": obj.get("deleteTime"),
            "deleter": obj.get("deleter"),
            "displayName": obj.get("displayName"),
            "description": obj.get("description"),
            "url": obj.get("url")
        })
        return _obj

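A minimal usage sketch for the model above (not part of the package; the payload values are invented for illustration):

    from eval_studio_client.api.models.v1alpha_document import V1alphaDocument

    # Parse a JSON payload; camelCase keys map to snake_case attributes via the aliases above.
    doc = V1alphaDocument.from_json(
        '{"name": "documents/123", "displayName": "FAQ", "url": "https://example.com/faq.pdf"}'
    )
    print(doc.display_name)  # "FAQ"
    print(doc.to_dict())     # read-only fields such as "name" and "createTime" are excluded here
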
eval_studio_client/api/models/v1alpha_evaluation_test.py
@@ -0,0 +1,107 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import pprint
import re  # noqa: F401
import json

from pydantic import BaseModel, ConfigDict, Field, StrictStr
from typing import Any, ClassVar, Dict, List, Optional
from eval_studio_client.api.models.v1alpha_test_case import V1alphaTestCase
from eval_studio_client.api.models.v1alpha_test_case_relationship import V1alphaTestCaseRelationship
from typing import Optional, Set
from typing_extensions import Self

class V1alphaEvaluationTest(BaseModel):
    """
    EvaluationTest defines a single test in a suite, with materialized corpus (documents) and test cases.
    """ # noqa: E501
    document_urls: Optional[List[StrictStr]] = Field(default=None, description="Optional. List of documents that create the context of the test.", alias="documentUrls")
    test_cases: Optional[List[V1alphaTestCase]] = Field(default=None, description="Required. The test cases to run.", alias="testCases")
    test_case_relationships: Optional[List[V1alphaTestCaseRelationship]] = Field(default=None, description="Optional. List of relationships between test cases.", alias="testCaseRelationships")
    __properties: ClassVar[List[str]] = ["documentUrls", "testCases", "testCaseRelationships"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of V1alphaEvaluationTest from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of each item in test_cases (list)
        _items = []
        if self.test_cases:
            for _item in self.test_cases:
                if _item:
                    _items.append(_item.to_dict())
            _dict['testCases'] = _items
        # override the default output from pydantic by calling `to_dict()` of each item in test_case_relationships (list)
        _items = []
        if self.test_case_relationships:
            for _item in self.test_case_relationships:
                if _item:
                    _items.append(_item.to_dict())
            _dict['testCaseRelationships'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of V1alphaEvaluationTest from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "documentUrls": obj.get("documentUrls"),
            "testCases": [V1alphaTestCase.from_dict(_item) for _item in obj["testCases"]] if obj.get("testCases") is not None else None,
            "testCaseRelationships": [V1alphaTestCaseRelationship.from_dict(_item) for _item in obj["testCaseRelationships"]] if obj.get("testCaseRelationships") is not None else None
        })
        return _obj

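A short sketch of the serialization behaviour of the model above (illustrative URL only):

    from eval_studio_client.api.models.v1alpha_evaluation_test import V1alphaEvaluationTest

    # populate_by_name=True allows construction with the snake_case field names.
    test = V1alphaEvaluationTest(document_urls=["https://example.com/corpus.pdf"])
    print(test.to_json())  # {"documentUrls": ["https://example.com/corpus.pdf"]}
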
eval_studio_client/api/models/v1alpha_evaluator.py
@@ -0,0 +1,155 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import pprint
import re  # noqa: F401
import json

from datetime import datetime
from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr, field_validator
from typing import Any, ClassVar, Dict, List, Optional, Union
from typing_extensions import Annotated
from eval_studio_client.api.models.v1alpha_evaluator_parameter import V1alphaEvaluatorParameter
from typing import Optional, Set
from typing_extensions import Self

class V1alphaEvaluator(BaseModel):
    """
    V1alphaEvaluator
    """ # noqa: E501
    name: Optional[StrictStr] = None
    create_time: Optional[datetime] = Field(default=None, description="Output only. Timestamp when the Evaluator was created.", alias="createTime")
    creator: Optional[StrictStr] = Field(default=None, description="Output only. Name of the user or service that requested creation of the Evaluator.")
    update_time: Optional[datetime] = Field(default=None, description="Output only. Optional. Timestamp when the Evaluator was last updated.", alias="updateTime")
    updater: Optional[StrictStr] = Field(default=None, description="Output only. Optional. Name of the user or service that requested update of the Evaluator.")
    delete_time: Optional[datetime] = Field(default=None, description="Output only. Optional. Set when the Evaluator is deleted. When set Evaluator should be considered as deleted.", alias="deleteTime")
    deleter: Optional[StrictStr] = Field(default=None, description="Output only. Optional. Name of the user or service that requested deletion of the Evaluator.")
    display_name: Optional[StrictStr] = Field(default=None, description="Human readable name of the Evaluator.", alias="displayName")
    description: Optional[StrictStr] = Field(default=None, description="Optional. Arbitrary description of the Evaluator.")
    content: Optional[Union[Annotated[bytes, Field(strict=True)], Annotated[str, Field(strict=True)]]] = Field(default=None, description="Base64 encoded Evaluator implementation.")
    mime_type: Optional[StrictStr] = Field(default=None, description="MIME type of the Evaluator implementation, e.g.: \"text/x-python\" or \"application/zip\".", alias="mimeType")
    filename: Optional[StrictStr] = Field(default=None, description="Filename of the Evaluator implementation, e.g.: \"evaluator.py\" or \"evaluator.zip\".")
    identifier: Optional[StrictStr] = Field(default=None, description="Well known identifier of the Evaluator implementation.")
    tags: Optional[List[StrictStr]] = Field(default=None, description="Optional. Tags or other identifiers of the Evaluator.")
    parameters: Optional[List[V1alphaEvaluatorParameter]] = Field(default=None, description="Optional. Additional parameters of the Evaluator.")
    brief_description: Optional[StrictStr] = Field(default=None, description="Optional. Short preview of the Evaluator's description.", alias="briefDescription")
    enabled: Optional[StrictBool] = Field(default=None, description="Output only. Whether this Evaluator can be used for creating evaluations. Evaluator might be disabled because it has some external requirements that are not met.")
    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "content", "mimeType", "filename", "identifier", "tags", "parameters", "briefDescription", "enabled"]

    @field_validator('content')
    def content_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        if value is None:
            return value

        if not re.match(r"^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$", value):
            raise ValueError(r"must validate the regular expression /^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of V1alphaEvaluator from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        """
        excluded_fields: Set[str] = set([
            "name",
            "create_time",
            "creator",
            "update_time",
            "updater",
            "delete_time",
            "deleter",
            "enabled",
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of each item in parameters (list)
        _items = []
        if self.parameters:
            for _item in self.parameters:
                if _item:
                    _items.append(_item.to_dict())
            _dict['parameters'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of V1alphaEvaluator from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "name": obj.get("name"),
            "createTime": obj.get("createTime"),
            "creator": obj.get("creator"),
            "updateTime": obj.get("updateTime"),
            "updater": obj.get("updater"),
            "deleteTime": obj.get("deleteTime"),
            "deleter": obj.get("deleter"),
            "displayName": obj.get("displayName"),
            "description": obj.get("description"),
            "content": obj.get("content"),
            "mimeType": obj.get("mimeType"),
            "filename": obj.get("filename"),
            "identifier": obj.get("identifier"),
            "tags": obj.get("tags"),
            "parameters": [V1alphaEvaluatorParameter.from_dict(_item) for _item in obj["parameters"]] if obj.get("parameters") is not None else None,
            "briefDescription": obj.get("briefDescription"),
            "enabled": obj.get("enabled")
        })
        return _obj

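A hedged sketch of how the `content` validator above constrains a custom evaluator payload; the script, display name, MIME type, and filename values below are illustrative (the MIME type and filename examples are taken from the field descriptions):

    import base64

    from eval_studio_client.api.models.v1alpha_evaluator import V1alphaEvaluator

    # Hypothetical evaluator script, base64-encoded so it passes the regex validator above.
    script = b"def evaluate(answer):\n    return 1.0\n"
    evaluator = V1alphaEvaluator(
        display_name="My custom evaluator",
        content=base64.b64encode(script).decode("ascii"),
        mime_type="text/x-python",
        filename="evaluator.py",
    )
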
eval_studio_client/api/models/v1alpha_evaluator_param_type.py
@@ -0,0 +1,42 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import json
from enum import Enum
from typing_extensions import Self


class V1alphaEvaluatorParamType(str, Enum):
    """
    - EVALUATOR_PARAM_TYPE_BOOL: Boolean parameter type.  - EVALUATOR_PARAM_TYPE_INT: Integer parameter type.  - EVALUATOR_PARAM_TYPE_FLOAT: Float parameter type.  - EVALUATOR_PARAM_TYPE_STR: String parameter type.  - EVALUATOR_PARAM_TYPE_LIST: List parameter type.  - EVALUATOR_PARAM_TYPE_DICT: Dict parameter type.
    """

    """
    allowed enum values
    """
    EVALUATOR_PARAM_TYPE_UNSPECIFIED = 'EVALUATOR_PARAM_TYPE_UNSPECIFIED'
    EVALUATOR_PARAM_TYPE_BOOL = 'EVALUATOR_PARAM_TYPE_BOOL'
    EVALUATOR_PARAM_TYPE_INT = 'EVALUATOR_PARAM_TYPE_INT'
    EVALUATOR_PARAM_TYPE_FLOAT = 'EVALUATOR_PARAM_TYPE_FLOAT'
    EVALUATOR_PARAM_TYPE_STR = 'EVALUATOR_PARAM_TYPE_STR'
    EVALUATOR_PARAM_TYPE_LIST = 'EVALUATOR_PARAM_TYPE_LIST'
    EVALUATOR_PARAM_TYPE_DICT = 'EVALUATOR_PARAM_TYPE_DICT'

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Create an instance of V1alphaEvaluatorParamType from a JSON string"""
        return cls(json.loads(json_str))

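The enum above deserializes from its JSON string value, for example:

    from eval_studio_client.api.models.v1alpha_evaluator_param_type import V1alphaEvaluatorParamType

    t = V1alphaEvaluatorParamType.from_json('"EVALUATOR_PARAM_TYPE_FLOAT"')
    assert t is V1alphaEvaluatorParamType.EVALUATOR_PARAM_TYPE_FLOAT
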
eval_studio_client/api/models/v1alpha_evaluator_parameter.py
@@ -0,0 +1,126 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import pprint
import re  # noqa: F401
import json

from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictFloat, StrictInt, StrictStr
from typing import Any, ClassVar, Dict, List, Optional, Union
from eval_studio_client.api.models.v1alpha_evaluator_param_type import V1alphaEvaluatorParamType
from typing import Optional, Set
from typing_extensions import Self

class V1alphaEvaluatorParameter(BaseModel):
    """
    V1alphaEvaluatorParameter
    """ # noqa: E501
    name: Optional[StrictStr] = Field(default=None, description="Output only. Parameter name.")
    type: Optional[V1alphaEvaluatorParamType] = None
    description: Optional[StrictStr] = Field(default=None, description="Output only. Parameter description.")
    comment: Optional[StrictStr] = Field(default=None, description="Output only. Parameter comment.")
    string_val: Optional[StrictStr] = Field(default=None, alias="stringVal")
    float_val: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, alias="floatVal")
    bool_val: Optional[StrictBool] = Field(default=None, alias="boolVal")
    min: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Output only. Minimum value.")
    max: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Output only. Maximum value.")
    predefined: Optional[List[StrictStr]] = Field(default=None, description="Output only. Optional. Predefined values.")
    tags: Optional[List[StrictStr]] = Field(default=None, description="Output only. Optional. Tags or other identifiers of the parameter.")
    category: Optional[List[StrictStr]] = Field(default=None, description="Output only. Category of the parameter.")
    __properties: ClassVar[List[str]] = ["name", "type", "description", "comment", "stringVal", "floatVal", "boolVal", "min", "max", "predefined", "tags", "category"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of V1alphaEvaluatorParameter from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        """
        excluded_fields: Set[str] = set([
            "name",
            "description",
            "comment",
            "min",
            "max",
            "predefined",
            "tags",
            "category",
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of V1alphaEvaluatorParameter from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "name": obj.get("name"),
            "type": obj.get("type"),
            "description": obj.get("description"),
            "comment": obj.get("comment"),
            "stringVal": obj.get("stringVal"),
            "floatVal": obj.get("floatVal"),
            "boolVal": obj.get("boolVal"),
            "min": obj.get("min"),
            "max": obj.get("max"),
            "predefined": obj.get("predefined"),
            "tags": obj.get("tags"),
            "category": obj.get("category")
        })
        return _obj

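A small sketch of parsing an evaluator parameter as it might arrive on the wire (the parameter name and numeric values are hypothetical, not taken from the service):

    from eval_studio_client.api.models.v1alpha_evaluator_parameter import V1alphaEvaluatorParameter

    param = V1alphaEvaluatorParameter.from_dict({
        "name": "temperature",                  # hypothetical parameter
        "type": "EVALUATOR_PARAM_TYPE_FLOAT",   # coerced into V1alphaEvaluatorParamType
        "floatVal": 0.0,
        "min": 0.0,
        "max": 2.0,
    })
    print(param.type, param.float_val, param.max)
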
eval_studio_client/api/models/v1alpha_evaluator_view.py
@@ -0,0 +1,38 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import json
from enum import Enum
from typing_extensions import Self


class V1alphaEvaluatorView(str, Enum):
    """
    EvaluatorView specifies the amount of information included in the Evaluator's description.  - EVALUATOR_VIEW_UNSPECIFIED: The default / unset value. The API will default to the EVALUATOR_VIEW_BRIEF.  - EVALUATOR_VIEW_BRIEF: Brief view of the Evaluator, which doesn't include the long description, only the brief one.  - EVALUATOR_VIEW_FULL: Full view of the evaluator, including brief and full description.
    """

    """
    allowed enum values
    """
    EVALUATOR_VIEW_UNSPECIFIED = 'EVALUATOR_VIEW_UNSPECIFIED'
    EVALUATOR_VIEW_BRIEF = 'EVALUATOR_VIEW_BRIEF'
    EVALUATOR_VIEW_FULL = 'EVALUATOR_VIEW_FULL'

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Create an instance of V1alphaEvaluatorView from a JSON string"""
        return cls(json.loads(json_str))

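As with the other enums, the view values round-trip through their wire strings; per the docstring above, EVALUATOR_VIEW_BRIEF is what the API falls back to when the view is left unspecified:

    from eval_studio_client.api.models.v1alpha_evaluator_view import V1alphaEvaluatorView

    view = V1alphaEvaluatorView.from_json('"EVALUATOR_VIEW_FULL"')
    assert view is V1alphaEvaluatorView.EVALUATOR_VIEW_FULL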