eval-studio-client 0.7.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/__about__.py +1 -0
- eval_studio_client/__init__.py +4 -0
- eval_studio_client/api/__init__.py +180 -0
- eval_studio_client/api/api/__init__.py +20 -0
- eval_studio_client/api/api/dashboard_service_api.py +2142 -0
- eval_studio_client/api/api/document_service_api.py +1868 -0
- eval_studio_client/api/api/evaluation_service_api.py +1603 -0
- eval_studio_client/api/api/evaluator_service_api.py +1343 -0
- eval_studio_client/api/api/info_service_api.py +275 -0
- eval_studio_client/api/api/leaderboard_service_api.py +3336 -0
- eval_studio_client/api/api/model_service_api.py +2913 -0
- eval_studio_client/api/api/operation_progress_service_api.py +292 -0
- eval_studio_client/api/api/operation_service_api.py +1359 -0
- eval_studio_client/api/api/perturbation_service_api.py +321 -0
- eval_studio_client/api/api/perturbator_service_api.py +532 -0
- eval_studio_client/api/api/test_case_service_api.py +1913 -0
- eval_studio_client/api/api/test_class_service_api.py +532 -0
- eval_studio_client/api/api/test_lab_service_api.py +634 -0
- eval_studio_client/api/api/test_service_api.py +2712 -0
- eval_studio_client/api/api/who_am_i_service_api.py +275 -0
- eval_studio_client/api/api_client.py +770 -0
- eval_studio_client/api/api_response.py +21 -0
- eval_studio_client/api/configuration.py +436 -0
- eval_studio_client/api/docs/DashboardServiceApi.md +549 -0
- eval_studio_client/api/docs/DocumentServiceApi.md +478 -0
- eval_studio_client/api/docs/EvaluationServiceApi.md +332 -0
- eval_studio_client/api/docs/EvaluatorServiceApi.md +345 -0
- eval_studio_client/api/docs/InfoServiceApi.md +71 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +835 -0
- eval_studio_client/api/docs/ModelServiceApi.md +750 -0
- eval_studio_client/api/docs/OperationProgressServiceApi.md +75 -0
- eval_studio_client/api/docs/OperationServiceApi.md +345 -0
- eval_studio_client/api/docs/PerturbationServiceApi.md +78 -0
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +31 -0
- eval_studio_client/api/docs/PerturbatorServiceApi.md +138 -0
- eval_studio_client/api/docs/ProtobufAny.md +30 -0
- eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +41 -0
- eval_studio_client/api/docs/RequiredTheDocumentToUpdate.md +38 -0
- eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +54 -0
- eval_studio_client/api/docs/RequiredTheModelToUpdate.md +41 -0
- eval_studio_client/api/docs/RequiredTheOperationToFinalize.md +39 -0
- eval_studio_client/api/docs/RequiredTheOperationToUpdate.md +39 -0
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +39 -0
- eval_studio_client/api/docs/RequiredTheTestToUpdate.md +39 -0
- eval_studio_client/api/docs/RpcStatus.md +32 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +486 -0
- eval_studio_client/api/docs/TestCaseServiceBatchDeleteTestCasesRequest.md +29 -0
- eval_studio_client/api/docs/TestClassServiceApi.md +138 -0
- eval_studio_client/api/docs/TestLabServiceApi.md +151 -0
- eval_studio_client/api/docs/TestServiceApi.md +689 -0
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +31 -0
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsRequest.md +31 -0
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsRequest.md +30 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteTestsRequest.md +30 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchImportLeaderboardRequest.md +37 -0
- eval_studio_client/api/docs/V1alphaBatchImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchImportTestsRequest.md +32 -0
- eval_studio_client/api/docs/V1alphaBatchImportTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCheckBaseModelsResponse.md +30 -0
- eval_studio_client/api/docs/V1alphaCollectionInfo.md +33 -0
- eval_studio_client/api/docs/V1alphaCreateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateEvaluationRequest.md +37 -0
- eval_studio_client/api/docs/V1alphaCreateEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateLeaderboardRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateLeaderboardWithoutCacheResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreatePerturbationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDashboard.md +41 -0
- eval_studio_client/api/docs/V1alphaDashboardStatus.md +12 -0
- eval_studio_client/api/docs/V1alphaDeleteDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDocument.md +38 -0
- eval_studio_client/api/docs/V1alphaEvaluationTest.md +32 -0
- eval_studio_client/api/docs/V1alphaEvaluator.md +45 -0
- eval_studio_client/api/docs/V1alphaEvaluatorParamType.md +12 -0
- eval_studio_client/api/docs/V1alphaEvaluatorParameter.md +40 -0
- eval_studio_client/api/docs/V1alphaEvaluatorView.md +12 -0
- eval_studio_client/api/docs/V1alphaFinalizeOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaFindAllTestCasesByIDResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaFindTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetInfoResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetOperationProgressByParentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetPerturbatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetTestClassResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaImportEvaluationRequest.md +33 -0
- eval_studio_client/api/docs/V1alphaImportLeaderboardRequest.md +37 -0
- eval_studio_client/api/docs/V1alphaImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaInfo.md +35 -0
- eval_studio_client/api/docs/V1alphaInsight.md +40 -0
- eval_studio_client/api/docs/V1alphaLeaderboard.md +54 -0
- eval_studio_client/api/docs/V1alphaLeaderboardStatus.md +12 -0
- eval_studio_client/api/docs/V1alphaLeaderboardType.md +12 -0
- eval_studio_client/api/docs/V1alphaLeaderboardView.md +12 -0
- eval_studio_client/api/docs/V1alphaListBaseModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListLLMModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListLeaderboardsResponse.md +30 -0
- eval_studio_client/api/docs/V1alphaListModelCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListPerturbatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListRAGCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListTestClassesResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaModel.md +42 -0
- eval_studio_client/api/docs/V1alphaModelType.md +12 -0
- eval_studio_client/api/docs/V1alphaOperation.md +40 -0
- eval_studio_client/api/docs/V1alphaOperationProgress.md +32 -0
- eval_studio_client/api/docs/V1alphaPerturbTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaPerturbator.md +39 -0
- eval_studio_client/api/docs/V1alphaPerturbatorConfiguration.md +32 -0
- eval_studio_client/api/docs/V1alphaPerturbatorIntensity.md +11 -0
- eval_studio_client/api/docs/V1alphaProblemAndAction.md +39 -0
- eval_studio_client/api/docs/V1alphaTest.md +40 -0
- eval_studio_client/api/docs/V1alphaTestCase.md +40 -0
- eval_studio_client/api/docs/V1alphaTestCaseRelationship.md +31 -0
- eval_studio_client/api/docs/V1alphaTestClass.md +41 -0
- eval_studio_client/api/docs/V1alphaTestClassType.md +12 -0
- eval_studio_client/api/docs/V1alphaTestLab.md +41 -0
- eval_studio_client/api/docs/V1alphaUpdateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaWhoAmIResponse.md +31 -0
- eval_studio_client/api/docs/WhoAmIServiceApi.md +72 -0
- eval_studio_client/api/exceptions.py +199 -0
- eval_studio_client/api/models/__init__.py +148 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +115 -0
- eval_studio_client/api/models/protobuf_any.py +100 -0
- eval_studio_client/api/models/required_the_dashboard_to_update.py +127 -0
- eval_studio_client/api/models/required_the_document_to_update.py +116 -0
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +178 -0
- eval_studio_client/api/models/required_the_model_to_update.py +127 -0
- eval_studio_client/api/models/required_the_operation_to_finalize.py +129 -0
- eval_studio_client/api/models/required_the_operation_to_update.py +129 -0
- eval_studio_client/api/models/required_the_test_case_to_update.py +120 -0
- eval_studio_client/api/models/required_the_test_to_update.py +122 -0
- eval_studio_client/api/models/rpc_status.py +99 -0
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +87 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +99 -0
- eval_studio_client/api/models/v1alpha_batch_create_leaderboards_request.py +99 -0
- eval_studio_client/api/models/v1alpha_batch_create_leaderboards_response.py +91 -0
- eval_studio_client/api/models/v1alpha_batch_delete_dashboards_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_documents_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_documents_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_evaluators_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_evaluators_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_leaderboards_request.py +90 -0
- eval_studio_client/api/models/v1alpha_batch_delete_leaderboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_models_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_test_cases_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_tests_request.py +89 -0
- eval_studio_client/api/models/v1alpha_batch_delete_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_documents_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_leaderboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_operations_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_import_leaderboard_request.py +104 -0
- eval_studio_client/api/models/v1alpha_batch_import_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_batch_import_tests_request.py +93 -0
- eval_studio_client/api/models/v1alpha_batch_import_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_check_base_models_response.py +89 -0
- eval_studio_client/api/models/v1alpha_collection_info.py +93 -0
- eval_studio_client/api/models/v1alpha_create_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_evaluation_request.py +115 -0
- eval_studio_client/api/models/v1alpha_create_evaluator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_leaderboard_request.py +91 -0
- eval_studio_client/api/models/v1alpha_create_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_leaderboard_without_cache_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_perturbation_response.py +87 -0
- eval_studio_client/api/models/v1alpha_create_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_test_lab_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_dashboard.py +131 -0
- eval_studio_client/api/models/v1alpha_dashboard_status.py +39 -0
- eval_studio_client/api/models/v1alpha_delete_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_evaluator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_document.py +120 -0
- eval_studio_client/api/models/v1alpha_evaluation_test.py +107 -0
- eval_studio_client/api/models/v1alpha_evaluator.py +155 -0
- eval_studio_client/api/models/v1alpha_evaluator_param_type.py +42 -0
- eval_studio_client/api/models/v1alpha_evaluator_parameter.py +126 -0
- eval_studio_client/api/models/v1alpha_evaluator_view.py +38 -0
- eval_studio_client/api/models/v1alpha_finalize_operation_response.py +91 -0
- eval_studio_client/api/models/v1alpha_find_all_test_cases_by_id_response.py +95 -0
- eval_studio_client/api/models/v1alpha_find_test_lab_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_evaluator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_info_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_operation_progress_by_parent_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_operation_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_perturbator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_test_class_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_import_evaluation_request.py +99 -0
- eval_studio_client/api/models/v1alpha_import_leaderboard_request.py +104 -0
- eval_studio_client/api/models/v1alpha_import_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_info.py +99 -0
- eval_studio_client/api/models/v1alpha_insight.py +107 -0
- eval_studio_client/api/models/v1alpha_leaderboard.py +182 -0
- eval_studio_client/api/models/v1alpha_leaderboard_status.py +39 -0
- eval_studio_client/api/models/v1alpha_leaderboard_type.py +39 -0
- eval_studio_client/api/models/v1alpha_leaderboard_view.py +39 -0
- eval_studio_client/api/models/v1alpha_list_base_models_response.py +87 -0
- eval_studio_client/api/models/v1alpha_list_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_documents_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_evaluators_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_leaderboards_response.py +97 -0
- eval_studio_client/api/models/v1alpha_list_llm_models_response.py +87 -0
- eval_studio_client/api/models/v1alpha_list_model_collections_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_leaderboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_operations_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_perturbators_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_rag_collections_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_test_cases_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_test_classes_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_model.py +131 -0
- eval_studio_client/api/models/v1alpha_model_type.py +46 -0
- eval_studio_client/api/models/v1alpha_operation.py +133 -0
- eval_studio_client/api/models/v1alpha_operation_progress.py +99 -0
- eval_studio_client/api/models/v1alpha_perturb_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_perturbator.py +122 -0
- eval_studio_client/api/models/v1alpha_perturbator_configuration.py +92 -0
- eval_studio_client/api/models/v1alpha_perturbator_intensity.py +39 -0
- eval_studio_client/api/models/v1alpha_problem_and_action.py +129 -0
- eval_studio_client/api/models/v1alpha_test.py +126 -0
- eval_studio_client/api/models/v1alpha_test_case.py +124 -0
- eval_studio_client/api/models/v1alpha_test_case_relationship.py +91 -0
- eval_studio_client/api/models/v1alpha_test_class.py +127 -0
- eval_studio_client/api/models/v1alpha_test_class_type.py +42 -0
- eval_studio_client/api/models/v1alpha_test_lab.py +137 -0
- eval_studio_client/api/models/v1alpha_update_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_operation_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_who_am_i_response.py +91 -0
- eval_studio_client/api/rest.py +257 -0
- eval_studio_client/api/test/__init__.py +0 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +79 -0
- eval_studio_client/api/test/test_document_service_api.py +73 -0
- eval_studio_client/api/test/test_evaluation_service_api.py +55 -0
- eval_studio_client/api/test/test_evaluator_service_api.py +61 -0
- eval_studio_client/api/test/test_info_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +103 -0
- eval_studio_client/api/test/test_model_service_api.py +97 -0
- eval_studio_client/api/test/test_operation_progress_service_api.py +37 -0
- eval_studio_client/api/test/test_operation_service_api.py +61 -0
- eval_studio_client/api/test/test_perturbation_service_api.py +37 -0
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +79 -0
- eval_studio_client/api/test/test_perturbator_service_api.py +43 -0
- eval_studio_client/api/test/test_protobuf_any.py +51 -0
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +64 -0
- eval_studio_client/api/test/test_required_the_document_to_update.py +59 -0
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +115 -0
- eval_studio_client/api/test/test_required_the_model_to_update.py +63 -0
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +71 -0
- eval_studio_client/api/test/test_required_the_operation_to_update.py +71 -0
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +63 -0
- eval_studio_client/api/test/test_required_the_test_to_update.py +65 -0
- eval_studio_client/api/test/test_rpc_status.py +57 -0
- eval_studio_client/api/test/test_test_case_service_api.py +73 -0
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +53 -0
- eval_studio_client/api/test/test_test_class_service_api.py +43 -0
- eval_studio_client/api/test/test_test_lab_service_api.py +43 -0
- eval_studio_client/api/test/test_test_service_api.py +91 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +58 -0
- eval_studio_client/api/test/test_v1alpha_batch_create_leaderboards_request.py +119 -0
- eval_studio_client/api/test/test_v1alpha_batch_create_leaderboards_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_dashboards_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_documents_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_documents_response.py +63 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_evaluators_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_evaluators_response.py +91 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_leaderboards_request.py +54 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_leaderboards_response.py +116 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_models_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_test_cases_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_tests_request.py +54 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_documents_response.py +63 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_leaderboards_response.py +116 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_operations_response.py +73 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_leaderboard_request.py +61 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_leaderboard_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_tests_request.py +54 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_check_base_models_response.py +52 -0
- eval_studio_client/api/test/test_v1alpha_collection_info.py +54 -0
- eval_studio_client/api/test/test_v1alpha_create_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_create_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_create_evaluation_request.py +107 -0
- eval_studio_client/api/test/test_v1alpha_create_evaluator_response.py +89 -0
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_request.py +114 -0
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_without_cache_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_create_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_create_perturbation_response.py +51 -0
- eval_studio_client/api/test/test_v1alpha_create_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_create_test_lab_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_create_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_dashboard.py +65 -0
- eval_studio_client/api/test/test_v1alpha_dashboard_status.py +33 -0
- eval_studio_client/api/test/test_v1alpha_delete_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_delete_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_delete_evaluator_response.py +89 -0
- eval_studio_client/api/test/test_v1alpha_delete_leaderboard_response.py +114 -0
- eval_studio_client/api/test/test_v1alpha_delete_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_delete_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_delete_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_document.py +60 -0
- eval_studio_client/api/test/test_v1alpha_evaluation_test.py +76 -0
- eval_studio_client/api/test/test_v1alpha_evaluator.py +91 -0
- eval_studio_client/api/test/test_v1alpha_evaluator_param_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_evaluator_parameter.py +68 -0
- eval_studio_client/api/test/test_v1alpha_evaluator_view.py +33 -0
- eval_studio_client/api/test/test_v1alpha_finalize_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_find_all_test_cases_by_id_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_find_test_lab_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_get_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_get_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_get_evaluator_response.py +89 -0
- eval_studio_client/api/test/test_v1alpha_get_info_response.py +60 -0
- eval_studio_client/api/test/test_v1alpha_get_leaderboard_response.py +114 -0
- eval_studio_client/api/test/test_v1alpha_get_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_get_operation_progress_by_parent_response.py +55 -0
- eval_studio_client/api/test/test_v1alpha_get_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_get_perturbator_response.py +64 -0
- eval_studio_client/api/test/test_v1alpha_get_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_get_test_class_response.py +70 -0
- eval_studio_client/api/test/test_v1alpha_get_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_import_evaluation_request.py +73 -0
- eval_studio_client/api/test/test_v1alpha_import_leaderboard_request.py +59 -0
- eval_studio_client/api/test/test_v1alpha_import_leaderboard_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_info.py +59 -0
- eval_studio_client/api/test/test_v1alpha_insight.py +67 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard.py +116 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard_status.py +33 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard_view.py +33 -0
- eval_studio_client/api/test/test_v1alpha_list_base_models_response.py +53 -0
- eval_studio_client/api/test/test_v1alpha_list_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_list_documents_response.py +63 -0
- eval_studio_client/api/test/test_v1alpha_list_evaluators_response.py +91 -0
- eval_studio_client/api/test/test_v1alpha_list_leaderboards_response.py +117 -0
- eval_studio_client/api/test/test_v1alpha_list_llm_models_response.py +53 -0
- eval_studio_client/api/test/test_v1alpha_list_model_collections_response.py +57 -0
- eval_studio_client/api/test/test_v1alpha_list_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_leaderboards_response.py +116 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_list_operations_response.py +73 -0
- eval_studio_client/api/test/test_v1alpha_list_perturbators_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_list_rag_collections_response.py +57 -0
- eval_studio_client/api/test/test_v1alpha_list_test_cases_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_list_test_classes_response.py +72 -0
- eval_studio_client/api/test/test_v1alpha_list_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_model.py +64 -0
- eval_studio_client/api/test/test_v1alpha_model_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_operation.py +72 -0
- eval_studio_client/api/test/test_v1alpha_operation_progress.py +54 -0
- eval_studio_client/api/test/test_v1alpha_perturb_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_perturbator.py +63 -0
- eval_studio_client/api/test/test_v1alpha_perturbator_configuration.py +53 -0
- eval_studio_client/api/test/test_v1alpha_perturbator_intensity.py +33 -0
- eval_studio_client/api/test/test_v1alpha_problem_and_action.py +65 -0
- eval_studio_client/api/test/test_v1alpha_test.py +66 -0
- eval_studio_client/api/test/test_v1alpha_test_case.py +64 -0
- eval_studio_client/api/test/test_v1alpha_test_case_relationship.py +53 -0
- eval_studio_client/api/test/test_v1alpha_test_class.py +69 -0
- eval_studio_client/api/test/test_v1alpha_test_class_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_test_lab.py +67 -0
- eval_studio_client/api/test/test_v1alpha_update_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_update_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_update_leaderboard_response.py +114 -0
- eval_studio_client/api/test/test_v1alpha_update_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_update_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_update_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_update_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_who_am_i_response.py +53 -0
- eval_studio_client/api/test/test_who_am_i_service_api.py +38 -0
- eval_studio_client/client.py +98 -0
- eval_studio_client/dashboards.py +187 -0
- eval_studio_client/documents.py +95 -0
- eval_studio_client/evaluators.py +65 -0
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +6043 -0
- eval_studio_client/insights.py +35 -0
- eval_studio_client/leaderboards.py +207 -0
- eval_studio_client/models.py +522 -0
- eval_studio_client/perturbators.py +101 -0
- eval_studio_client/problems.py +50 -0
- eval_studio_client/test_labs.py +319 -0
- eval_studio_client/tests.py +369 -0
- eval_studio_client-0.7.0.dist-info/METADATA +18 -0
- eval_studio_client-0.7.0.dist-info/RECORD +470 -0
- eval_studio_client-0.7.0.dist-info/WHEEL +4 -0

eval_studio_client/api/models/v1alpha_leaderboard.py
@@ -0,0 +1,182 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import pprint
import re # noqa: F401
import json

from datetime import datetime
from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
from typing import Any, ClassVar, Dict, List, Optional
from eval_studio_client.api.models.v1alpha_insight import V1alphaInsight
from eval_studio_client.api.models.v1alpha_leaderboard_status import V1alphaLeaderboardStatus
from eval_studio_client.api.models.v1alpha_leaderboard_type import V1alphaLeaderboardType
from eval_studio_client.api.models.v1alpha_problem_and_action import V1alphaProblemAndAction
from typing import Optional, Set
from typing_extensions import Self

class V1alphaLeaderboard(BaseModel):
    """
    V1alphaLeaderboard
    """ # noqa: E501
    name: Optional[StrictStr] = None
    create_time: Optional[datetime] = Field(default=None, description="Output only. Timestamp when the Leaderboard was created.", alias="createTime")
    creator: Optional[StrictStr] = Field(default=None, description="Output only. Name of the user or service that requested creation of the Leaderboard.")
    update_time: Optional[datetime] = Field(default=None, description="Output only. Optional. Timestamp when the Leaderboard was last updated.", alias="updateTime")
    updater: Optional[StrictStr] = Field(default=None, description="Output only. Optional. Name of the user or service that requested update of the Leaderboard.")
    delete_time: Optional[datetime] = Field(default=None, description="Output only. Optional. Set when the Leaderboard is deleted. When set Leaderboard should be considered as deleted.", alias="deleteTime")
    deleter: Optional[StrictStr] = Field(default=None, description="Output only. Optional. Name of the user or service that requested deletion of the Leaderboard.")
    display_name: Optional[StrictStr] = Field(default=None, description="Human readable name of the Leaderboard.", alias="displayName")
    description: Optional[StrictStr] = Field(default=None, description="Optional. Arbitrary description of the Leaderboard.")
    status: Optional[V1alphaLeaderboardStatus] = None
    evaluator: Optional[StrictStr] = Field(default=None, description="Immutable. Resource name of the Evaluator used in this Leaderboard.")
    tests: Optional[List[StrictStr]] = Field(default=None, description="Immutable. Resource names of the Tests used in this Leaderboard.")
    model: Optional[StrictStr] = Field(default=None, description="Immutable. Resource name of the Model used in this Leaderboard.")
    create_operation: Optional[StrictStr] = Field(default=None, description="Output only. Operation resource name that created this Leaderboard.", alias="createOperation")
    leaderboard_report: Optional[StrictStr] = Field(default=None, alias="leaderboardReport")
    leaderboard_table: Optional[StrictStr] = Field(default=None, description="Output only. Leaderboard table in JSON format.", alias="leaderboardTable")
    leaderboard_summary: Optional[StrictStr] = Field(default=None, description="Output only. Leaderboard summary in Markdown format.", alias="leaderboardSummary")
    llm_models: Optional[List[StrictStr]] = Field(default=None, description="Immutable. System names of the LLM models used in this Leaderboard.", alias="llmModels")
    leaderboard_problems: Optional[List[V1alphaProblemAndAction]] = Field(default=None, description="Output only. Leaderboard problems and actions.", alias="leaderboardProblems")
    evaluator_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Evaluator parameters setup.", alias="evaluatorParameters")
    insights: Optional[List[V1alphaInsight]] = Field(default=None, description="Output only. Insights from the Leaderboard.")
    model_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Prameters overrides in JSON format.", alias="modelParameters")
    h2ogpte_collection: Optional[StrictStr] = Field(default=None, description="The existing collection name in H2OGPTe.", alias="h2ogpteCollection")
    type: Optional[V1alphaLeaderboardType] = None
    demo: Optional[StrictBool] = Field(default=None, description="Output only. Whether the Leaderboard is a demo resource or not. Demo resources are read only.")
    test_lab: Optional[StrictStr] = Field(default=None, description="Optional. Resource name of the TestLab if Leaderboard was created from a imported TestLab.", alias="testLab")
    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "status", "evaluator", "tests", "model", "createOperation", "leaderboardReport", "leaderboardTable", "leaderboardSummary", "llmModels", "leaderboardProblems", "evaluatorParameters", "insights", "modelParameters", "h2ogpteCollection", "type", "demo", "testLab"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of V1alphaLeaderboard from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        * OpenAPI `readOnly` fields are excluded.
        """
        excluded_fields: Set[str] = set([
            "name",
            "create_time",
            "creator",
            "update_time",
            "updater",
            "delete_time",
            "deleter",
            "create_operation",
            "leaderboard_table",
            "leaderboard_summary",
            "leaderboard_problems",
            "insights",
            "demo",
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of each item in leaderboard_problems (list)
        _items = []
        if self.leaderboard_problems:
            for _item in self.leaderboard_problems:
                if _item:
                    _items.append(_item.to_dict())
            _dict['leaderboardProblems'] = _items
        # override the default output from pydantic by calling `to_dict()` of each item in insights (list)
        _items = []
        if self.insights:
            for _item in self.insights:
                if _item:
                    _items.append(_item.to_dict())
            _dict['insights'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of V1alphaLeaderboard from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "name": obj.get("name"),
            "createTime": obj.get("createTime"),
            "creator": obj.get("creator"),
            "updateTime": obj.get("updateTime"),
            "updater": obj.get("updater"),
            "deleteTime": obj.get("deleteTime"),
            "deleter": obj.get("deleter"),
            "displayName": obj.get("displayName"),
            "description": obj.get("description"),
            "status": obj.get("status"),
            "evaluator": obj.get("evaluator"),
            "tests": obj.get("tests"),
            "model": obj.get("model"),
            "createOperation": obj.get("createOperation"),
            "leaderboardReport": obj.get("leaderboardReport"),
            "leaderboardTable": obj.get("leaderboardTable"),
            "leaderboardSummary": obj.get("leaderboardSummary"),
            "llmModels": obj.get("llmModels"),
            "leaderboardProblems": [V1alphaProblemAndAction.from_dict(_item) for _item in obj["leaderboardProblems"]] if obj.get("leaderboardProblems") is not None else None,
            "evaluatorParameters": obj.get("evaluatorParameters"),
            "insights": [V1alphaInsight.from_dict(_item) for _item in obj["insights"]] if obj.get("insights") is not None else None,
            "modelParameters": obj.get("modelParameters"),
            "h2ogpteCollection": obj.get("h2ogpteCollection"),
            "type": obj.get("type"),
            "demo": obj.get("demo"),
            "testLab": obj.get("testLab")
        })
        return _obj
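
For orientation, the generated model above is a plain pydantic v2 class, so it can be round-tripped through the from_json/to_dict helpers shown in the hunk. A minimal sketch: the field values here are invented for illustration, while the import path, method names, and camelCase aliases come from the generated code above.

from eval_studio_client.api.models.v1alpha_leaderboard import V1alphaLeaderboard

# Build a Leaderboard from a JSON payload keyed by the camelCase aliases
# defined on the model (displayName, llmModels, demo). The values are
# hypothetical examples, not real service data.
payload = '{"displayName": "RAG accuracy run", "llmModels": ["gpt-4o"], "demo": true}'
lb = V1alphaLeaderboard.from_json(payload)

print(lb.display_name)  # "RAG accuracy run" via the snake_case attribute
print(lb.to_dict())     # {"displayName": ..., "llmModels": [...]}; read-only
                        # fields such as "demo" are dropped by to_dict()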
eval_studio_client/api/models/v1alpha_leaderboard_status.py
@@ -0,0 +1,39 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import json
from enum import Enum
from typing_extensions import Self


class V1alphaLeaderboardStatus(str, Enum):
    """
    - LEADERBOARD_STATUS_UNSPECIFIED: Unspecified status. - LEADERBOARD_STATUS_PROCESSING: Leaderboard is being processed. See the Operation for details. - LEADERBOARD_STATUS_COMPLETED: Leaderboard is completed successfully. - LEADERBOARD_STATUS_FAILED: Leaderboard failed. See the Operation for details.
    """

    """
    allowed enum values
    """
    LEADERBOARD_STATUS_UNSPECIFIED = 'LEADERBOARD_STATUS_UNSPECIFIED'
    LEADERBOARD_STATUS_PROCESSING = 'LEADERBOARD_STATUS_PROCESSING'
    LEADERBOARD_STATUS_COMPLETED = 'LEADERBOARD_STATUS_COMPLETED'
    LEADERBOARD_STATUS_FAILED = 'LEADERBOARD_STATUS_FAILED'

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Create an instance of V1alphaLeaderboardStatus from a JSON string"""
        return cls(json.loads(json_str))
eval_studio_client/api/models/v1alpha_leaderboard_type.py
@@ -0,0 +1,39 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import json
from enum import Enum
from typing_extensions import Self


class V1alphaLeaderboardType(str, Enum):
    """
    - LEADERBOARD_TYPE_UNSPECIFIED: Unspecified type. - LEADERBOARD_TYPE_STANDALONE: Standalone leaderboard. - LEADERBOARD_TYPE_DASHBOARD: Leaderboard is part of a dashboard. - LEADERBOARD_TYPE_SERVICE: Leaderboard created by other service such as h2oGPTe.
    """

    """
    allowed enum values
    """
    LEADERBOARD_TYPE_UNSPECIFIED = 'LEADERBOARD_TYPE_UNSPECIFIED'
    LEADERBOARD_TYPE_STANDALONE = 'LEADERBOARD_TYPE_STANDALONE'
    LEADERBOARD_TYPE_DASHBOARD = 'LEADERBOARD_TYPE_DASHBOARD'
    LEADERBOARD_TYPE_SERVICE = 'LEADERBOARD_TYPE_SERVICE'

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Create an instance of V1alphaLeaderboardType from a JSON string"""
        return cls(json.loads(json_str))
eval_studio_client/api/models/v1alpha_leaderboard_view.py
@@ -0,0 +1,39 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import json
from enum import Enum
from typing_extensions import Self


class V1alphaLeaderboardView(str, Enum):
    """
    - result - leaderboard_table - leaderboard_summary - LEADERBOARD_VIEW_FULL: Full view of the Leaderboard. No fields are omitted. - LEADERBOARD_VIEW_BASIC_WITH_TABLE: View of the Leaderboard that is the same as LEADERBOARD_VIEW_BASIC but it includes the leaderboard_table field.
    """

    """
    allowed enum values
    """
    LEADERBOARD_VIEW_UNSPECIFIED = 'LEADERBOARD_VIEW_UNSPECIFIED'
    LEADERBOARD_VIEW_BASIC = 'LEADERBOARD_VIEW_BASIC'
    LEADERBOARD_VIEW_FULL = 'LEADERBOARD_VIEW_FULL'
    LEADERBOARD_VIEW_BASIC_WITH_TABLE = 'LEADERBOARD_VIEW_BASIC_WITH_TABLE'

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Create an instance of V1alphaLeaderboardView from a JSON string"""
        return cls(json.loads(json_str))
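
The three enum modules above share one shape: str-valued Enum members plus a from_json helper that parses a JSON string literal. A small illustrative sketch, using only names defined in the hunks above:

import json

from eval_studio_client.api.models.v1alpha_leaderboard_status import V1alphaLeaderboardStatus
from eval_studio_client.api.models.v1alpha_leaderboard_view import V1alphaLeaderboardView

# from_json expects a JSON-encoded string literal, so the quotes are part of the payload.
status = V1alphaLeaderboardStatus.from_json('"LEADERBOARD_STATUS_COMPLETED"')
assert status is V1alphaLeaderboardStatus.LEADERBOARD_STATUS_COMPLETED
assert status == "LEADERBOARD_STATUS_COMPLETED"  # str subclass, compares to plain strings

# The inverse is plain json.dumps of the enum value.
view = V1alphaLeaderboardView.LEADERBOARD_VIEW_BASIC_WITH_TABLE
print(json.dumps(view))  # "LEADERBOARD_VIEW_BASIC_WITH_TABLE"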
eval_studio_client/api/models/v1alpha_list_base_models_response.py
@@ -0,0 +1,87 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import pprint
import re # noqa: F401
import json

from pydantic import BaseModel, ConfigDict, Field, StrictStr
from typing import Any, ClassVar, Dict, List, Optional
from typing import Optional, Set
from typing_extensions import Self

class V1alphaListBaseModelsResponse(BaseModel):
    """
    V1alphaListBaseModelsResponse
    """ # noqa: E501
    base_models: Optional[List[StrictStr]] = Field(default=None, description="The list of Models.", alias="baseModels")
    __properties: ClassVar[List[str]] = ["baseModels"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of V1alphaListBaseModelsResponse from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of V1alphaListBaseModelsResponse from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "baseModels": obj.get("baseModels")
        })
        return _obj
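
Like the other List*Response models in this diff, the response above is a thin wrapper around a single list field: from_dict accepts the camelCase wire key and to_dict emits it back. A hedged sketch, where the base-model names are placeholders rather than values the service is known to return:

from eval_studio_client.api.models.v1alpha_list_base_models_response import (
    V1alphaListBaseModelsResponse,
)

# Parse a response body that has already been decoded into a dict.
resp = V1alphaListBaseModelsResponse.from_dict({"baseModels": ["model-a", "model-b"]})
print(resp.base_models)  # ['model-a', 'model-b']; alias "baseModels" maps to base_models

# Serialize back to the wire shape; fields left as None are dropped (exclude_none=True).
print(resp.to_dict())    # {'baseModels': ['model-a', 'model-b']}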
eval_studio_client/api/models/v1alpha_list_dashboards_response.py
@@ -0,0 +1,95 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import pprint
import re # noqa: F401
import json

from pydantic import BaseModel, ConfigDict, Field
from typing import Any, ClassVar, Dict, List, Optional
from eval_studio_client.api.models.v1alpha_dashboard import V1alphaDashboard
from typing import Optional, Set
from typing_extensions import Self

class V1alphaListDashboardsResponse(BaseModel):
    """
    V1alphaListDashboardsResponse
    """ # noqa: E501
    dashboards: Optional[List[V1alphaDashboard]] = Field(default=None, description="The list of Dashboards.")
    __properties: ClassVar[List[str]] = ["dashboards"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of V1alphaListDashboardsResponse from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of each item in dashboards (list)
        _items = []
        if self.dashboards:
            for _item in self.dashboards:
                if _item:
                    _items.append(_item.to_dict())
            _dict['dashboards'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of V1alphaListDashboardsResponse from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "dashboards": [V1alphaDashboard.from_dict(_item) for _item in obj["dashboards"]] if obj.get("dashboards") is not None else None
        })
        return _obj
eval_studio_client/api/models/v1alpha_list_documents_response.py
@@ -0,0 +1,95 @@
# coding: utf-8

"""
    ai/h2o/eval_studio/v1alpha/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: version not set
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import pprint
import re # noqa: F401
import json

from pydantic import BaseModel, ConfigDict, Field
from typing import Any, ClassVar, Dict, List, Optional
from eval_studio_client.api.models.v1alpha_document import V1alphaDocument
from typing import Optional, Set
from typing_extensions import Self

class V1alphaListDocumentsResponse(BaseModel):
    """
    V1alphaListDocumentsResponse
    """ # noqa: E501
    documents: Optional[List[V1alphaDocument]] = Field(default=None, description="The list of Documents.")
    __properties: ClassVar[List[str]] = ["documents"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of V1alphaListDocumentsResponse from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of each item in documents (list)
        _items = []
        if self.documents:
            for _item in self.documents:
                if _item:
                    _items.append(_item.to_dict())
            _dict['documents'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of V1alphaListDocumentsResponse from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "documents": [V1alphaDocument.from_dict(_item) for _item in obj["documents"]] if obj.get("documents") is not None else None
        })
        return _obj
@@ -0,0 +1,95 @@
 1 + # coding: utf-8
 2 + 
 3 + """
 4 +     ai/h2o/eval_studio/v1alpha/collection.proto
 5 + 
 6 +     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 7 + 
 8 +     The version of the OpenAPI document: version not set
 9 +     Generated by OpenAPI Generator (https://openapi-generator.tech)
10 + 
11 +     Do not edit the class manually.
12 + """ # noqa: E501
13 + 
14 + 
15 + from __future__ import annotations
16 + import pprint
17 + import re  # noqa: F401
18 + import json
19 + 
20 + from pydantic import BaseModel, ConfigDict, Field
21 + from typing import Any, ClassVar, Dict, List, Optional
22 + from eval_studio_client.api.models.v1alpha_evaluator import V1alphaEvaluator
23 + from typing import Optional, Set
24 + from typing_extensions import Self
25 + 
26 + class V1alphaListEvaluatorsResponse(BaseModel):
27 +     """
28 +     V1alphaListEvaluatorsResponse
29 +     """ # noqa: E501
30 +     evaluators: Optional[List[V1alphaEvaluator]] = Field(default=None, description="The list of Evaluators.")
31 +     __properties: ClassVar[List[str]] = ["evaluators"]
32 + 
33 +     model_config = ConfigDict(
34 +         populate_by_name=True,
35 +         validate_assignment=True,
36 +         protected_namespaces=(),
37 +     )
38 + 
39 + 
40 +     def to_str(self) -> str:
41 +         """Returns the string representation of the model using alias"""
42 +         return pprint.pformat(self.model_dump(by_alias=True))
43 + 
44 +     def to_json(self) -> str:
45 +         """Returns the JSON representation of the model using alias"""
46 +         # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
47 +         return json.dumps(self.to_dict())
48 + 
49 +     @classmethod
50 +     def from_json(cls, json_str: str) -> Optional[Self]:
51 +         """Create an instance of V1alphaListEvaluatorsResponse from a JSON string"""
52 +         return cls.from_dict(json.loads(json_str))
53 + 
54 +     def to_dict(self) -> Dict[str, Any]:
55 +         """Return the dictionary representation of the model using alias.
56 + 
57 +         This has the following differences from calling pydantic's
58 +         `self.model_dump(by_alias=True)`:
59 + 
60 +         * `None` is only added to the output dict for nullable fields that
61 +           were set at model initialization. Other fields with value `None`
62 +           are ignored.
63 +         """
64 +         excluded_fields: Set[str] = set([
65 +         ])
66 + 
67 +         _dict = self.model_dump(
68 +             by_alias=True,
69 +             exclude=excluded_fields,
70 +             exclude_none=True,
71 +         )
72 +         # override the default output from pydantic by calling `to_dict()` of each item in evaluators (list)
73 +         _items = []
74 +         if self.evaluators:
75 +             for _item in self.evaluators:
76 +                 if _item:
77 +                     _items.append(_item.to_dict())
78 +             _dict['evaluators'] = _items
79 +         return _dict
80 + 
81 +     @classmethod
82 +     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
83 +         """Create an instance of V1alphaListEvaluatorsResponse from a dict"""
84 +         if obj is None:
85 +             return None
86 + 
87 +         if not isinstance(obj, dict):
88 +             return cls.model_validate(obj)
89 + 
90 +         _obj = cls.model_validate({
91 +             "evaluators": [V1alphaEvaluator.from_dict(_item) for _item in obj["evaluators"]] if obj.get("evaluators") is not None else None
92 +         })
93 +         return _obj
94 + 
95 + 
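As the to_dict docstrings above note, fields left as None are dropped from the serialized output rather than emitted as null. A small hedged sketch of that behavior for the evaluators response follows; as before, the import path is assumed from the generator's conventional naming and is not part of this diff.

# Hypothetical sketch of the exclude-None behavior described in to_dict();
# the import path is an assumption, not taken from this diff.
from eval_studio_client.api.models.v1alpha_list_evaluators_response import (
    V1alphaListEvaluatorsResponse,
)

resp = V1alphaListEvaluatorsResponse.from_dict({})     # "evaluators" key absent
print(resp.evaluators)                                 # None
print(resp.to_dict())                                  # {} -- unset field omitted, not null
print(V1alphaListEvaluatorsResponse.from_dict(None))   # None is passed straight through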