eval-studio-client 0.8.0a2__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- eval_studio_client/__init__.py +2 -1
- eval_studio_client/api/__init__.py +125 -120
- eval_studio_client/api/api/__init__.py +1 -0
- eval_studio_client/api/api/dashboard_service_api.py +71 -71
- eval_studio_client/api/api/document_service_api.py +64 -64
- eval_studio_client/api/api/evaluation_service_api.py +42 -42
- eval_studio_client/api/api/evaluator_service_api.py +50 -50
- eval_studio_client/api/api/info_service_api.py +8 -8
- eval_studio_client/api/api/leaderboard_service_api.py +126 -126
- eval_studio_client/api/api/model_service_api.py +92 -92
- eval_studio_client/api/api/operation_progress_service_api.py +8 -8
- eval_studio_client/api/api/operation_service_api.py +36 -36
- eval_studio_client/api/api/perturbation_service_api.py +8 -8
- eval_studio_client/api/api/perturbator_service_api.py +15 -15
- eval_studio_client/api/api/prompt_generation_service_api.py +321 -0
- eval_studio_client/api/api/test_case_service_api.py +57 -57
- eval_studio_client/api/api/test_class_service_api.py +15 -15
- eval_studio_client/api/api/test_lab_service_api.py +22 -22
- eval_studio_client/api/api/test_service_api.py +376 -92
- eval_studio_client/api/api/who_am_i_service_api.py +8 -8
- eval_studio_client/api/api_client.py +1 -1
- eval_studio_client/api/configuration.py +1 -1
- eval_studio_client/api/docs/DashboardServiceApi.md +38 -38
- eval_studio_client/api/docs/DocumentServiceApi.md +34 -34
- eval_studio_client/api/docs/EvaluationServiceApi.md +22 -22
- eval_studio_client/api/docs/EvaluatorServiceApi.md +26 -26
- eval_studio_client/api/docs/InfoServiceApi.md +4 -4
- eval_studio_client/api/docs/LeaderboardServiceApi.md +66 -66
- eval_studio_client/api/docs/ModelServiceApi.md +50 -50
- eval_studio_client/api/docs/OperationProgressServiceApi.md +4 -4
- eval_studio_client/api/docs/OperationServiceApi.md +20 -20
- eval_studio_client/api/docs/PerturbationServiceApi.md +4 -4
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +3 -3
- eval_studio_client/api/docs/PerturbatorServiceApi.md +8 -8
- eval_studio_client/api/docs/PromptGenerationServiceApi.md +78 -0
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +35 -0
- eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +1 -1
- eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +4 -4
- eval_studio_client/api/docs/RequiredTheModelToUpdate.md +1 -1
- eval_studio_client/api/docs/TestCaseServiceApi.md +31 -31
- eval_studio_client/api/docs/TestClassServiceApi.md +8 -8
- eval_studio_client/api/docs/TestLabServiceApi.md +11 -11
- eval_studio_client/api/docs/TestServiceApi.md +119 -49
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +33 -0
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +1 -1
- eval_studio_client/api/docs/V1BatchCreateLeaderboardsRequest.md +31 -0
- eval_studio_client/api/docs/V1BatchCreateLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteDashboardsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteDocumentsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteEvaluatorsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteLeaderboardsRequest.md +30 -0
- eval_studio_client/api/docs/V1BatchDeleteLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteModelsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaBatchDeleteTestsRequest.md → V1BatchDeleteTestsRequest.md} +8 -8
- eval_studio_client/api/docs/V1BatchDeleteTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetTestsResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaBatchImportLeaderboardRequest.md → V1BatchImportLeaderboardRequest.md} +9 -9
- eval_studio_client/api/docs/V1BatchImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaBatchImportTestsRequest.md → V1BatchImportTestsRequest.md} +8 -8
- eval_studio_client/api/docs/V1BatchImportTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1CheckBaseModelsResponse.md +30 -0
- eval_studio_client/api/docs/{V1alphaCollectionInfo.md → V1CollectionInfo.md} +8 -8
- eval_studio_client/api/docs/V1CreateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaCreateEvaluationRequest.md → V1CreateEvaluationRequest.md} +10 -10
- eval_studio_client/api/docs/V1CreateEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateLeaderboardRequest.md +29 -0
- eval_studio_client/api/docs/V1CreateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateLeaderboardWithoutCacheResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1CreatePerturbationResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaDashboard.md → V1Dashboard.md} +9 -9
- eval_studio_client/api/docs/{V1alphaDashboardStatus.md → V1DashboardStatus.md} +1 -1
- eval_studio_client/api/docs/V1DeleteDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteModelResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaDocument.md → V1Document.md} +8 -8
- eval_studio_client/api/docs/V1EvaluationTest.md +32 -0
- eval_studio_client/api/docs/{V1alphaEvaluator.md → V1Evaluator.md} +10 -9
- eval_studio_client/api/docs/{V1alphaEvaluatorParamType.md → V1EvaluatorParamType.md} +1 -1
- eval_studio_client/api/docs/{V1alphaEvaluatorParameter.md → V1EvaluatorParameter.md} +9 -9
- eval_studio_client/api/docs/{V1alphaEvaluatorView.md → V1EvaluatorView.md} +1 -1
- eval_studio_client/api/docs/V1FinalizeOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1FindAllTestCasesByIDResponse.md +29 -0
- eval_studio_client/api/docs/V1FindTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1GenerateTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1GetDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1GetDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1GetEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1GetInfoResponse.md +29 -0
- eval_studio_client/api/docs/V1GetLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1GetModelResponse.md +29 -0
- eval_studio_client/api/docs/V1GetOperationProgressByParentResponse.md +29 -0
- eval_studio_client/api/docs/V1GetOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1GetPerturbatorResponse.md +29 -0
- eval_studio_client/api/docs/V1GetTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1GetTestClassResponse.md +29 -0
- eval_studio_client/api/docs/V1GetTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaImportEvaluationRequest.md → V1ImportEvaluationRequest.md} +9 -9
- eval_studio_client/api/docs/{V1alphaImportLeaderboardRequest.md → V1ImportLeaderboardRequest.md} +9 -9
- eval_studio_client/api/docs/V1ImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaInfo.md → V1Info.md} +8 -8
- eval_studio_client/api/docs/{V1alphaInsight.md → V1Insight.md} +8 -8
- eval_studio_client/api/docs/{V1alphaLeaderboard.md → V1Leaderboard.md} +12 -12
- eval_studio_client/api/docs/{V1alphaLeaderboardStatus.md → V1LeaderboardStatus.md} +1 -1
- eval_studio_client/api/docs/{V1alphaLeaderboardType.md → V1LeaderboardType.md} +1 -1
- eval_studio_client/api/docs/{V1alphaLeaderboardView.md → V1LeaderboardView.md} +1 -1
- eval_studio_client/api/docs/V1ListBaseModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListLLMModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListLeaderboardsResponse.md +30 -0
- eval_studio_client/api/docs/V1ListModelCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListMostRecentDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListMostRecentLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListMostRecentModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListMostRecentTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListPerturbatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListRAGCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestClassesResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestsResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaModel.md → V1Model.md} +9 -9
- eval_studio_client/api/docs/{V1alphaModelType.md → V1ModelType.md} +1 -1
- eval_studio_client/api/docs/{V1alphaOperation.md → V1Operation.md} +8 -8
- eval_studio_client/api/docs/{V1alphaOperationProgress.md → V1OperationProgress.md} +8 -8
- eval_studio_client/api/docs/V1PerturbTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaPerturbator.md → V1Perturbator.md} +8 -8
- eval_studio_client/api/docs/V1PerturbatorConfiguration.md +32 -0
- eval_studio_client/api/docs/{V1alphaPerturbatorIntensity.md → V1PerturbatorIntensity.md} +1 -1
- eval_studio_client/api/docs/{V1alphaProblemAndAction.md → V1ProblemAndAction.md} +8 -8
- eval_studio_client/api/docs/{V1alphaTest.md → V1Test.md} +8 -8
- eval_studio_client/api/docs/{V1alphaTestCase.md → V1TestCase.md} +8 -8
- eval_studio_client/api/docs/{V1alphaTestCaseRelationship.md → V1TestCaseRelationship.md} +8 -8
- eval_studio_client/api/docs/V1TestCasesGenerator.md +11 -0
- eval_studio_client/api/docs/{V1alphaTestClass.md → V1TestClass.md} +9 -9
- eval_studio_client/api/docs/{V1alphaTestClassType.md → V1TestClassType.md} +1 -1
- eval_studio_client/api/docs/{V1alphaTestLab.md → V1TestLab.md} +8 -8
- eval_studio_client/api/docs/V1UpdateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaWhoAmIResponse.md → V1WhoAmIResponse.md} +8 -8
- eval_studio_client/api/docs/WhoAmIServiceApi.md +4 -4
- eval_studio_client/api/exceptions.py +1 -1
- eval_studio_client/api/models/__init__.py +124 -120
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +10 -10
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +104 -0
- eval_studio_client/api/models/protobuf_any.py +1 -1
- eval_studio_client/api/models/required_the_dashboard_to_update.py +3 -3
- eval_studio_client/api/models/required_the_document_to_update.py +1 -1
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +11 -11
- eval_studio_client/api/models/required_the_model_to_update.py +3 -3
- eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_case_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_to_update.py +1 -1
- eval_studio_client/api/models/rpc_status.py +1 -1
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +96 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +4 -4
- eval_studio_client/api/models/{v1alpha_batch_create_leaderboards_request.py → v1_batch_create_leaderboards_request.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_operation_response.py → v1_batch_create_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_dashboards_request.py → v1_batch_delete_dashboards_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_dashboards_response.py → v1_batch_delete_dashboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_documents_request.py → v1_batch_delete_documents_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_documents_response.py → v1_batch_delete_documents_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_evaluators_request.py → v1_batch_delete_evaluators_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_evaluators_response.py → v1_batch_delete_evaluators_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_leaderboards_request.py → v1_batch_delete_leaderboards_request.py} +7 -7
- eval_studio_client/api/models/{v1alpha_batch_get_leaderboards_response.py → v1_batch_delete_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_models_request.py → v1_batch_delete_models_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_models_response.py → v1_batch_delete_models_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_test_cases_response.py → v1_batch_delete_test_cases_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_tests_request.py → v1_batch_delete_tests_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_tests_response.py → v1_batch_delete_tests_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_dashboards_response.py → v1_batch_get_dashboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_documents_response.py → v1_batch_get_documents_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_leaderboards_response.py → v1_batch_get_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_models_response.py → v1_batch_get_models_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_operations_response.py → v1_batch_get_operations_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_tests_response.py → v1_batch_get_tests_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_import_leaderboard_request.py → v1_batch_import_leaderboard_request.py} +7 -7
- eval_studio_client/api/models/{v1alpha_import_leaderboard_response.py → v1_batch_import_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_import_tests_request.py → v1_batch_import_tests_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_batch_delete_tests_response.py → v1_batch_import_tests_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_check_base_models_response.py → v1_check_base_models_response.py} +5 -5
- eval_studio_client/api/models/{v1alpha_collection_info.py → v1_collection_info.py} +4 -4
- eval_studio_client/api/models/{v1alpha_get_dashboard_response.py → v1_create_dashboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_document_response.py → v1_create_document_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_evaluation_request.py → v1_create_evaluation_request.py} +11 -11
- eval_studio_client/api/models/{v1alpha_get_evaluator_response.py → v1_create_evaluator_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_leaderboard_response.py → v1_create_leaderboard_request.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_operation_response.py → v1_create_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_get_model_response.py → v1_create_model_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_perturbation_response.py → v1_create_perturbation_response.py} +5 -5
- eval_studio_client/api/models/{v1alpha_get_test_case_response.py → v1_create_test_case_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_find_test_lab_response.py → v1_create_test_lab_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_test_response.py → v1_create_test_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_dashboard.py → v1_dashboard.py} +7 -7
- eval_studio_client/api/models/{v1alpha_dashboard_status.py → v1_dashboard_status.py} +3 -3
- eval_studio_client/api/models/{v1alpha_update_dashboard_response.py → v1_delete_dashboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_document_response.py → v1_delete_document_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_evaluator_response.py → v1_delete_evaluator_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_leaderboard_request.py → v1_delete_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_model_response.py → v1_delete_model_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_test_case_response.py → v1_delete_test_case_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_test_response.py → v1_delete_test_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_document.py → v1_document.py} +5 -5
- eval_studio_client/api/models/{v1alpha_evaluation_test.py → v1_evaluation_test.py} +10 -10
- eval_studio_client/api/models/{v1alpha_evaluator.py → v1_evaluator.py} +14 -10
- eval_studio_client/api/models/{v1alpha_evaluator_param_type.py → v1_evaluator_param_type.py} +3 -3
- eval_studio_client/api/models/{v1alpha_evaluator_parameter.py → v1_evaluator_parameter.py} +7 -7
- eval_studio_client/api/models/{v1alpha_evaluator_view.py → v1_evaluator_view.py} +3 -3
- eval_studio_client/api/models/{v1alpha_create_leaderboard_response.py → v1_finalize_operation_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_find_all_test_cases_by_id_response.py → v1_find_all_test_cases_by_id_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_test_lab_response.py → v1_find_test_lab_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_finalize_operation_response.py → v1_generate_test_cases_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_dashboard_response.py → v1_get_dashboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_document_response.py → v1_get_document_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_evaluator_response.py → v1_get_evaluator_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_info_response.py → v1_get_info_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_leaderboard_response.py → v1_get_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_model_response.py → v1_get_model_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_operation_progress_by_parent_response.py → v1_get_operation_progress_by_parent_response.py} +8 -8
- eval_studio_client/api/models/v1_get_operation_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_get_perturbator_response.py → v1_get_perturbator_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_test_case_response.py → v1_get_test_case_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_test_class_response.py → v1_get_test_class_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_test_response.py → v1_get_test_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_import_evaluation_request.py → v1_import_evaluation_request.py} +8 -8
- eval_studio_client/api/models/{v1alpha_import_leaderboard_request.py → v1_import_leaderboard_request.py} +7 -7
- eval_studio_client/api/models/v1_import_leaderboard_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_info.py → v1_info.py} +5 -5
- eval_studio_client/api/models/{v1alpha_insight.py → v1_insight.py} +4 -4
- eval_studio_client/api/models/{v1alpha_leaderboard.py → v1_leaderboard.py} +15 -15
- eval_studio_client/api/models/{v1alpha_leaderboard_status.py → v1_leaderboard_status.py} +3 -3
- eval_studio_client/api/models/{v1alpha_leaderboard_type.py → v1_leaderboard_type.py} +3 -3
- eval_studio_client/api/models/{v1alpha_leaderboard_view.py → v1_leaderboard_view.py} +3 -3
- eval_studio_client/api/models/{v1alpha_list_base_models_response.py → v1_list_base_models_response.py} +5 -5
- eval_studio_client/api/models/{v1alpha_batch_delete_dashboards_response.py → v1_list_dashboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_documents_response.py → v1_list_documents_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_evaluators_response.py → v1_list_evaluators_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_leaderboards_response.py → v1_list_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_llm_models_response.py → v1_list_llm_models_response.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_model_collections_response.py → v1_list_model_collections_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_models_response.py → v1_list_models_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_most_recent_dashboards_response.py → v1_list_most_recent_dashboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_most_recent_leaderboards_response.py → v1_list_most_recent_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_most_recent_models_response.py → v1_list_most_recent_models_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_import_tests_response.py → v1_list_most_recent_tests_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_operations_response.py → v1_list_operations_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_perturbators_response.py → v1_list_perturbators_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_rag_collections_response.py → v1_list_rag_collections_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_test_cases_response.py → v1_list_test_cases_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_test_classes_response.py → v1_list_test_classes_response.py} +8 -8
- eval_studio_client/api/models/v1_list_tests_response.py +95 -0
- eval_studio_client/api/models/{v1alpha_model.py → v1_model.py} +7 -7
- eval_studio_client/api/models/{v1alpha_model_type.py → v1_model_type.py} +3 -3
- eval_studio_client/api/models/{v1alpha_operation.py → v1_operation.py} +4 -4
- eval_studio_client/api/models/{v1alpha_operation_progress.py → v1_operation_progress.py} +5 -5
- eval_studio_client/api/models/{v1alpha_delete_test_response.py → v1_perturb_test_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_perturbator.py → v1_perturbator.py} +5 -5
- eval_studio_client/api/models/{v1alpha_perturbator_configuration.py → v1_perturbator_configuration.py} +6 -6
- eval_studio_client/api/models/{v1alpha_perturbator_intensity.py → v1_perturbator_intensity.py} +4 -4
- eval_studio_client/api/models/{v1alpha_problem_and_action.py → v1_problem_and_action.py} +5 -5
- eval_studio_client/api/models/{v1alpha_test.py → v1_test.py} +5 -5
- eval_studio_client/api/models/{v1alpha_test_case.py → v1_test_case.py} +5 -5
- eval_studio_client/api/models/{v1alpha_test_case_relationship.py → v1_test_case_relationship.py} +5 -5
- eval_studio_client/api/models/v1_test_cases_generator.py +50 -0
- eval_studio_client/api/models/{v1alpha_test_class.py → v1_test_class.py} +7 -7
- eval_studio_client/api/models/{v1alpha_test_class_type.py → v1_test_class_type.py} +3 -3
- eval_studio_client/api/models/{v1alpha_test_lab.py → v1_test_lab.py} +5 -5
- eval_studio_client/api/models/{v1alpha_delete_dashboard_response.py → v1_update_dashboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_document_response.py → v1_update_document_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_leaderboard_response.py → v1_update_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_model_response.py → v1_update_model_response.py} +8 -8
- eval_studio_client/api/models/v1_update_operation_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_update_test_case_response.py → v1_update_test_case_response.py} +8 -8
- eval_studio_client/api/models/v1_update_test_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_who_am_i_response.py → v1_who_am_i_response.py} +5 -5
- eval_studio_client/api/rest.py +1 -1
- eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
- eval_studio_client/api/test/test_document_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
- eval_studio_client/api/test/test_info_service_api.py +1 -1
- eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
- eval_studio_client/api/test/test_model_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +4 -4
- eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_api.py +37 -0
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +75 -0
- eval_studio_client/api/test/test_protobuf_any.py +1 -1
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +3 -3
- eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_to_update.py +1 -1
- eval_studio_client/api/test/test_rpc_status.py +1 -1
- eval_studio_client/api/test/test_test_case_service_api.py +1 -1
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_test_class_service_api.py +1 -1
- eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
- eval_studio_client/api/test/test_test_service_api.py +7 -1
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +57 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +2 -2
- eval_studio_client/api/test/{test_v1alpha_batch_create_leaderboards_request.py → test_v1_batch_create_leaderboards_request.py} +16 -16
- eval_studio_client/api/test/{test_v1alpha_create_leaderboard_response.py → test_v1_batch_create_leaderboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_models_request.py → test_v1_batch_delete_dashboards_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_get_dashboards_response.py → test_v1_batch_delete_dashboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_documents_request.py → test_v1_batch_delete_documents_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_get_documents_response.py → test_v1_batch_delete_documents_response.py} +13 -13
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +53 -0
- eval_studio_client/api/test/{test_v1alpha_batch_delete_evaluators_response.py → test_v1_batch_delete_evaluators_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_batch_delete_leaderboards_request.py → test_v1_batch_delete_leaderboards_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_get_leaderboards_response.py → test_v1_batch_delete_leaderboards_response.py} +15 -15
- eval_studio_client/api/test/test_v1_batch_delete_models_request.py +53 -0
- eval_studio_client/api/test/{test_v1alpha_batch_get_models_response.py → test_v1_batch_delete_models_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_test_cases_response.py → test_v1_batch_delete_test_cases_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_tests_request.py → test_v1_batch_delete_tests_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_get_tests_response.py → test_v1_batch_delete_tests_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_dashboards_response.py → test_v1_batch_get_dashboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_documents_response.py → test_v1_batch_get_documents_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_leaderboards_response.py → test_v1_batch_get_leaderboards_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_list_models_response.py → test_v1_batch_get_models_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_operations_response.py → test_v1_batch_get_operations_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_tests_response.py → test_v1_batch_get_tests_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_import_leaderboard_request.py → test_v1_batch_import_leaderboard_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_import_leaderboard_response.py → test_v1_batch_import_leaderboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_import_tests_request.py → test_v1_batch_import_tests_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_import_tests_response.py → test_v1_batch_import_tests_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_check_base_models_response.py → test_v1_check_base_models_response.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_collection_info.py → test_v1_collection_info.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_get_dashboard_response.py → test_v1_create_dashboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_document_response.py → test_v1_create_document_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_evaluation_request.py → test_v1_create_evaluation_request.py} +16 -16
- eval_studio_client/api/test/{test_v1alpha_get_evaluator_response.py → test_v1_create_evaluator_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_get_leaderboard_response.py → test_v1_create_leaderboard_request.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_update_operation_response.py → test_v1_create_leaderboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_import_leaderboard_response.py → test_v1_create_leaderboard_without_cache_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_model_response.py → test_v1_create_model_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_perturbation_response.py → test_v1_create_perturbation_response.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_get_test_case_response.py → test_v1_create_test_case_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_find_test_lab_response.py → test_v1_create_test_lab_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_test_response.py → test_v1_create_test_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_dashboard.py → test_v1_dashboard.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_evaluator_view.py → test_v1_dashboard_status.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_update_dashboard_response.py → test_v1_delete_dashboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_update_document_response.py → test_v1_delete_document_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_evaluator_response.py → test_v1_delete_evaluator_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_create_leaderboard_request.py → test_v1_delete_leaderboard_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_delete_model_response.py → test_v1_delete_model_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_test_case_response.py → test_v1_delete_test_case_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_test_response.py → test_v1_delete_test_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_document.py → test_v1_document.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_evaluation_test.py → test_v1_evaluation_test.py} +14 -14
- eval_studio_client/api/test/{test_v1alpha_evaluator.py → test_v1_evaluator.py} +14 -13
- eval_studio_client/api/test/{test_v1alpha_test_class_type.py → test_v1_evaluator_param_type.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_evaluator_parameter.py → test_v1_evaluator_parameter.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_model_type.py → test_v1_evaluator_view.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_get_operation_response.py → test_v1_finalize_operation_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_find_all_test_cases_by_id_response.py → test_v1_find_all_test_cases_by_id_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_test_lab_response.py → test_v1_find_test_lab_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_finalize_operation_response.py → test_v1_generate_test_cases_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_dashboard_response.py → test_v1_get_dashboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_document_response.py → test_v1_get_document_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_evaluator_response.py → test_v1_get_evaluator_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_get_info_response.py → test_v1_get_info_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_leaderboard_response.py → test_v1_get_leaderboard_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_create_model_response.py → test_v1_get_model_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_operation_progress_by_parent_response.py → test_v1_get_operation_progress_by_parent_response.py} +13 -13
- eval_studio_client/api/test/test_v1_get_operation_response.py +71 -0
- eval_studio_client/api/test/{test_v1alpha_get_perturbator_response.py → test_v1_get_perturbator_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_test_case_response.py → test_v1_get_test_case_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_test_class_response.py → test_v1_get_test_class_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_test_response.py → test_v1_get_test_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_import_evaluation_request.py → test_v1_import_evaluation_request.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_import_leaderboard_request.py → test_v1_import_leaderboard_request.py} +12 -12
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +71 -0
- eval_studio_client/api/test/{test_v1alpha_info.py → test_v1_info.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_insight.py → test_v1_insight.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_leaderboard.py → test_v1_leaderboard.py} +14 -14
- eval_studio_client/api/test/{test_v1alpha_dashboard_status.py → test_v1_leaderboard_status.py} +7 -7
- eval_studio_client/api/test/test_v1_leaderboard_type.py +33 -0
- eval_studio_client/api/test/test_v1_leaderboard_view.py +33 -0
- eval_studio_client/api/test/{test_v1alpha_list_base_models_response.py → test_v1_list_base_models_response.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_delete_dashboards_response.py → test_v1_list_dashboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_documents_response.py → test_v1_list_documents_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_evaluators_response.py → test_v1_list_evaluators_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_list_leaderboards_response.py → test_v1_list_leaderboards_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_list_llm_models_response.py → test_v1_list_llm_models_response.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_list_rag_collections_response.py → test_v1_list_model_collections_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_models_response.py → test_v1_list_models_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_most_recent_dashboards_response.py → test_v1_list_most_recent_dashboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_most_recent_leaderboards_response.py → test_v1_list_most_recent_leaderboards_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_list_most_recent_models_response.py → test_v1_list_most_recent_models_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_tests_response.py → test_v1_list_most_recent_tests_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_get_operations_response.py → test_v1_list_operations_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_perturbators_response.py → test_v1_list_perturbators_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_model_collections_response.py → test_v1_list_rag_collections_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_test_cases_response.py → test_v1_list_test_cases_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_test_classes_response.py → test_v1_list_test_classes_response.py} +13 -13
- eval_studio_client/api/test/test_v1_list_tests_response.py +69 -0
- eval_studio_client/api/test/{test_v1alpha_model.py → test_v1_model.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_leaderboard_view.py → test_v1_model_type.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_operation.py → test_v1_operation.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_operation_progress.py → test_v1_operation_progress.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_update_test_response.py → test_v1_perturb_test_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_perturbator.py → test_v1_perturbator.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_perturbator_configuration.py → test_v1_perturbator_configuration.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_leaderboard_type.py → test_v1_perturbator_intensity.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_problem_and_action.py → test_v1_problem_and_action.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_test.py → test_v1_test.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_test_case.py → test_v1_test_case.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_test_case_relationship.py → test_v1_test_case_relationship.py} +12 -12
- eval_studio_client/api/test/test_v1_test_cases_generator.py +33 -0
- eval_studio_client/api/test/{test_v1alpha_test_class.py → test_v1_test_class.py} +12 -12
- eval_studio_client/api/test/test_v1_test_class_type.py +33 -0
- eval_studio_client/api/test/{test_v1alpha_test_lab.py → test_v1_test_lab.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_delete_dashboard_response.py → test_v1_update_dashboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_document_response.py → test_v1_update_document_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_update_leaderboard_response.py → test_v1_update_leaderboard_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_update_model_response.py → test_v1_update_model_response.py} +13 -13
- eval_studio_client/api/test/test_v1_update_operation_response.py +71 -0
- eval_studio_client/api/test/{test_v1alpha_update_test_case_response.py → test_v1_update_test_case_response.py} +13 -13
- eval_studio_client/api/test/test_v1_update_test_response.py +67 -0
- eval_studio_client/api/test/{test_v1alpha_who_am_i_response.py → test_v1_who_am_i_response.py} +12 -12
- eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
- eval_studio_client/dashboards.py +50 -9
- eval_studio_client/documents.py +3 -3
- eval_studio_client/evaluators.py +1 -1
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +568 -387
- eval_studio_client/insights.py +1 -1
- eval_studio_client/leaderboards.py +16 -13
- eval_studio_client/models.py +117 -29
- eval_studio_client/perturbators.py +5 -7
- eval_studio_client/problems.py +1 -1
- eval_studio_client/test_labs.py +2 -2
- eval_studio_client/tests.py +222 -8
- eval_studio_client/utils.py +26 -0
- {eval_studio_client-0.8.0a2.dist-info → eval_studio_client-1.0.0.dist-info}/METADATA +2 -2
- eval_studio_client-1.0.0.dist-info/RECORD +486 -0
- {eval_studio_client-0.8.0a2.dist-info → eval_studio_client-1.0.0.dist-info}/WHEEL +1 -1
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsRequest.md +0 -31
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsRequest.md +0 -30
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteTestCasesResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetDashboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetDocumentsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetLeaderboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetOperationsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchImportLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchImportTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCheckBaseModelsResponse.md +0 -30
- eval_studio_client/api/docs/V1alphaCreateDashboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateDocumentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateEvaluatorResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateLeaderboardRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateLeaderboardWithoutCacheResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateModelResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreatePerturbationResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateTestCaseResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateTestLabResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateTestResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteDashboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteDocumentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteEvaluatorResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteModelResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteTestCaseResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteTestResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaEvaluationTest.md +0 -32
- eval_studio_client/api/docs/V1alphaFinalizeOperationResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaFindAllTestCasesByIDResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaFindTestLabResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetDashboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetDocumentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetEvaluatorResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetInfoResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetModelResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetOperationProgressByParentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetOperationResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetPerturbatorResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetTestCaseResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetTestClassResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetTestResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaImportLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListBaseModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListDashboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListDocumentsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListEvaluatorsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListLLMModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListLeaderboardsResponse.md +0 -30
- eval_studio_client/api/docs/V1alphaListModelCollectionsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListMostRecentDashboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListMostRecentLeaderboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListMostRecentModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListMostRecentTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListOperationsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListPerturbatorsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListRAGCollectionsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListTestCasesResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListTestClassesResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaPerturbTestResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaPerturbatorConfiguration.md +0 -32
- eval_studio_client/api/docs/V1alphaUpdateDashboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateDocumentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateModelResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateOperationResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateTestCaseResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateTestResponse.md +0 -29
- eval_studio_client/api/models/v1alpha_batch_create_leaderboards_response.py +0 -91
- eval_studio_client/api/models/v1alpha_batch_import_leaderboard_response.py +0 -91
- eval_studio_client/api/models/v1alpha_create_leaderboard_without_cache_response.py +0 -91
- eval_studio_client/api/models/v1alpha_list_most_recent_tests_response.py +0 -95
- eval_studio_client/api/models/v1alpha_perturb_test_response.py +0 -91
- eval_studio_client/api/test/test_v1alpha_batch_create_leaderboards_response.py +0 -71
- eval_studio_client/api/test/test_v1alpha_batch_delete_dashboards_request.py +0 -53
- eval_studio_client/api/test/test_v1alpha_batch_delete_evaluators_request.py +0 -53
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_without_cache_response.py +0 -71
- eval_studio_client/api/test/test_v1alpha_evaluator_param_type.py +0 -33
- eval_studio_client/api/test/test_v1alpha_leaderboard_status.py +0 -33
- eval_studio_client/api/test/test_v1alpha_list_most_recent_tests_response.py +0 -69
- eval_studio_client/api/test/test_v1alpha_perturb_test_response.py +0 -67
- eval_studio_client/api/test/test_v1alpha_perturbator_intensity.py +0 -33
- eval_studio_client-0.8.0a2.dist-info/RECORD +0 -470
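The bulk of this release is the promotion of the generated API from v1alpha to v1: as the rename entries above show, every generated module, class, and doc page drops the alpha suffix (v1alpha_leaderboard.py becomes v1_leaderboard.py, V1alphaLeaderboard becomes V1Leaderboard, and so on), alongside new prompt-generation and test-case-generation endpoints. A minimal migration sketch for code that imports the generated models directly, using names taken from the rename list above (the surrounding code is assumed, not part of the diff):

# eval-studio-client 0.8.0a2 (old module and class names):
# from eval_studio_client.api.models.v1alpha_leaderboard import V1alphaLeaderboard

# eval-studio-client 1.0.0 (renamed module and class):
from eval_studio_client.api.models.v1_leaderboard import V1Leaderboard
from eval_studio_client.api.models.v1_leaderboard_status import V1LeaderboardStatus

The per-file diffs below show the same rename applied inside the generated sources.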
eval_studio_client/api/models/{v1alpha_leaderboard.py → v1_leaderboard.py}

@@ -1,7 +1,7 @@
# coding: utf-8

"""
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -20,16 +20,16 @@ import json
from datetime import datetime
from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
from typing import Any, ClassVar, Dict, List, Optional
-from eval_studio_client.api.models.
-from eval_studio_client.api.models.
-from eval_studio_client.api.models.
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_insight import V1Insight
+from eval_studio_client.api.models.v1_leaderboard_status import V1LeaderboardStatus
+from eval_studio_client.api.models.v1_leaderboard_type import V1LeaderboardType
+from eval_studio_client.api.models.v1_problem_and_action import V1ProblemAndAction
from typing import Optional, Set
from typing_extensions import Self

-class
+class V1Leaderboard(BaseModel):
    """
-
+    V1Leaderboard
    """ # noqa: E501
    name: Optional[StrictStr] = None
    create_time: Optional[datetime] = Field(default=None, description="Output only. Timestamp when the Leaderboard was created.", alias="createTime")
@@ -40,7 +40,7 @@ class V1alphaLeaderboard(BaseModel):
    deleter: Optional[StrictStr] = Field(default=None, description="Output only. Optional. Name of the user or service that requested deletion of the Leaderboard.")
    display_name: Optional[StrictStr] = Field(default=None, description="Human readable name of the Leaderboard.", alias="displayName")
    description: Optional[StrictStr] = Field(default=None, description="Optional. Arbitrary description of the Leaderboard.")
-    status: Optional[
+    status: Optional[V1LeaderboardStatus] = None
    evaluator: Optional[StrictStr] = Field(default=None, description="Immutable. Resource name of the Evaluator used in this Leaderboard.")
    tests: Optional[List[StrictStr]] = Field(default=None, description="Immutable. Resource names of the Tests used in this Leaderboard.")
    model: Optional[StrictStr] = Field(default=None, description="Immutable. Resource name of the Model used in this Leaderboard.")
@@ -49,12 +49,12 @@ class V1alphaLeaderboard(BaseModel):
    leaderboard_table: Optional[StrictStr] = Field(default=None, description="Output only. Leaderboard table in JSON format.", alias="leaderboardTable")
    leaderboard_summary: Optional[StrictStr] = Field(default=None, description="Output only. Leaderboard summary in Markdown format.", alias="leaderboardSummary")
    llm_models: Optional[List[StrictStr]] = Field(default=None, description="Immutable. System names of the LLM models used in this Leaderboard.", alias="llmModels")
-    leaderboard_problems: Optional[List[
+    leaderboard_problems: Optional[List[V1ProblemAndAction]] = Field(default=None, description="Output only. Leaderboard problems and actions.", alias="leaderboardProblems")
    evaluator_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Evaluator parameters setup.", alias="evaluatorParameters")
-    insights: Optional[List[
+    insights: Optional[List[V1Insight]] = Field(default=None, description="Output only. Insights from the Leaderboard.")
    model_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Prameters overrides in JSON format.", alias="modelParameters")
    h2ogpte_collection: Optional[StrictStr] = Field(default=None, description="The existing collection name in H2OGPTe.", alias="h2ogpteCollection")
-    type: Optional[
+    type: Optional[V1LeaderboardType] = None
    demo: Optional[StrictBool] = Field(default=None, description="Output only. Whether the Leaderboard is a demo resource or not. Demo resources are read only.")
    test_lab: Optional[StrictStr] = Field(default=None, description="Optional. Resource name of the TestLab if Leaderboard was created from a imported TestLab.", alias="testLab")
    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "status", "evaluator", "tests", "model", "createOperation", "leaderboardReport", "leaderboardTable", "leaderboardSummary", "llmModels", "leaderboardProblems", "evaluatorParameters", "insights", "modelParameters", "h2ogpteCollection", "type", "demo", "testLab"]
@@ -77,7 +77,7 @@ class V1alphaLeaderboard(BaseModel):

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1Leaderboard from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
@@ -142,7 +142,7 @@ class V1alphaLeaderboard(BaseModel):

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1Leaderboard from a dict"""
        if obj is None:
            return None

@@ -168,9 +168,9 @@ class V1alphaLeaderboard(BaseModel):
            "leaderboardTable": obj.get("leaderboardTable"),
            "leaderboardSummary": obj.get("leaderboardSummary"),
            "llmModels": obj.get("llmModels"),
-            "leaderboardProblems": [
+            "leaderboardProblems": [V1ProblemAndAction.from_dict(_item) for _item in obj["leaderboardProblems"]] if obj.get("leaderboardProblems") is not None else None,
            "evaluatorParameters": obj.get("evaluatorParameters"),
-            "insights": [
+            "insights": [V1Insight.from_dict(_item) for _item in obj["insights"]] if obj.get("insights") is not None else None,
            "modelParameters": obj.get("modelParameters"),
            "h2ogpteCollection": obj.get("h2ogpteCollection"),
            "type": obj.get("type"),
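The renamed model keeps the generated pydantic interface shown in the hunks above (from_json, from_dict, to_dict). A small sketch of round-tripping a leaderboard payload under the 1.0.0 names; the field values are illustrative only:

from eval_studio_client.api.models.v1_leaderboard import V1Leaderboard

# Parse a JSON payload; camelCase aliases such as "displayName" map onto the
# snake_case attributes of the generated model.
lb = V1Leaderboard.from_json(
    '{"name": "leaderboards/123", "displayName": "Demo leaderboard", "status": "LEADERBOARD_STATUS_COMPLETED"}'
)
print(lb.display_name, lb.status)

# Serialize back to a plain dict.
print(lb.to_dict())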
eval_studio_client/api/models/{v1alpha_leaderboard_status.py → v1_leaderboard_status.py}

@@ -1,7 +1,7 @@
# coding: utf-8

"""
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -18,7 +18,7 @@ from enum import Enum
from typing_extensions import Self


-class
+class V1LeaderboardStatus(str, Enum):
    """
    - LEADERBOARD_STATUS_UNSPECIFIED: Unspecified status. - LEADERBOARD_STATUS_PROCESSING: Leaderboard is being processed. See the Operation for details. - LEADERBOARD_STATUS_COMPLETED: Leaderboard is completed successfully. - LEADERBOARD_STATUS_FAILED: Leaderboard failed. See the Operation for details.
    """
@@ -33,7 +33,7 @@ class V1alphaLeaderboardStatus(str, Enum):

    @classmethod
    def from_json(cls, json_str: str) -> Self:
-        """Create an instance of
+        """Create an instance of V1LeaderboardStatus from a JSON string"""
        return cls(json.loads(json_str))

@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -18,7 +18,7 @@ from enum import Enum
 from typing_extensions import Self


-class
+class V1LeaderboardType(str, Enum):
     """
     - LEADERBOARD_TYPE_UNSPECIFIED: Unspecified type. - LEADERBOARD_TYPE_STANDALONE: Standalone leaderboard. - LEADERBOARD_TYPE_DASHBOARD: Leaderboard is part of a dashboard. - LEADERBOARD_TYPE_SERVICE: Leaderboard created by other service such as h2oGPTe.
     """

@@ -33,7 +33,7 @@ class V1alphaLeaderboardType(str, Enum):

     @classmethod
     def from_json(cls, json_str: str) -> Self:
-        """Create an instance of
+        """Create an instance of V1LeaderboardType from a JSON string"""
         return cls(json.loads(json_str))
@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -18,7 +18,7 @@ from enum import Enum
 from typing_extensions import Self


-class
+class V1LeaderboardView(str, Enum):
     """
     - result - leaderboard_table - leaderboard_summary - LEADERBOARD_VIEW_FULL: Full view of the Leaderboard. No fields are omitted. - LEADERBOARD_VIEW_BASIC_WITH_TABLE: View of the Leaderboard that is the same as LEADERBOARD_VIEW_BASIC but it includes the leaderboard_table field.
     """

@@ -33,7 +33,7 @@ class V1alphaLeaderboardView(str, Enum):

     @classmethod
     def from_json(cls, json_str: str) -> Self:
-        """Create an instance of
+        """Create an instance of V1LeaderboardView from a JSON string"""
         return cls(json.loads(json_str))
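These leaderboard enums remain plain string enums, so comparisons against the LEADERBOARD_* wire values keep working after the rename; only the class names (and presumably the module paths) change. A small sketch under stated assumptions: the module paths below follow the same v1_* file naming as the other renamed models in this diff, and the enum members carry the names listed in the generated docstrings; neither is shown explicitly in this section.

# Assumed module paths, mirroring the v1_* naming used by the other renamed models.
from eval_studio_client.api.models.v1_leaderboard_status import V1LeaderboardStatus
from eval_studio_client.api.models.v1_leaderboard_type import V1LeaderboardType

status = V1LeaderboardStatus.from_json('"LEADERBOARD_STATUS_COMPLETED"')
if status == V1LeaderboardStatus.LEADERBOARD_STATUS_COMPLETED:
    print("leaderboard finished")

# Assuming the generated values mirror the member names, str-based enums
# also compare equal to their raw wire values.
print(V1LeaderboardType.LEADERBOARD_TYPE_STANDALONE == "LEADERBOARD_TYPE_STANDALONE")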
@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -22,9 +22,9 @@ from typing import Any, ClassVar, Dict, List, Optional
 from typing import Optional, Set
 from typing_extensions import Self

-class
+class V1ListBaseModelsResponse(BaseModel):
     """
-
+    V1ListBaseModelsResponse
     """ # noqa: E501
     base_models: Optional[List[StrictStr]] = Field(default=None, description="The list of Models.", alias="baseModels")
     __properties: ClassVar[List[str]] = ["baseModels"]

@@ -47,7 +47,7 @@ class V1alphaListBaseModelsResponse(BaseModel):

     @classmethod
     def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListBaseModelsResponse from a JSON string"""
         return cls.from_dict(json.loads(json_str))

     def to_dict(self) -> Dict[str, Any]:

@@ -72,7 +72,7 @@ class V1alphaListBaseModelsResponse(BaseModel):

     @classmethod
     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListBaseModelsResponse from a dict"""
         if obj is None:
             return None
@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -19,15 +19,15 @@ import json

 from pydantic import BaseModel, ConfigDict, Field
 from typing import Any, ClassVar, Dict, List, Optional
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_dashboard import V1Dashboard
 from typing import Optional, Set
 from typing_extensions import Self

-class
+class V1ListDashboardsResponse(BaseModel):
     """
-
+    V1ListDashboardsResponse
     """ # noqa: E501
-    dashboards: Optional[List[
+    dashboards: Optional[List[V1Dashboard]] = Field(default=None, description="The list of Dashboards.")
     __properties: ClassVar[List[str]] = ["dashboards"]

     model_config = ConfigDict(

@@ -48,7 +48,7 @@ class V1alphaBatchDeleteDashboardsResponse(BaseModel):

     @classmethod
     def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListDashboardsResponse from a JSON string"""
         return cls.from_dict(json.loads(json_str))

     def to_dict(self) -> Dict[str, Any]:

@@ -80,7 +80,7 @@ class V1alphaBatchDeleteDashboardsResponse(BaseModel):

     @classmethod
     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListDashboardsResponse from a dict"""
         if obj is None:
             return None

@@ -88,7 +88,7 @@ class V1alphaBatchDeleteDashboardsResponse(BaseModel):
             return cls.model_validate(obj)

         _obj = cls.model_validate({
-            "dashboards": [
+            "dashboards": [V1Dashboard.from_dict(_item) for _item in obj["dashboards"]] if obj.get("dashboards") is not None else None
         })
         return _obj
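As the from_dict hunk above shows, the list responses now hydrate their nested items through the renamed v1 models. A short sketch of the same path from user code, with a hypothetical single-dashboard payload; the module path is assumed to follow the v1_* naming of the other renamed files, and V1Dashboard is assumed to expose the same to_dict helper as the models shown here.

# Assumed module path, following the v1_* naming used elsewhere in this diff.
from eval_studio_client.api.models.v1_list_dashboards_response import V1ListDashboardsResponse

# Illustrative response body, e.g. already parsed from JSON.
raw = {"dashboards": [{"displayName": "Weekly evals"}]}

resp = V1ListDashboardsResponse.from_dict(raw)
for dashboard in resp.dashboards or []:
    # Each item is a V1Dashboard instance, not a plain dict.
    print(type(dashboard).__name__, dashboard.to_dict())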
@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -19,15 +19,15 @@ import json

 from pydantic import BaseModel, ConfigDict, Field
 from typing import Any, ClassVar, Dict, List, Optional
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_document import V1Document
 from typing import Optional, Set
 from typing_extensions import Self

-class
+class V1ListDocumentsResponse(BaseModel):
     """
-
+    V1ListDocumentsResponse
     """ # noqa: E501
-    documents: Optional[List[
+    documents: Optional[List[V1Document]] = Field(default=None, description="The list of Documents.")
     __properties: ClassVar[List[str]] = ["documents"]

     model_config = ConfigDict(

@@ -48,7 +48,7 @@ class V1alphaBatchDeleteDocumentsResponse(BaseModel):

     @classmethod
     def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListDocumentsResponse from a JSON string"""
         return cls.from_dict(json.loads(json_str))

     def to_dict(self) -> Dict[str, Any]:

@@ -80,7 +80,7 @@ class V1alphaBatchDeleteDocumentsResponse(BaseModel):

     @classmethod
     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListDocumentsResponse from a dict"""
         if obj is None:
             return None

@@ -88,7 +88,7 @@ class V1alphaBatchDeleteDocumentsResponse(BaseModel):
             return cls.model_validate(obj)

         _obj = cls.model_validate({
-            "documents": [
+            "documents": [V1Document.from_dict(_item) for _item in obj["documents"]] if obj.get("documents") is not None else None
         })
         return _obj
@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -19,15 +19,15 @@ import json

 from pydantic import BaseModel, ConfigDict, Field
 from typing import Any, ClassVar, Dict, List, Optional
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_evaluator import V1Evaluator
 from typing import Optional, Set
 from typing_extensions import Self

-class
+class V1ListEvaluatorsResponse(BaseModel):
     """
-
+    V1ListEvaluatorsResponse
     """ # noqa: E501
-    evaluators: Optional[List[
+    evaluators: Optional[List[V1Evaluator]] = Field(default=None, description="The list of Evaluators.")
     __properties: ClassVar[List[str]] = ["evaluators"]

     model_config = ConfigDict(

@@ -48,7 +48,7 @@ class V1alphaBatchDeleteEvaluatorsResponse(BaseModel):

     @classmethod
     def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListEvaluatorsResponse from a JSON string"""
         return cls.from_dict(json.loads(json_str))

     def to_dict(self) -> Dict[str, Any]:

@@ -80,7 +80,7 @@ class V1alphaBatchDeleteEvaluatorsResponse(BaseModel):

     @classmethod
     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListEvaluatorsResponse from a dict"""
         if obj is None:
             return None

@@ -88,7 +88,7 @@ class V1alphaBatchDeleteEvaluatorsResponse(BaseModel):
             return cls.model_validate(obj)

         _obj = cls.model_validate({
-            "evaluators": [
+            "evaluators": [V1Evaluator.from_dict(_item) for _item in obj["evaluators"]] if obj.get("evaluators") is not None else None
         })
         return _obj
@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -19,15 +19,15 @@ import json

 from pydantic import BaseModel, ConfigDict, Field, StrictStr
 from typing import Any, ClassVar, Dict, List, Optional
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_leaderboard import V1Leaderboard
 from typing import Optional, Set
 from typing_extensions import Self

-class
+class V1ListLeaderboardsResponse(BaseModel):
     """
-
+    V1ListLeaderboardsResponse
     """ # noqa: E501
-    leaderboards: Optional[List[
+    leaderboards: Optional[List[V1Leaderboard]] = Field(default=None, description="The list of Leaderboards.")
     next_page_token: Optional[StrictStr] = Field(default=None, description="A token that can be sent as `page_token` to retrieve the next page. If this field is empty/omitted, there are no subsequent pages.", alias="nextPageToken")
     __properties: ClassVar[List[str]] = ["leaderboards", "nextPageToken"]

@@ -49,7 +49,7 @@ class V1alphaListLeaderboardsResponse(BaseModel):

     @classmethod
     def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListLeaderboardsResponse from a JSON string"""
         return cls.from_dict(json.loads(json_str))

     def to_dict(self) -> Dict[str, Any]:

@@ -81,7 +81,7 @@ class V1alphaListLeaderboardsResponse(BaseModel):

     @classmethod
     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListLeaderboardsResponse from a dict"""
         if obj is None:
             return None

@@ -89,7 +89,7 @@ class V1alphaListLeaderboardsResponse(BaseModel):
             return cls.model_validate(obj)

         _obj = cls.model_validate({
-            "leaderboards": [
+            "leaderboards": [V1Leaderboard.from_dict(_item) for _item in obj["leaderboards"]] if obj.get("leaderboards") is not None else None,
             "nextPageToken": obj.get("nextPageToken")
         })
         return _obj
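Because nextPageToken is still carried on the list response, paging loops survive the migration unchanged apart from the response class name. A sketch of draining all pages, assuming a caller-supplied fetch_page(page_token) helper that returns the raw JSON of one ListLeaderboards call; the helper and the module path are assumptions, while the leaderboards and next_page_token fields come from the model above.

# Assumed module path, following the v1_* naming used elsewhere in this diff.
from eval_studio_client.api.models.v1_list_leaderboards_response import V1ListLeaderboardsResponse

def all_leaderboards(fetch_page):
    """Drain every page from a hypothetical fetch_page(page_token) -> JSON str callable."""
    token = None
    while True:
        resp = V1ListLeaderboardsResponse.from_json(fetch_page(token))
        for leaderboard in resp.leaderboards or []:
            yield leaderboard
        token = resp.next_page_token
        if not token:  # an empty or omitted token means there are no further pages
            break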
eval_studio_client/api/models/{v1alpha_list_llm_models_response.py → v1_list_llm_models_response.py}
RENAMED

@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -22,9 +22,9 @@ from typing import Any, ClassVar, Dict, List, Optional
 from typing import Optional, Set
 from typing_extensions import Self

-class
+class V1ListLLMModelsResponse(BaseModel):
     """
-
+    V1ListLLMModelsResponse
     """ # noqa: E501
     models: Optional[List[StrictStr]] = Field(default=None, description="Required. List of LLM models available for evaluation.")
     __properties: ClassVar[List[str]] = ["models"]

@@ -47,7 +47,7 @@ class V1alphaListLLMModelsResponse(BaseModel):

     @classmethod
     def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListLLMModelsResponse from a JSON string"""
         return cls.from_dict(json.loads(json_str))

     def to_dict(self) -> Dict[str, Any]:

@@ -72,7 +72,7 @@ class V1alphaListLLMModelsResponse(BaseModel):

     @classmethod
     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListLLMModelsResponse from a dict"""
         if obj is None:
             return None
@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -19,15 +19,15 @@ import json

 from pydantic import BaseModel, ConfigDict, Field
 from typing import Any, ClassVar, Dict, List, Optional
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_collection_info import V1CollectionInfo
 from typing import Optional, Set
 from typing_extensions import Self

-class
+class V1ListModelCollectionsResponse(BaseModel):
     """
-
+    V1ListModelCollectionsResponse
     """ # noqa: E501
-    collections: Optional[List[
+    collections: Optional[List[V1CollectionInfo]] = Field(default=None, description="The list of collections.")
     __properties: ClassVar[List[str]] = ["collections"]

     model_config = ConfigDict(

@@ -48,7 +48,7 @@ class V1alphaListModelCollectionsResponse(BaseModel):

     @classmethod
     def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListModelCollectionsResponse from a JSON string"""
         return cls.from_dict(json.loads(json_str))

     def to_dict(self) -> Dict[str, Any]:

@@ -80,7 +80,7 @@ class V1alphaListModelCollectionsResponse(BaseModel):

     @classmethod
     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListModelCollectionsResponse from a dict"""
         if obj is None:
             return None

@@ -88,7 +88,7 @@ class V1alphaListModelCollectionsResponse(BaseModel):
             return cls.model_validate(obj)

         _obj = cls.model_validate({
-            "collections": [
+            "collections": [V1CollectionInfo.from_dict(_item) for _item in obj["collections"]] if obj.get("collections") is not None else None
         })
         return _obj
eval_studio_client/api/models/{v1alpha_batch_delete_models_response.py → v1_list_models_response.py}
RENAMED

@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -19,15 +19,15 @@ import json

 from pydantic import BaseModel, ConfigDict, Field
 from typing import Any, ClassVar, Dict, List, Optional
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_model import V1Model
 from typing import Optional, Set
 from typing_extensions import Self

-class
+class V1ListModelsResponse(BaseModel):
     """
-
+    V1ListModelsResponse
     """ # noqa: E501
-    models: Optional[List[
+    models: Optional[List[V1Model]] = Field(default=None, description="The list of Models.")
     __properties: ClassVar[List[str]] = ["models"]

     model_config = ConfigDict(

@@ -48,7 +48,7 @@ class V1alphaBatchDeleteModelsResponse(BaseModel):

     @classmethod
     def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListModelsResponse from a JSON string"""
         return cls.from_dict(json.loads(json_str))

     def to_dict(self) -> Dict[str, Any]:

@@ -80,7 +80,7 @@ class V1alphaBatchDeleteModelsResponse(BaseModel):

     @classmethod
     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListModelsResponse from a dict"""
         if obj is None:
             return None

@@ -88,7 +88,7 @@ class V1alphaBatchDeleteModelsResponse(BaseModel):
             return cls.model_validate(obj)

         _obj = cls.model_validate({
-            "models": [
+            "models": [V1Model.from_dict(_item) for _item in obj["models"]] if obj.get("models") is not None else None
         })
         return _obj
@@ -1,7 +1,7 @@
 # coding: utf-8

 """
-    ai/h2o/eval_studio/
+    ai/h2o/eval_studio/v1/collection.proto

     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

@@ -19,15 +19,15 @@ import json

 from pydantic import BaseModel, ConfigDict, Field
 from typing import Any, ClassVar, Dict, List, Optional
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_dashboard import V1Dashboard
 from typing import Optional, Set
 from typing_extensions import Self

-class
+class V1ListMostRecentDashboardsResponse(BaseModel):
     """
-
+    V1ListMostRecentDashboardsResponse
     """ # noqa: E501
-    dashboards: Optional[List[
+    dashboards: Optional[List[V1Dashboard]] = Field(default=None, description="The list of Dashboards.")
     __properties: ClassVar[List[str]] = ["dashboards"]

     model_config = ConfigDict(

@@ -48,7 +48,7 @@ class V1alphaListMostRecentDashboardsResponse(BaseModel):

     @classmethod
     def from_json(cls, json_str: str) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListMostRecentDashboardsResponse from a JSON string"""
         return cls.from_dict(json.loads(json_str))

     def to_dict(self) -> Dict[str, Any]:

@@ -80,7 +80,7 @@ class V1alphaListMostRecentDashboardsResponse(BaseModel):

     @classmethod
     def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
-        """Create an instance of
+        """Create an instance of V1ListMostRecentDashboardsResponse from a dict"""
         if obj is None:
             return None

@@ -88,7 +88,7 @@ class V1alphaListMostRecentDashboardsResponse(BaseModel):
             return cls.model_validate(obj)

         _obj = cls.model_validate({
-            "dashboards": [
+            "dashboards": [V1Dashboard.from_dict(_item) for _item in obj["dashboards"]] if obj.get("dashboards") is not None else None
         })
         return _obj
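Taken together, the model changes in these files are largely mechanical: V1alpha* classes become V1* classes with fields and helper methods unchanged, and most v1alpha_* modules simply drop the alpha suffix, though a few (such as v1alpha_batch_delete_models_response.py above) were repurposed into the new list responses. A hedged sketch of the corresponding import update in client code; the old module path is inferred from the rename pattern shown in this diff rather than listed explicitly here.

# Before (0.8.0a2): alpha-suffixed module and class, path inferred from the rename pattern.
# from eval_studio_client.api.models.v1alpha_leaderboard import V1alphaLeaderboard

# After (1.0.0): the v1 equivalents used throughout this diff.
from eval_studio_client.api.models.v1_leaderboard import V1Leaderboard
from eval_studio_client.api.models.v1_dashboard import V1Dashboard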