eval-studio-client 0.8.0a2__py3-none-any.whl → 1.0.0a1__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- eval_studio_client/__init__.py +2 -1
- eval_studio_client/api/__init__.py +125 -120
- eval_studio_client/api/api/__init__.py +1 -0
- eval_studio_client/api/api/dashboard_service_api.py +71 -71
- eval_studio_client/api/api/document_service_api.py +64 -64
- eval_studio_client/api/api/evaluation_service_api.py +42 -42
- eval_studio_client/api/api/evaluator_service_api.py +50 -50
- eval_studio_client/api/api/info_service_api.py +8 -8
- eval_studio_client/api/api/leaderboard_service_api.py +126 -126
- eval_studio_client/api/api/model_service_api.py +92 -92
- eval_studio_client/api/api/operation_progress_service_api.py +8 -8
- eval_studio_client/api/api/operation_service_api.py +36 -36
- eval_studio_client/api/api/perturbation_service_api.py +8 -8
- eval_studio_client/api/api/perturbator_service_api.py +15 -15
- eval_studio_client/api/api/prompt_generation_service_api.py +321 -0
- eval_studio_client/api/api/test_case_service_api.py +57 -57
- eval_studio_client/api/api/test_class_service_api.py +15 -15
- eval_studio_client/api/api/test_lab_service_api.py +22 -22
- eval_studio_client/api/api/test_service_api.py +376 -92
- eval_studio_client/api/api/who_am_i_service_api.py +8 -8
- eval_studio_client/api/api_client.py +1 -1
- eval_studio_client/api/configuration.py +1 -1
- eval_studio_client/api/docs/DashboardServiceApi.md +38 -38
- eval_studio_client/api/docs/DocumentServiceApi.md +34 -34
- eval_studio_client/api/docs/EvaluationServiceApi.md +22 -22
- eval_studio_client/api/docs/EvaluatorServiceApi.md +26 -26
- eval_studio_client/api/docs/InfoServiceApi.md +4 -4
- eval_studio_client/api/docs/LeaderboardServiceApi.md +66 -66
- eval_studio_client/api/docs/ModelServiceApi.md +50 -50
- eval_studio_client/api/docs/OperationProgressServiceApi.md +4 -4
- eval_studio_client/api/docs/OperationServiceApi.md +20 -20
- eval_studio_client/api/docs/PerturbationServiceApi.md +4 -4
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +3 -3
- eval_studio_client/api/docs/PerturbatorServiceApi.md +8 -8
- eval_studio_client/api/docs/PromptGenerationServiceApi.md +78 -0
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +35 -0
- eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +1 -1
- eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +4 -4
- eval_studio_client/api/docs/RequiredTheModelToUpdate.md +1 -1
- eval_studio_client/api/docs/TestCaseServiceApi.md +31 -31
- eval_studio_client/api/docs/TestClassServiceApi.md +8 -8
- eval_studio_client/api/docs/TestLabServiceApi.md +11 -11
- eval_studio_client/api/docs/TestServiceApi.md +119 -49
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +33 -0
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +1 -1
- eval_studio_client/api/docs/V1BatchCreateLeaderboardsRequest.md +31 -0
- eval_studio_client/api/docs/V1BatchCreateLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteDashboardsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteDocumentsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteEvaluatorsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteLeaderboardsRequest.md +30 -0
- eval_studio_client/api/docs/V1BatchDeleteLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteModelsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaBatchDeleteTestsRequest.md → V1BatchDeleteTestsRequest.md} +8 -8
- eval_studio_client/api/docs/V1BatchDeleteTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetTestsResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaBatchImportLeaderboardRequest.md → V1BatchImportLeaderboardRequest.md} +9 -9
- eval_studio_client/api/docs/V1BatchImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaBatchImportTestsRequest.md → V1BatchImportTestsRequest.md} +8 -8
- eval_studio_client/api/docs/V1BatchImportTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1CheckBaseModelsResponse.md +30 -0
- eval_studio_client/api/docs/{V1alphaCollectionInfo.md → V1CollectionInfo.md} +8 -8
- eval_studio_client/api/docs/V1CreateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaCreateEvaluationRequest.md → V1CreateEvaluationRequest.md} +10 -10
- eval_studio_client/api/docs/V1CreateEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateLeaderboardRequest.md +29 -0
- eval_studio_client/api/docs/V1CreateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateLeaderboardWithoutCacheResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1CreatePerturbationResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaDashboard.md → V1Dashboard.md} +9 -9
- eval_studio_client/api/docs/{V1alphaDashboardStatus.md → V1DashboardStatus.md} +1 -1
- eval_studio_client/api/docs/V1DeleteDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteModelResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaDocument.md → V1Document.md} +8 -8
- eval_studio_client/api/docs/V1EvaluationTest.md +32 -0
- eval_studio_client/api/docs/{V1alphaEvaluator.md → V1Evaluator.md} +10 -9
- eval_studio_client/api/docs/{V1alphaEvaluatorParamType.md → V1EvaluatorParamType.md} +1 -1
- eval_studio_client/api/docs/{V1alphaEvaluatorParameter.md → V1EvaluatorParameter.md} +9 -9
- eval_studio_client/api/docs/{V1alphaEvaluatorView.md → V1EvaluatorView.md} +1 -1
- eval_studio_client/api/docs/V1FinalizeOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1FindAllTestCasesByIDResponse.md +29 -0
- eval_studio_client/api/docs/V1FindTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1GenerateTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1GetDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1GetDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1GetEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1GetInfoResponse.md +29 -0
- eval_studio_client/api/docs/V1GetLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1GetModelResponse.md +29 -0
- eval_studio_client/api/docs/V1GetOperationProgressByParentResponse.md +29 -0
- eval_studio_client/api/docs/V1GetOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1GetPerturbatorResponse.md +29 -0
- eval_studio_client/api/docs/V1GetTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1GetTestClassResponse.md +29 -0
- eval_studio_client/api/docs/V1GetTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaImportEvaluationRequest.md → V1ImportEvaluationRequest.md} +9 -9
- eval_studio_client/api/docs/{V1alphaImportLeaderboardRequest.md → V1ImportLeaderboardRequest.md} +9 -9
- eval_studio_client/api/docs/V1ImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaInfo.md → V1Info.md} +8 -8
- eval_studio_client/api/docs/{V1alphaInsight.md → V1Insight.md} +8 -8
- eval_studio_client/api/docs/{V1alphaLeaderboard.md → V1Leaderboard.md} +12 -12
- eval_studio_client/api/docs/{V1alphaLeaderboardStatus.md → V1LeaderboardStatus.md} +1 -1
- eval_studio_client/api/docs/{V1alphaLeaderboardType.md → V1LeaderboardType.md} +1 -1
- eval_studio_client/api/docs/{V1alphaLeaderboardView.md → V1LeaderboardView.md} +1 -1
- eval_studio_client/api/docs/V1ListBaseModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListLLMModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListLeaderboardsResponse.md +30 -0
- eval_studio_client/api/docs/V1ListModelCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListMostRecentDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListMostRecentLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListMostRecentModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListMostRecentTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListPerturbatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListRAGCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestClassesResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestsResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaModel.md → V1Model.md} +9 -9
- eval_studio_client/api/docs/{V1alphaModelType.md → V1ModelType.md} +1 -1
- eval_studio_client/api/docs/{V1alphaOperation.md → V1Operation.md} +8 -8
- eval_studio_client/api/docs/{V1alphaOperationProgress.md → V1OperationProgress.md} +8 -8
- eval_studio_client/api/docs/V1PerturbTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaPerturbator.md → V1Perturbator.md} +8 -8
- eval_studio_client/api/docs/V1PerturbatorConfiguration.md +32 -0
- eval_studio_client/api/docs/{V1alphaPerturbatorIntensity.md → V1PerturbatorIntensity.md} +1 -1
- eval_studio_client/api/docs/{V1alphaProblemAndAction.md → V1ProblemAndAction.md} +8 -8
- eval_studio_client/api/docs/{V1alphaTest.md → V1Test.md} +8 -8
- eval_studio_client/api/docs/{V1alphaTestCase.md → V1TestCase.md} +8 -8
- eval_studio_client/api/docs/{V1alphaTestCaseRelationship.md → V1TestCaseRelationship.md} +8 -8
- eval_studio_client/api/docs/V1TestCasesGenerator.md +11 -0
- eval_studio_client/api/docs/{V1alphaTestClass.md → V1TestClass.md} +9 -9
- eval_studio_client/api/docs/{V1alphaTestClassType.md → V1TestClassType.md} +1 -1
- eval_studio_client/api/docs/{V1alphaTestLab.md → V1TestLab.md} +8 -8
- eval_studio_client/api/docs/V1UpdateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateTestResponse.md +29 -0
- eval_studio_client/api/docs/{V1alphaWhoAmIResponse.md → V1WhoAmIResponse.md} +8 -8
- eval_studio_client/api/docs/WhoAmIServiceApi.md +4 -4
- eval_studio_client/api/exceptions.py +1 -1
- eval_studio_client/api/models/__init__.py +124 -120
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +10 -10
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +104 -0
- eval_studio_client/api/models/protobuf_any.py +1 -1
- eval_studio_client/api/models/required_the_dashboard_to_update.py +3 -3
- eval_studio_client/api/models/required_the_document_to_update.py +1 -1
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +11 -11
- eval_studio_client/api/models/required_the_model_to_update.py +3 -3
- eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_case_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_to_update.py +1 -1
- eval_studio_client/api/models/rpc_status.py +1 -1
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +96 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +4 -4
- eval_studio_client/api/models/{v1alpha_batch_create_leaderboards_request.py → v1_batch_create_leaderboards_request.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_operation_response.py → v1_batch_create_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_dashboards_request.py → v1_batch_delete_dashboards_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_dashboards_response.py → v1_batch_delete_dashboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_documents_request.py → v1_batch_delete_documents_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_documents_response.py → v1_batch_delete_documents_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_evaluators_request.py → v1_batch_delete_evaluators_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_evaluators_response.py → v1_batch_delete_evaluators_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_leaderboards_request.py → v1_batch_delete_leaderboards_request.py} +7 -7
- eval_studio_client/api/models/{v1alpha_batch_get_leaderboards_response.py → v1_batch_delete_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_models_request.py → v1_batch_delete_models_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_models_response.py → v1_batch_delete_models_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_test_cases_response.py → v1_batch_delete_test_cases_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_tests_request.py → v1_batch_delete_tests_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_tests_response.py → v1_batch_delete_tests_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_dashboards_response.py → v1_batch_get_dashboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_documents_response.py → v1_batch_get_documents_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_leaderboards_response.py → v1_batch_get_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_models_response.py → v1_batch_get_models_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_operations_response.py → v1_batch_get_operations_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_tests_response.py → v1_batch_get_tests_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_import_leaderboard_request.py → v1_batch_import_leaderboard_request.py} +7 -7
- eval_studio_client/api/models/{v1alpha_import_leaderboard_response.py → v1_batch_import_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_import_tests_request.py → v1_batch_import_tests_request.py} +5 -5
- eval_studio_client/api/models/{v1alpha_batch_delete_tests_response.py → v1_batch_import_tests_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_check_base_models_response.py → v1_check_base_models_response.py} +5 -5
- eval_studio_client/api/models/{v1alpha_collection_info.py → v1_collection_info.py} +4 -4
- eval_studio_client/api/models/{v1alpha_get_dashboard_response.py → v1_create_dashboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_document_response.py → v1_create_document_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_evaluation_request.py → v1_create_evaluation_request.py} +11 -11
- eval_studio_client/api/models/{v1alpha_get_evaluator_response.py → v1_create_evaluator_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_leaderboard_response.py → v1_create_leaderboard_request.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_operation_response.py → v1_create_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_get_model_response.py → v1_create_model_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_perturbation_response.py → v1_create_perturbation_response.py} +5 -5
- eval_studio_client/api/models/{v1alpha_get_test_case_response.py → v1_create_test_case_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_find_test_lab_response.py → v1_create_test_lab_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_test_response.py → v1_create_test_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_dashboard.py → v1_dashboard.py} +7 -7
- eval_studio_client/api/models/{v1alpha_dashboard_status.py → v1_dashboard_status.py} +3 -3
- eval_studio_client/api/models/{v1alpha_update_dashboard_response.py → v1_delete_dashboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_document_response.py → v1_delete_document_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_evaluator_response.py → v1_delete_evaluator_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_leaderboard_request.py → v1_delete_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_model_response.py → v1_delete_model_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_test_case_response.py → v1_delete_test_case_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_test_response.py → v1_delete_test_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_document.py → v1_document.py} +5 -5
- eval_studio_client/api/models/{v1alpha_evaluation_test.py → v1_evaluation_test.py} +10 -10
- eval_studio_client/api/models/{v1alpha_evaluator.py → v1_evaluator.py} +14 -10
- eval_studio_client/api/models/{v1alpha_evaluator_param_type.py → v1_evaluator_param_type.py} +3 -3
- eval_studio_client/api/models/{v1alpha_evaluator_parameter.py → v1_evaluator_parameter.py} +7 -7
- eval_studio_client/api/models/{v1alpha_evaluator_view.py → v1_evaluator_view.py} +3 -3
- eval_studio_client/api/models/{v1alpha_create_leaderboard_response.py → v1_finalize_operation_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_find_all_test_cases_by_id_response.py → v1_find_all_test_cases_by_id_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_test_lab_response.py → v1_find_test_lab_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_finalize_operation_response.py → v1_generate_test_cases_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_dashboard_response.py → v1_get_dashboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_document_response.py → v1_get_document_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_evaluator_response.py → v1_get_evaluator_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_info_response.py → v1_get_info_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_leaderboard_response.py → v1_get_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_model_response.py → v1_get_model_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_operation_progress_by_parent_response.py → v1_get_operation_progress_by_parent_response.py} +8 -8
- eval_studio_client/api/models/v1_get_operation_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_get_perturbator_response.py → v1_get_perturbator_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_test_case_response.py → v1_get_test_case_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_get_test_class_response.py → v1_get_test_class_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_update_test_response.py → v1_get_test_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_import_evaluation_request.py → v1_import_evaluation_request.py} +8 -8
- eval_studio_client/api/models/{v1alpha_import_leaderboard_request.py → v1_import_leaderboard_request.py} +7 -7
- eval_studio_client/api/models/v1_import_leaderboard_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_info.py → v1_info.py} +5 -5
- eval_studio_client/api/models/{v1alpha_insight.py → v1_insight.py} +4 -4
- eval_studio_client/api/models/{v1alpha_leaderboard.py → v1_leaderboard.py} +15 -15
- eval_studio_client/api/models/{v1alpha_leaderboard_status.py → v1_leaderboard_status.py} +3 -3
- eval_studio_client/api/models/{v1alpha_leaderboard_type.py → v1_leaderboard_type.py} +3 -3
- eval_studio_client/api/models/{v1alpha_leaderboard_view.py → v1_leaderboard_view.py} +3 -3
- eval_studio_client/api/models/{v1alpha_list_base_models_response.py → v1_list_base_models_response.py} +5 -5
- eval_studio_client/api/models/{v1alpha_batch_delete_dashboards_response.py → v1_list_dashboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_documents_response.py → v1_list_documents_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_evaluators_response.py → v1_list_evaluators_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_leaderboards_response.py → v1_list_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_llm_models_response.py → v1_list_llm_models_response.py} +5 -5
- eval_studio_client/api/models/{v1alpha_list_model_collections_response.py → v1_list_model_collections_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_models_response.py → v1_list_models_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_most_recent_dashboards_response.py → v1_list_most_recent_dashboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_most_recent_leaderboards_response.py → v1_list_most_recent_leaderboards_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_most_recent_models_response.py → v1_list_most_recent_models_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_import_tests_response.py → v1_list_most_recent_tests_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_get_operations_response.py → v1_list_operations_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_perturbators_response.py → v1_list_perturbators_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_rag_collections_response.py → v1_list_rag_collections_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_batch_delete_test_cases_response.py → v1_list_test_cases_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_list_test_classes_response.py → v1_list_test_classes_response.py} +8 -8
- eval_studio_client/api/models/v1_list_tests_response.py +95 -0
- eval_studio_client/api/models/{v1alpha_model.py → v1_model.py} +7 -7
- eval_studio_client/api/models/{v1alpha_model_type.py → v1_model_type.py} +3 -3
- eval_studio_client/api/models/{v1alpha_operation.py → v1_operation.py} +4 -4
- eval_studio_client/api/models/{v1alpha_operation_progress.py → v1_operation_progress.py} +5 -5
- eval_studio_client/api/models/{v1alpha_delete_test_response.py → v1_perturb_test_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_perturbator.py → v1_perturbator.py} +5 -5
- eval_studio_client/api/models/{v1alpha_perturbator_configuration.py → v1_perturbator_configuration.py} +6 -6
- eval_studio_client/api/models/{v1alpha_perturbator_intensity.py → v1_perturbator_intensity.py} +4 -4
- eval_studio_client/api/models/{v1alpha_problem_and_action.py → v1_problem_and_action.py} +5 -5
- eval_studio_client/api/models/{v1alpha_test.py → v1_test.py} +5 -5
- eval_studio_client/api/models/{v1alpha_test_case.py → v1_test_case.py} +5 -5
- eval_studio_client/api/models/{v1alpha_test_case_relationship.py → v1_test_case_relationship.py} +5 -5
- eval_studio_client/api/models/v1_test_cases_generator.py +50 -0
- eval_studio_client/api/models/{v1alpha_test_class.py → v1_test_class.py} +7 -7
- eval_studio_client/api/models/{v1alpha_test_class_type.py → v1_test_class_type.py} +3 -3
- eval_studio_client/api/models/{v1alpha_test_lab.py → v1_test_lab.py} +5 -5
- eval_studio_client/api/models/{v1alpha_delete_dashboard_response.py → v1_update_dashboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_create_document_response.py → v1_update_document_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_leaderboard_response.py → v1_update_leaderboard_response.py} +8 -8
- eval_studio_client/api/models/{v1alpha_delete_model_response.py → v1_update_model_response.py} +8 -8
- eval_studio_client/api/models/v1_update_operation_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_update_test_case_response.py → v1_update_test_case_response.py} +8 -8
- eval_studio_client/api/models/v1_update_test_response.py +91 -0
- eval_studio_client/api/models/{v1alpha_who_am_i_response.py → v1_who_am_i_response.py} +5 -5
- eval_studio_client/api/rest.py +1 -1
- eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
- eval_studio_client/api/test/test_document_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
- eval_studio_client/api/test/test_info_service_api.py +1 -1
- eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
- eval_studio_client/api/test/test_model_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +4 -4
- eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_api.py +37 -0
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +75 -0
- eval_studio_client/api/test/test_protobuf_any.py +1 -1
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +3 -3
- eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_to_update.py +1 -1
- eval_studio_client/api/test/test_rpc_status.py +1 -1
- eval_studio_client/api/test/test_test_case_service_api.py +1 -1
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_test_class_service_api.py +1 -1
- eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
- eval_studio_client/api/test/test_test_service_api.py +7 -1
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +57 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +2 -2
- eval_studio_client/api/test/{test_v1alpha_batch_create_leaderboards_request.py → test_v1_batch_create_leaderboards_request.py} +16 -16
- eval_studio_client/api/test/{test_v1alpha_create_leaderboard_response.py → test_v1_batch_create_leaderboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_models_request.py → test_v1_batch_delete_dashboards_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_get_dashboards_response.py → test_v1_batch_delete_dashboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_documents_request.py → test_v1_batch_delete_documents_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_get_documents_response.py → test_v1_batch_delete_documents_response.py} +13 -13
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +53 -0
- eval_studio_client/api/test/{test_v1alpha_batch_delete_evaluators_response.py → test_v1_batch_delete_evaluators_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_batch_delete_leaderboards_request.py → test_v1_batch_delete_leaderboards_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_get_leaderboards_response.py → test_v1_batch_delete_leaderboards_response.py} +15 -15
- eval_studio_client/api/test/test_v1_batch_delete_models_request.py +53 -0
- eval_studio_client/api/test/{test_v1alpha_batch_get_models_response.py → test_v1_batch_delete_models_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_test_cases_response.py → test_v1_batch_delete_test_cases_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_tests_request.py → test_v1_batch_delete_tests_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_get_tests_response.py → test_v1_batch_delete_tests_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_dashboards_response.py → test_v1_batch_get_dashboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_documents_response.py → test_v1_batch_get_documents_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_leaderboards_response.py → test_v1_batch_get_leaderboards_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_list_models_response.py → test_v1_batch_get_models_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_operations_response.py → test_v1_batch_get_operations_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_tests_response.py → test_v1_batch_get_tests_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_import_leaderboard_request.py → test_v1_batch_import_leaderboard_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_import_leaderboard_response.py → test_v1_batch_import_leaderboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_import_tests_request.py → test_v1_batch_import_tests_request.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_import_tests_response.py → test_v1_batch_import_tests_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_check_base_models_response.py → test_v1_check_base_models_response.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_collection_info.py → test_v1_collection_info.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_get_dashboard_response.py → test_v1_create_dashboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_document_response.py → test_v1_create_document_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_evaluation_request.py → test_v1_create_evaluation_request.py} +16 -16
- eval_studio_client/api/test/{test_v1alpha_get_evaluator_response.py → test_v1_create_evaluator_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_get_leaderboard_response.py → test_v1_create_leaderboard_request.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_update_operation_response.py → test_v1_create_leaderboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_import_leaderboard_response.py → test_v1_create_leaderboard_without_cache_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_model_response.py → test_v1_create_model_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_perturbation_response.py → test_v1_create_perturbation_response.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_get_test_case_response.py → test_v1_create_test_case_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_find_test_lab_response.py → test_v1_create_test_lab_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_test_response.py → test_v1_create_test_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_dashboard.py → test_v1_dashboard.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_evaluator_view.py → test_v1_dashboard_status.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_update_dashboard_response.py → test_v1_delete_dashboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_update_document_response.py → test_v1_delete_document_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_evaluator_response.py → test_v1_delete_evaluator_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_create_leaderboard_request.py → test_v1_delete_leaderboard_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_delete_model_response.py → test_v1_delete_model_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_test_case_response.py → test_v1_delete_test_case_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_test_response.py → test_v1_delete_test_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_document.py → test_v1_document.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_evaluation_test.py → test_v1_evaluation_test.py} +14 -14
- eval_studio_client/api/test/{test_v1alpha_evaluator.py → test_v1_evaluator.py} +14 -13
- eval_studio_client/api/test/{test_v1alpha_test_class_type.py → test_v1_evaluator_param_type.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_evaluator_parameter.py → test_v1_evaluator_parameter.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_model_type.py → test_v1_evaluator_view.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_get_operation_response.py → test_v1_finalize_operation_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_find_all_test_cases_by_id_response.py → test_v1_find_all_test_cases_by_id_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_test_lab_response.py → test_v1_find_test_lab_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_finalize_operation_response.py → test_v1_generate_test_cases_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_dashboard_response.py → test_v1_get_dashboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_document_response.py → test_v1_get_document_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_evaluator_response.py → test_v1_get_evaluator_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_get_info_response.py → test_v1_get_info_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_leaderboard_response.py → test_v1_get_leaderboard_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_create_model_response.py → test_v1_get_model_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_operation_progress_by_parent_response.py → test_v1_get_operation_progress_by_parent_response.py} +13 -13
- eval_studio_client/api/test/test_v1_get_operation_response.py +71 -0
- eval_studio_client/api/test/{test_v1alpha_get_perturbator_response.py → test_v1_get_perturbator_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_test_case_response.py → test_v1_get_test_case_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_get_test_class_response.py → test_v1_get_test_class_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_delete_test_response.py → test_v1_get_test_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_import_evaluation_request.py → test_v1_import_evaluation_request.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_import_leaderboard_request.py → test_v1_import_leaderboard_request.py} +12 -12
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +71 -0
- eval_studio_client/api/test/{test_v1alpha_info.py → test_v1_info.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_insight.py → test_v1_insight.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_leaderboard.py → test_v1_leaderboard.py} +14 -14
- eval_studio_client/api/test/{test_v1alpha_dashboard_status.py → test_v1_leaderboard_status.py} +7 -7
- eval_studio_client/api/test/test_v1_leaderboard_type.py +33 -0
- eval_studio_client/api/test/test_v1_leaderboard_view.py +33 -0
- eval_studio_client/api/test/{test_v1alpha_list_base_models_response.py → test_v1_list_base_models_response.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_batch_delete_dashboards_response.py → test_v1_list_dashboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_documents_response.py → test_v1_list_documents_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_evaluators_response.py → test_v1_list_evaluators_response.py} +15 -14
- eval_studio_client/api/test/{test_v1alpha_list_leaderboards_response.py → test_v1_list_leaderboards_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_list_llm_models_response.py → test_v1_list_llm_models_response.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_list_rag_collections_response.py → test_v1_list_model_collections_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_models_response.py → test_v1_list_models_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_most_recent_dashboards_response.py → test_v1_list_most_recent_dashboards_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_most_recent_leaderboards_response.py → test_v1_list_most_recent_leaderboards_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_list_most_recent_models_response.py → test_v1_list_most_recent_models_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_delete_tests_response.py → test_v1_list_most_recent_tests_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_batch_get_operations_response.py → test_v1_list_operations_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_perturbators_response.py → test_v1_list_perturbators_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_model_collections_response.py → test_v1_list_rag_collections_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_test_cases_response.py → test_v1_list_test_cases_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_list_test_classes_response.py → test_v1_list_test_classes_response.py} +13 -13
- eval_studio_client/api/test/test_v1_list_tests_response.py +69 -0
- eval_studio_client/api/test/{test_v1alpha_model.py → test_v1_model.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_leaderboard_view.py → test_v1_model_type.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_operation.py → test_v1_operation.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_operation_progress.py → test_v1_operation_progress.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_update_test_response.py → test_v1_perturb_test_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_perturbator.py → test_v1_perturbator.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_perturbator_configuration.py → test_v1_perturbator_configuration.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_leaderboard_type.py → test_v1_perturbator_intensity.py} +7 -7
- eval_studio_client/api/test/{test_v1alpha_problem_and_action.py → test_v1_problem_and_action.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_test.py → test_v1_test.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_test_case.py → test_v1_test_case.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_test_case_relationship.py → test_v1_test_case_relationship.py} +12 -12
- eval_studio_client/api/test/test_v1_test_cases_generator.py +33 -0
- eval_studio_client/api/test/{test_v1alpha_test_class.py → test_v1_test_class.py} +12 -12
- eval_studio_client/api/test/test_v1_test_class_type.py +33 -0
- eval_studio_client/api/test/{test_v1alpha_test_lab.py → test_v1_test_lab.py} +12 -12
- eval_studio_client/api/test/{test_v1alpha_delete_dashboard_response.py → test_v1_update_dashboard_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_create_document_response.py → test_v1_update_document_response.py} +13 -13
- eval_studio_client/api/test/{test_v1alpha_update_leaderboard_response.py → test_v1_update_leaderboard_response.py} +15 -15
- eval_studio_client/api/test/{test_v1alpha_update_model_response.py → test_v1_update_model_response.py} +13 -13
- eval_studio_client/api/test/test_v1_update_operation_response.py +71 -0
- eval_studio_client/api/test/{test_v1alpha_update_test_case_response.py → test_v1_update_test_case_response.py} +13 -13
- eval_studio_client/api/test/test_v1_update_test_response.py +67 -0
- eval_studio_client/api/test/{test_v1alpha_who_am_i_response.py → test_v1_who_am_i_response.py} +12 -12
- eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
- eval_studio_client/dashboards.py +23 -1
- eval_studio_client/documents.py +3 -3
- eval_studio_client/evaluators.py +1 -1
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +568 -387
- eval_studio_client/insights.py +1 -1
- eval_studio_client/leaderboards.py +14 -13
- eval_studio_client/models.py +117 -29
- eval_studio_client/perturbators.py +5 -7
- eval_studio_client/problems.py +1 -1
- eval_studio_client/test_labs.py +2 -2
- eval_studio_client/tests.py +239 -8
- {eval_studio_client-0.8.0a2.dist-info → eval_studio_client-1.0.0a1.dist-info}/METADATA +2 -2
- eval_studio_client-1.0.0a1.dist-info/RECORD +485 -0
- {eval_studio_client-0.8.0a2.dist-info → eval_studio_client-1.0.0a1.dist-info}/WHEEL +1 -1
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsRequest.md +0 -31
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsRequest.md +0 -30
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteTestCasesResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchDeleteTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetDashboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetDocumentsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetLeaderboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetOperationsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchGetTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchImportLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaBatchImportTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCheckBaseModelsResponse.md +0 -30
- eval_studio_client/api/docs/V1alphaCreateDashboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateDocumentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateEvaluatorResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateLeaderboardRequest.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateLeaderboardWithoutCacheResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateModelResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreatePerturbationResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateTestCaseResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateTestLabResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaCreateTestResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteDashboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteDocumentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteEvaluatorResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteModelResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteTestCaseResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaDeleteTestResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaEvaluationTest.md +0 -32
- eval_studio_client/api/docs/V1alphaFinalizeOperationResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaFindAllTestCasesByIDResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaFindTestLabResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetDashboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetDocumentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetEvaluatorResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetInfoResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetModelResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetOperationProgressByParentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetOperationResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetPerturbatorResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetTestCaseResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetTestClassResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaGetTestResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaImportLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListBaseModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListDashboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListDocumentsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListEvaluatorsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListLLMModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListLeaderboardsResponse.md +0 -30
- eval_studio_client/api/docs/V1alphaListModelCollectionsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListMostRecentDashboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListMostRecentLeaderboardsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListMostRecentModelsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListMostRecentTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListOperationsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListPerturbatorsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListRAGCollectionsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListTestCasesResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListTestClassesResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaListTestsResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaPerturbTestResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaPerturbatorConfiguration.md +0 -32
- eval_studio_client/api/docs/V1alphaUpdateDashboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateDocumentResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateLeaderboardResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateModelResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateOperationResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateTestCaseResponse.md +0 -29
- eval_studio_client/api/docs/V1alphaUpdateTestResponse.md +0 -29
- eval_studio_client/api/models/v1alpha_batch_create_leaderboards_response.py +0 -91
- eval_studio_client/api/models/v1alpha_batch_import_leaderboard_response.py +0 -91
- eval_studio_client/api/models/v1alpha_create_leaderboard_without_cache_response.py +0 -91
- eval_studio_client/api/models/v1alpha_list_most_recent_tests_response.py +0 -95
- eval_studio_client/api/models/v1alpha_perturb_test_response.py +0 -91
- eval_studio_client/api/test/test_v1alpha_batch_create_leaderboards_response.py +0 -71
- eval_studio_client/api/test/test_v1alpha_batch_delete_dashboards_request.py +0 -53
- eval_studio_client/api/test/test_v1alpha_batch_delete_evaluators_request.py +0 -53
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_without_cache_response.py +0 -71
- eval_studio_client/api/test/test_v1alpha_evaluator_param_type.py +0 -33
- eval_studio_client/api/test/test_v1alpha_leaderboard_status.py +0 -33
- eval_studio_client/api/test/test_v1alpha_list_most_recent_tests_response.py +0 -69
- eval_studio_client/api/test/test_v1alpha_perturb_test_response.py +0 -67
- eval_studio_client/api/test/test_v1alpha_perturbator_intensity.py +0 -33
- eval_studio_client-0.8.0a2.dist-info/RECORD +0 -470
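
The dominant change in this release is the promotion of the generated API surface from the `v1alpha` namespace to `v1`: the `V1alpha*` model classes, their `v1alpha_*` modules, and the matching docs and tests are renamed to `V1*`/`v1_*`, as the rename entries above show. A minimal sketch of what that means for client code, using the `BatchDeleteLeaderboardsRequest` model documented below (the JSON payload is illustrative only; `from_json`/`to_json` are the helpers shown in the generated docs):

```python
# eval-studio-client 0.8.0a2 imported generated models from the v1alpha namespace:
# from eval_studio_client.api.models.v1alpha_batch_delete_leaderboards_request import V1alphaBatchDeleteLeaderboardsRequest

# eval-studio-client 1.0.0a1 exposes the same model under the v1 namespace:
from eval_studio_client.api.models.v1_batch_delete_leaderboards_request import V1BatchDeleteLeaderboardsRequest

# Round-trip the model through JSON, mirroring the generated doc examples below.
# The leaderboard resource name is a made-up placeholder.
request = V1BatchDeleteLeaderboardsRequest.from_json('{"names": ["leaderboards/example-leaderboard"]}')
print(request.to_json())
```

Code that still imports from the old `v1alpha_*` modules or references `V1alpha*` classes will need to be updated accordingly when moving to 1.0.0a1.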

eval_studio_client/api/docs/V1BatchDeleteEvaluatorsResponse.md
ADDED
@@ -0,0 +1,29 @@
+# V1BatchDeleteEvaluatorsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**evaluators** | [**List[V1Evaluator]**](V1Evaluator.md) | The deleted Evaluators. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_delete_evaluators_response import V1BatchDeleteEvaluatorsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchDeleteEvaluatorsResponse from a JSON string
+v1_batch_delete_evaluators_response_instance = V1BatchDeleteEvaluatorsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchDeleteEvaluatorsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_delete_evaluators_response_dict = v1_batch_delete_evaluators_response_instance.to_dict()
+# create an instance of V1BatchDeleteEvaluatorsResponse from a dict
+v1_batch_delete_evaluators_response_from_dict = V1BatchDeleteEvaluatorsResponse.from_dict(v1_batch_delete_evaluators_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+

eval_studio_client/api/docs/V1BatchDeleteLeaderboardsRequest.md
ADDED
@@ -0,0 +1,30 @@
+# V1BatchDeleteLeaderboardsRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**names** | **List[str]** | Required. The names of the Leaderboards to delete. A maximum of 1000 can be specified. | [optional]
+**view** | [**V1LeaderboardView**](V1LeaderboardView.md) |  | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_delete_leaderboards_request import V1BatchDeleteLeaderboardsRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchDeleteLeaderboardsRequest from a JSON string
+v1_batch_delete_leaderboards_request_instance = V1BatchDeleteLeaderboardsRequest.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchDeleteLeaderboardsRequest.to_json())
+
+# convert the object into a dict
+v1_batch_delete_leaderboards_request_dict = v1_batch_delete_leaderboards_request_instance.to_dict()
+# create an instance of V1BatchDeleteLeaderboardsRequest from a dict
+v1_batch_delete_leaderboards_request_from_dict = V1BatchDeleteLeaderboardsRequest.from_dict(v1_batch_delete_leaderboards_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+

eval_studio_client/api/docs/V1BatchDeleteLeaderboardsResponse.md
ADDED
@@ -0,0 +1,29 @@
+# V1BatchDeleteLeaderboardsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**leaderboards** | [**List[V1Leaderboard]**](V1Leaderboard.md) | The deleted Leaderboards. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_delete_leaderboards_response import V1BatchDeleteLeaderboardsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchDeleteLeaderboardsResponse from a JSON string
+v1_batch_delete_leaderboards_response_instance = V1BatchDeleteLeaderboardsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchDeleteLeaderboardsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_delete_leaderboards_response_dict = v1_batch_delete_leaderboards_response_instance.to_dict()
+# create an instance of V1BatchDeleteLeaderboardsResponse from a dict
+v1_batch_delete_leaderboards_response_from_dict = V1BatchDeleteLeaderboardsResponse.from_dict(v1_batch_delete_leaderboards_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+

eval_studio_client/api/docs/V1BatchDeleteModelsRequest.md
ADDED
@@ -0,0 +1,29 @@
+# V1BatchDeleteModelsRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**names** | **List[str]** | The names of the Models to delete. A maximum of 1000 can be specified. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_delete_models_request import V1BatchDeleteModelsRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchDeleteModelsRequest from a JSON string
+v1_batch_delete_models_request_instance = V1BatchDeleteModelsRequest.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchDeleteModelsRequest.to_json())
+
+# convert the object into a dict
+v1_batch_delete_models_request_dict = v1_batch_delete_models_request_instance.to_dict()
+# create an instance of V1BatchDeleteModelsRequest from a dict
+v1_batch_delete_models_request_from_dict = V1BatchDeleteModelsRequest.from_dict(v1_batch_delete_models_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+

eval_studio_client/api/docs/V1BatchDeleteModelsResponse.md
ADDED
@@ -0,0 +1,29 @@
+# V1BatchDeleteModelsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**models** | [**List[V1Model]**](V1Model.md) | The Models that were deleted. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_delete_models_response import V1BatchDeleteModelsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchDeleteModelsResponse from a JSON string
+v1_batch_delete_models_response_instance = V1BatchDeleteModelsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchDeleteModelsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_delete_models_response_dict = v1_batch_delete_models_response_instance.to_dict()
+# create an instance of V1BatchDeleteModelsResponse from a dict
+v1_batch_delete_models_response_from_dict = V1BatchDeleteModelsResponse.from_dict(v1_batch_delete_models_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+# V1BatchDeleteTestCasesResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**test_cases** | [**List[V1TestCase]**](V1TestCase.md) | The list of deleted TestCases. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_delete_test_cases_response import V1BatchDeleteTestCasesResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchDeleteTestCasesResponse from a JSON string
+v1_batch_delete_test_cases_response_instance = V1BatchDeleteTestCasesResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchDeleteTestCasesResponse.to_json())
+
+# convert the object into a dict
+v1_batch_delete_test_cases_response_dict = v1_batch_delete_test_cases_response_instance.to_dict()
+# create an instance of V1BatchDeleteTestCasesResponse from a dict
+v1_batch_delete_test_cases_response_from_dict = V1BatchDeleteTestCasesResponse.from_dict(v1_batch_delete_test_cases_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
eval_studio_client/api/docs/{V1alphaBatchDeleteTestsRequest.md → V1BatchDeleteTestsRequest.md}
RENAMED
@@ -1,4 +1,4 @@
-#
+# V1BatchDeleteTestsRequest
 
 
 ## Properties
@@ -11,19 +11,19 @@ Name | Type | Description | Notes
 ## Example
 
 ```python
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_batch_delete_tests_request import V1BatchDeleteTestsRequest
 
 # TODO update the JSON string below
 json = "{}"
-# create an instance of
-
+# create an instance of V1BatchDeleteTestsRequest from a JSON string
+v1_batch_delete_tests_request_instance = V1BatchDeleteTestsRequest.from_json(json)
 # print the JSON string representation of the object
-print(
+print(V1BatchDeleteTestsRequest.to_json())
 
 # convert the object into a dict
-
-# create an instance of
-
+v1_batch_delete_tests_request_dict = v1_batch_delete_tests_request_instance.to_dict()
+# create an instance of V1BatchDeleteTestsRequest from a dict
+v1_batch_delete_tests_request_from_dict = V1BatchDeleteTestsRequest.from_dict(v1_batch_delete_tests_request_dict)
 ```
 [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
 
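The rename above (V1alphaBatchDeleteTestsRequest.md → V1BatchDeleteTestsRequest.md) is part of the move from `V1alpha*` model names to `V1*` in this release. A hedged before/after sketch of the import change a caller would likely make; the old module path is inferred from the old file name and is not shown explicitly in this diff, so verify it against the installed 0.8.0a2 package:

```python
# Old import (0.8.0a2), inferred from the V1alpha* file name -- confirm before relying on it:
# from eval_studio_client.api.models.v1alpha_batch_delete_tests_request import V1alphaBatchDeleteTestsRequest

# New import (1.0.0a1), as documented in the renamed file:
from eval_studio_client.api.models.v1_batch_delete_tests_request import V1BatchDeleteTestsRequest

# Same round-trip usage as in the generated example.
request = V1BatchDeleteTestsRequest.from_json("{}")
print(request.to_json())
```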
@@ -0,0 +1,29 @@
+# V1BatchDeleteTestsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**tests** | [**List[V1Test]**](V1Test.md) | The deleted Tests. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_delete_tests_response import V1BatchDeleteTestsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchDeleteTestsResponse from a JSON string
+v1_batch_delete_tests_response_instance = V1BatchDeleteTestsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchDeleteTestsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_delete_tests_response_dict = v1_batch_delete_tests_response_instance.to_dict()
+# create an instance of V1BatchDeleteTestsResponse from a dict
+v1_batch_delete_tests_response_from_dict = V1BatchDeleteTestsResponse.from_dict(v1_batch_delete_tests_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+# V1BatchGetDashboardsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**dashboards** | [**List[V1Dashboard]**](V1Dashboard.md) | The requested Dashboards. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_get_dashboards_response import V1BatchGetDashboardsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchGetDashboardsResponse from a JSON string
+v1_batch_get_dashboards_response_instance = V1BatchGetDashboardsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchGetDashboardsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_get_dashboards_response_dict = v1_batch_get_dashboards_response_instance.to_dict()
+# create an instance of V1BatchGetDashboardsResponse from a dict
+v1_batch_get_dashboards_response_from_dict = V1BatchGetDashboardsResponse.from_dict(v1_batch_get_dashboards_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+# V1BatchGetDocumentsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**documents** | [**List[V1Document]**](V1Document.md) | The Documents that were requested. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_get_documents_response import V1BatchGetDocumentsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchGetDocumentsResponse from a JSON string
+v1_batch_get_documents_response_instance = V1BatchGetDocumentsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchGetDocumentsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_get_documents_response_dict = v1_batch_get_documents_response_instance.to_dict()
+# create an instance of V1BatchGetDocumentsResponse from a dict
+v1_batch_get_documents_response_from_dict = V1BatchGetDocumentsResponse.from_dict(v1_batch_get_documents_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+# V1BatchGetLeaderboardsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**leaderboards** | [**List[V1Leaderboard]**](V1Leaderboard.md) | The requested Leaderboards. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_get_leaderboards_response import V1BatchGetLeaderboardsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchGetLeaderboardsResponse from a JSON string
+v1_batch_get_leaderboards_response_instance = V1BatchGetLeaderboardsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchGetLeaderboardsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_get_leaderboards_response_dict = v1_batch_get_leaderboards_response_instance.to_dict()
+# create an instance of V1BatchGetLeaderboardsResponse from a dict
+v1_batch_get_leaderboards_response_from_dict = V1BatchGetLeaderboardsResponse.from_dict(v1_batch_get_leaderboards_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+# V1BatchGetModelsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**models** | [**List[V1Model]**](V1Model.md) | The Models that were requested. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_get_models_response import V1BatchGetModelsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchGetModelsResponse from a JSON string
+v1_batch_get_models_response_instance = V1BatchGetModelsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchGetModelsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_get_models_response_dict = v1_batch_get_models_response_instance.to_dict()
+# create an instance of V1BatchGetModelsResponse from a dict
+v1_batch_get_models_response_from_dict = V1BatchGetModelsResponse.from_dict(v1_batch_get_models_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+# V1BatchGetOperationsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**operations** | [**List[V1Operation]**](V1Operation.md) | The Operations that were requested. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_get_operations_response import V1BatchGetOperationsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchGetOperationsResponse from a JSON string
+v1_batch_get_operations_response_instance = V1BatchGetOperationsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchGetOperationsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_get_operations_response_dict = v1_batch_get_operations_response_instance.to_dict()
+# create an instance of V1BatchGetOperationsResponse from a dict
+v1_batch_get_operations_response_from_dict = V1BatchGetOperationsResponse.from_dict(v1_batch_get_operations_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+# V1BatchGetTestsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**tests** | [**List[V1Test]**](V1Test.md) | The Tests that were requested. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_get_tests_response import V1BatchGetTestsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchGetTestsResponse from a JSON string
+v1_batch_get_tests_response_instance = V1BatchGetTestsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchGetTestsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_get_tests_response_dict = v1_batch_get_tests_response_instance.to_dict()
+# create an instance of V1BatchGetTestsResponse from a dict
+v1_batch_get_tests_response_from_dict = V1BatchGetTestsResponse.from_dict(v1_batch_get_tests_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
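Each of the new `V1BatchGet*Response` models above wraps a single optional list field (`dashboards`, `documents`, `leaderboards`, `models`, `operations`, `tests`). A small sketch of unpacking one of them from a JSON payload; the payload itself is a made-up placeholder, and the attribute access assumes the pythonic field name matches the documented property, as is usual for these generated models:

```python
from eval_studio_client.api.models.v1_batch_get_tests_response import V1BatchGetTestsResponse

# Hypothetical payload, e.g. the body returned by a batch-get call.
payload = '{"tests": []}'

response = V1BatchGetTestsResponse.from_json(payload)

# "tests" is documented as optional, so guard against None before iterating.
for test in response.tests or []:
    print(test.to_json())
```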
@@ -1,4 +1,4 @@
-#
+# V1BatchImportLeaderboardRequest
 
 
 ## Properties
@@ -11,26 +11,26 @@ Name | Type | Description | Notes
 **model** | **str** | Required. Resource name of the Model used in this Leaderboard. | [optional]
 **test_display_name** | **str** | Required. Display name of the newly created Test. | [optional]
 **test_description** | **str** | Optional. Description of the newly created Test. | [optional]
-**leaderboard_type** | [**
+**leaderboard_type** | [**V1LeaderboardType**](V1LeaderboardType.md) | | [optional]
 **dashboard_display_name** | **str** | Optional. Display name for the dashboard that will group the leaderboards. | [optional]
 **dashboard_description** | **str** | Optional. Description for the dashboard that will group the leaderboards. | [optional]
 
 ## Example
 
 ```python
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_batch_import_leaderboard_request import V1BatchImportLeaderboardRequest
 
 # TODO update the JSON string below
 json = "{}"
-# create an instance of
-
+# create an instance of V1BatchImportLeaderboardRequest from a JSON string
+v1_batch_import_leaderboard_request_instance = V1BatchImportLeaderboardRequest.from_json(json)
 # print the JSON string representation of the object
-print(
+print(V1BatchImportLeaderboardRequest.to_json())
 
 # convert the object into a dict
-
-# create an instance of
-
+v1_batch_import_leaderboard_request_dict = v1_batch_import_leaderboard_request_instance.to_dict()
+# create an instance of V1BatchImportLeaderboardRequest from a dict
+v1_batch_import_leaderboard_request_from_dict = V1BatchImportLeaderboardRequest.from_dict(v1_batch_import_leaderboard_request_dict)
 ```
 [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
 
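The import request above documents plain string fields (`model`, `test_display_name`, `test_description`, `dashboard_display_name`, `dashboard_description`) plus an enum-typed `leaderboard_type`. A sketch of assembling it with the documented field names, assuming the generated pydantic model accepts the pythonic names as keyword arguments (the usual behaviour of these clients); all values are illustrative placeholders, and `leaderboard_type` is omitted because its enum values are not listed in this diff:

```python
from eval_studio_client.api.models.v1_batch_import_leaderboard_request import V1BatchImportLeaderboardRequest

# All values below are illustrative placeholders.
request = V1BatchImportLeaderboardRequest(
    model="models/example-model",
    test_display_name="Imported regression test",
    test_description="Created by a batch leaderboard import.",
    dashboard_display_name="Imported leaderboards",
    dashboard_description="Groups the leaderboards created by this import.",
)

print(request.to_json())
```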
@@ -0,0 +1,29 @@
+# V1BatchImportLeaderboardResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**operation** | [**V1Operation**](V1Operation.md) | | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_import_leaderboard_response import V1BatchImportLeaderboardResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchImportLeaderboardResponse from a JSON string
+v1_batch_import_leaderboard_response_instance = V1BatchImportLeaderboardResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchImportLeaderboardResponse.to_json())
+
+# convert the object into a dict
+v1_batch_import_leaderboard_response_dict = v1_batch_import_leaderboard_response_instance.to_dict()
+# create an instance of V1BatchImportLeaderboardResponse from a dict
+v1_batch_import_leaderboard_response_from_dict = V1BatchImportLeaderboardResponse.from_dict(v1_batch_import_leaderboard_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
eval_studio_client/api/docs/{V1alphaBatchImportTestsRequest.md → V1BatchImportTestsRequest.md}
RENAMED
@@ -1,4 +1,4 @@
-#
+# V1BatchImportTestsRequest
 
 
 ## Properties
@@ -13,19 +13,19 @@ Name | Type | Description | Notes
 ## Example
 
 ```python
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_batch_import_tests_request import V1BatchImportTestsRequest
 
 # TODO update the JSON string below
 json = "{}"
-# create an instance of
-
+# create an instance of V1BatchImportTestsRequest from a JSON string
+v1_batch_import_tests_request_instance = V1BatchImportTestsRequest.from_json(json)
 # print the JSON string representation of the object
-print(
+print(V1BatchImportTestsRequest.to_json())
 
 # convert the object into a dict
-
-# create an instance of
-
+v1_batch_import_tests_request_dict = v1_batch_import_tests_request_instance.to_dict()
+# create an instance of V1BatchImportTestsRequest from a dict
+v1_batch_import_tests_request_from_dict = V1BatchImportTestsRequest.from_dict(v1_batch_import_tests_request_dict)
 ```
 [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
 
@@ -0,0 +1,29 @@
+# V1BatchImportTestsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**tests** | [**List[V1Test]**](V1Test.md) | The imported Tests. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_import_tests_response import V1BatchImportTestsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchImportTestsResponse from a JSON string
+v1_batch_import_tests_response_instance = V1BatchImportTestsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchImportTestsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_import_tests_response_dict = v1_batch_import_tests_response_instance.to_dict()
+# create an instance of V1BatchImportTestsResponse from a dict
+v1_batch_import_tests_response_from_dict = V1BatchImportTestsResponse.from_dict(v1_batch_import_tests_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,30 @@
+# V1CheckBaseModelsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**model_availability** | **bool** | The model availability check. | [optional]
+**reason** | **str** | Optional. Information on why the model isn't available. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_check_base_models_response import V1CheckBaseModelsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1CheckBaseModelsResponse from a JSON string
+v1_check_base_models_response_instance = V1CheckBaseModelsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1CheckBaseModelsResponse.to_json())
+
+# convert the object into a dict
+v1_check_base_models_response_dict = v1_check_base_models_response_instance.to_dict()
+# create an instance of V1CheckBaseModelsResponse from a dict
+v1_check_base_models_response_from_dict = V1CheckBaseModelsResponse.from_dict(v1_check_base_models_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
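The new `V1CheckBaseModelsResponse` above carries a boolean availability flag and an optional human-readable reason. A short sketch of interpreting it, assuming the generated model accepts the documented snake_case field names as keyword arguments; the values are fabricated for illustration, since a real instance would come back from the base-model check call:

```python
from eval_studio_client.api.models.v1_check_base_models_response import V1CheckBaseModelsResponse

# Fabricated response for illustration only.
response = V1CheckBaseModelsResponse(
    model_availability=False,
    reason="The configured base model is not reachable.",
)

if response.model_availability:
    print("Base model is available.")
else:
    # "reason" is optional, so it may be None even when the check fails.
    print(f"Base model unavailable: {response.reason or 'no reason provided'}")
```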
@@ -1,4 +1,4 @@
-#
+# V1CollectionInfo
 
 CollectionInfo represents the information about a collection in the H2OGPTE.
 
@@ -14,19 +14,19 @@ Name | Type | Description | Notes
 ## Example
 
 ```python
-from eval_studio_client.api.models.
+from eval_studio_client.api.models.v1_collection_info import V1CollectionInfo
 
 # TODO update the JSON string below
 json = "{}"
-# create an instance of
-
+# create an instance of V1CollectionInfo from a JSON string
+v1_collection_info_instance = V1CollectionInfo.from_json(json)
 # print the JSON string representation of the object
-print(
+print(V1CollectionInfo.to_json())
 
 # convert the object into a dict
-
-# create an instance of
-
+v1_collection_info_dict = v1_collection_info_instance.to_dict()
+# create an instance of V1CollectionInfo from a dict
+v1_collection_info_from_dict = V1CollectionInfo.from_dict(v1_collection_info_dict)
 ```
 [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
 
@@ -0,0 +1,29 @@
+# V1CreateDashboardResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**dashboard** | [**V1Dashboard**](V1Dashboard.md) | | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_create_dashboard_response import V1CreateDashboardResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1CreateDashboardResponse from a JSON string
+v1_create_dashboard_response_instance = V1CreateDashboardResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1CreateDashboardResponse.to_json())
+
+# convert the object into a dict
+v1_create_dashboard_response_dict = v1_create_dashboard_response_instance.to_dict()
+# create an instance of V1CreateDashboardResponse from a dict
+v1_create_dashboard_response_from_dict = V1CreateDashboardResponse.from_dict(v1_create_dashboard_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
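Finally, `V1CreateDashboardResponse` simply wraps the created `V1Dashboard` in an optional `dashboard` field. A hedged sketch of pulling the dashboard out of a deserialized response; the empty JSON payload mirrors the placeholder used in the generated example, so the field is expected to be None here:

```python
from eval_studio_client.api.models.v1_create_dashboard_response import V1CreateDashboardResponse

# "{}" mirrors the placeholder in the generated docs; a real payload would
# contain the "dashboard" object returned by the create call.
response = V1CreateDashboardResponse.from_json("{}")

# "dashboard" is optional, so it is None for the empty placeholder payload.
if response.dashboard is not None:
    print(response.dashboard.to_json())
else:
    print("No dashboard present in the response payload.")
```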