eval-studio-client 0.7.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/__about__.py +1 -0
- eval_studio_client/__init__.py +4 -0
- eval_studio_client/api/__init__.py +180 -0
- eval_studio_client/api/api/__init__.py +20 -0
- eval_studio_client/api/api/dashboard_service_api.py +2142 -0
- eval_studio_client/api/api/document_service_api.py +1868 -0
- eval_studio_client/api/api/evaluation_service_api.py +1603 -0
- eval_studio_client/api/api/evaluator_service_api.py +1343 -0
- eval_studio_client/api/api/info_service_api.py +275 -0
- eval_studio_client/api/api/leaderboard_service_api.py +3336 -0
- eval_studio_client/api/api/model_service_api.py +2913 -0
- eval_studio_client/api/api/operation_progress_service_api.py +292 -0
- eval_studio_client/api/api/operation_service_api.py +1359 -0
- eval_studio_client/api/api/perturbation_service_api.py +321 -0
- eval_studio_client/api/api/perturbator_service_api.py +532 -0
- eval_studio_client/api/api/test_case_service_api.py +1913 -0
- eval_studio_client/api/api/test_class_service_api.py +532 -0
- eval_studio_client/api/api/test_lab_service_api.py +634 -0
- eval_studio_client/api/api/test_service_api.py +2712 -0
- eval_studio_client/api/api/who_am_i_service_api.py +275 -0
- eval_studio_client/api/api_client.py +770 -0
- eval_studio_client/api/api_response.py +21 -0
- eval_studio_client/api/configuration.py +436 -0
- eval_studio_client/api/docs/DashboardServiceApi.md +549 -0
- eval_studio_client/api/docs/DocumentServiceApi.md +478 -0
- eval_studio_client/api/docs/EvaluationServiceApi.md +332 -0
- eval_studio_client/api/docs/EvaluatorServiceApi.md +345 -0
- eval_studio_client/api/docs/InfoServiceApi.md +71 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +835 -0
- eval_studio_client/api/docs/ModelServiceApi.md +750 -0
- eval_studio_client/api/docs/OperationProgressServiceApi.md +75 -0
- eval_studio_client/api/docs/OperationServiceApi.md +345 -0
- eval_studio_client/api/docs/PerturbationServiceApi.md +78 -0
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +31 -0
- eval_studio_client/api/docs/PerturbatorServiceApi.md +138 -0
- eval_studio_client/api/docs/ProtobufAny.md +30 -0
- eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +41 -0
- eval_studio_client/api/docs/RequiredTheDocumentToUpdate.md +38 -0
- eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +54 -0
- eval_studio_client/api/docs/RequiredTheModelToUpdate.md +41 -0
- eval_studio_client/api/docs/RequiredTheOperationToFinalize.md +39 -0
- eval_studio_client/api/docs/RequiredTheOperationToUpdate.md +39 -0
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +39 -0
- eval_studio_client/api/docs/RequiredTheTestToUpdate.md +39 -0
- eval_studio_client/api/docs/RpcStatus.md +32 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +486 -0
- eval_studio_client/api/docs/TestCaseServiceBatchDeleteTestCasesRequest.md +29 -0
- eval_studio_client/api/docs/TestClassServiceApi.md +138 -0
- eval_studio_client/api/docs/TestLabServiceApi.md +151 -0
- eval_studio_client/api/docs/TestServiceApi.md +689 -0
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +31 -0
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsRequest.md +31 -0
- eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsRequest.md +30 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteTestsRequest.md +30 -0
- eval_studio_client/api/docs/V1alphaBatchDeleteTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchGetTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchImportLeaderboardRequest.md +37 -0
- eval_studio_client/api/docs/V1alphaBatchImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaBatchImportTestsRequest.md +32 -0
- eval_studio_client/api/docs/V1alphaBatchImportTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCheckBaseModelsResponse.md +30 -0
- eval_studio_client/api/docs/V1alphaCollectionInfo.md +33 -0
- eval_studio_client/api/docs/V1alphaCreateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateEvaluationRequest.md +37 -0
- eval_studio_client/api/docs/V1alphaCreateEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateLeaderboardRequest.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateLeaderboardWithoutCacheResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreatePerturbationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaCreateTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDashboard.md +41 -0
- eval_studio_client/api/docs/V1alphaDashboardStatus.md +12 -0
- eval_studio_client/api/docs/V1alphaDeleteDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDeleteTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaDocument.md +38 -0
- eval_studio_client/api/docs/V1alphaEvaluationTest.md +32 -0
- eval_studio_client/api/docs/V1alphaEvaluator.md +45 -0
- eval_studio_client/api/docs/V1alphaEvaluatorParamType.md +12 -0
- eval_studio_client/api/docs/V1alphaEvaluatorParameter.md +40 -0
- eval_studio_client/api/docs/V1alphaEvaluatorView.md +12 -0
- eval_studio_client/api/docs/V1alphaFinalizeOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaFindAllTestCasesByIDResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaFindTestLabResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetEvaluatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetInfoResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetOperationProgressByParentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetPerturbatorResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetTestClassResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaGetTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaImportEvaluationRequest.md +33 -0
- eval_studio_client/api/docs/V1alphaImportLeaderboardRequest.md +37 -0
- eval_studio_client/api/docs/V1alphaImportLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaInfo.md +35 -0
- eval_studio_client/api/docs/V1alphaInsight.md +40 -0
- eval_studio_client/api/docs/V1alphaLeaderboard.md +54 -0
- eval_studio_client/api/docs/V1alphaLeaderboardStatus.md +12 -0
- eval_studio_client/api/docs/V1alphaLeaderboardType.md +12 -0
- eval_studio_client/api/docs/V1alphaLeaderboardView.md +12 -0
- eval_studio_client/api/docs/V1alphaListBaseModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListDocumentsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListEvaluatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListLLMModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListLeaderboardsResponse.md +30 -0
- eval_studio_client/api/docs/V1alphaListModelCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentDashboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentLeaderboardsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentModelsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListMostRecentTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListOperationsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListPerturbatorsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListRAGCollectionsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListTestCasesResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListTestClassesResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaListTestsResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaModel.md +42 -0
- eval_studio_client/api/docs/V1alphaModelType.md +12 -0
- eval_studio_client/api/docs/V1alphaOperation.md +40 -0
- eval_studio_client/api/docs/V1alphaOperationProgress.md +32 -0
- eval_studio_client/api/docs/V1alphaPerturbTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaPerturbator.md +39 -0
- eval_studio_client/api/docs/V1alphaPerturbatorConfiguration.md +32 -0
- eval_studio_client/api/docs/V1alphaPerturbatorIntensity.md +11 -0
- eval_studio_client/api/docs/V1alphaProblemAndAction.md +39 -0
- eval_studio_client/api/docs/V1alphaTest.md +40 -0
- eval_studio_client/api/docs/V1alphaTestCase.md +40 -0
- eval_studio_client/api/docs/V1alphaTestCaseRelationship.md +31 -0
- eval_studio_client/api/docs/V1alphaTestClass.md +41 -0
- eval_studio_client/api/docs/V1alphaTestClassType.md +12 -0
- eval_studio_client/api/docs/V1alphaTestLab.md +41 -0
- eval_studio_client/api/docs/V1alphaUpdateDashboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateDocumentResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateLeaderboardResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateModelResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateTestCaseResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaUpdateTestResponse.md +29 -0
- eval_studio_client/api/docs/V1alphaWhoAmIResponse.md +31 -0
- eval_studio_client/api/docs/WhoAmIServiceApi.md +72 -0
- eval_studio_client/api/exceptions.py +199 -0
- eval_studio_client/api/models/__init__.py +148 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +115 -0
- eval_studio_client/api/models/protobuf_any.py +100 -0
- eval_studio_client/api/models/required_the_dashboard_to_update.py +127 -0
- eval_studio_client/api/models/required_the_document_to_update.py +116 -0
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +178 -0
- eval_studio_client/api/models/required_the_model_to_update.py +127 -0
- eval_studio_client/api/models/required_the_operation_to_finalize.py +129 -0
- eval_studio_client/api/models/required_the_operation_to_update.py +129 -0
- eval_studio_client/api/models/required_the_test_case_to_update.py +120 -0
- eval_studio_client/api/models/required_the_test_to_update.py +122 -0
- eval_studio_client/api/models/rpc_status.py +99 -0
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +87 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +99 -0
- eval_studio_client/api/models/v1alpha_batch_create_leaderboards_request.py +99 -0
- eval_studio_client/api/models/v1alpha_batch_create_leaderboards_response.py +91 -0
- eval_studio_client/api/models/v1alpha_batch_delete_dashboards_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_documents_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_documents_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_evaluators_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_evaluators_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_leaderboards_request.py +90 -0
- eval_studio_client/api/models/v1alpha_batch_delete_leaderboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_models_request.py +87 -0
- eval_studio_client/api/models/v1alpha_batch_delete_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_test_cases_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_delete_tests_request.py +89 -0
- eval_studio_client/api/models/v1alpha_batch_delete_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_documents_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_leaderboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_operations_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_get_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_batch_import_leaderboard_request.py +104 -0
- eval_studio_client/api/models/v1alpha_batch_import_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_batch_import_tests_request.py +93 -0
- eval_studio_client/api/models/v1alpha_batch_import_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_check_base_models_response.py +89 -0
- eval_studio_client/api/models/v1alpha_collection_info.py +93 -0
- eval_studio_client/api/models/v1alpha_create_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_evaluation_request.py +115 -0
- eval_studio_client/api/models/v1alpha_create_evaluator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_leaderboard_request.py +91 -0
- eval_studio_client/api/models/v1alpha_create_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_leaderboard_without_cache_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_perturbation_response.py +87 -0
- eval_studio_client/api/models/v1alpha_create_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_test_lab_response.py +91 -0
- eval_studio_client/api/models/v1alpha_create_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_dashboard.py +131 -0
- eval_studio_client/api/models/v1alpha_dashboard_status.py +39 -0
- eval_studio_client/api/models/v1alpha_delete_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_evaluator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_delete_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_document.py +120 -0
- eval_studio_client/api/models/v1alpha_evaluation_test.py +107 -0
- eval_studio_client/api/models/v1alpha_evaluator.py +155 -0
- eval_studio_client/api/models/v1alpha_evaluator_param_type.py +42 -0
- eval_studio_client/api/models/v1alpha_evaluator_parameter.py +126 -0
- eval_studio_client/api/models/v1alpha_evaluator_view.py +38 -0
- eval_studio_client/api/models/v1alpha_finalize_operation_response.py +91 -0
- eval_studio_client/api/models/v1alpha_find_all_test_cases_by_id_response.py +95 -0
- eval_studio_client/api/models/v1alpha_find_test_lab_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_evaluator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_info_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_operation_progress_by_parent_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_operation_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_perturbator_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_test_class_response.py +91 -0
- eval_studio_client/api/models/v1alpha_get_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_import_evaluation_request.py +99 -0
- eval_studio_client/api/models/v1alpha_import_leaderboard_request.py +104 -0
- eval_studio_client/api/models/v1alpha_import_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_info.py +99 -0
- eval_studio_client/api/models/v1alpha_insight.py +107 -0
- eval_studio_client/api/models/v1alpha_leaderboard.py +182 -0
- eval_studio_client/api/models/v1alpha_leaderboard_status.py +39 -0
- eval_studio_client/api/models/v1alpha_leaderboard_type.py +39 -0
- eval_studio_client/api/models/v1alpha_leaderboard_view.py +39 -0
- eval_studio_client/api/models/v1alpha_list_base_models_response.py +87 -0
- eval_studio_client/api/models/v1alpha_list_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_documents_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_evaluators_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_leaderboards_response.py +97 -0
- eval_studio_client/api/models/v1alpha_list_llm_models_response.py +87 -0
- eval_studio_client/api/models/v1alpha_list_model_collections_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_dashboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_leaderboards_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_models_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_most_recent_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_operations_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_perturbators_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_rag_collections_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_test_cases_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_test_classes_response.py +95 -0
- eval_studio_client/api/models/v1alpha_list_tests_response.py +95 -0
- eval_studio_client/api/models/v1alpha_model.py +131 -0
- eval_studio_client/api/models/v1alpha_model_type.py +46 -0
- eval_studio_client/api/models/v1alpha_operation.py +133 -0
- eval_studio_client/api/models/v1alpha_operation_progress.py +99 -0
- eval_studio_client/api/models/v1alpha_perturb_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_perturbator.py +122 -0
- eval_studio_client/api/models/v1alpha_perturbator_configuration.py +92 -0
- eval_studio_client/api/models/v1alpha_perturbator_intensity.py +39 -0
- eval_studio_client/api/models/v1alpha_problem_and_action.py +129 -0
- eval_studio_client/api/models/v1alpha_test.py +126 -0
- eval_studio_client/api/models/v1alpha_test_case.py +124 -0
- eval_studio_client/api/models/v1alpha_test_case_relationship.py +91 -0
- eval_studio_client/api/models/v1alpha_test_class.py +127 -0
- eval_studio_client/api/models/v1alpha_test_class_type.py +42 -0
- eval_studio_client/api/models/v1alpha_test_lab.py +137 -0
- eval_studio_client/api/models/v1alpha_update_dashboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_document_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_leaderboard_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_model_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_operation_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_test_case_response.py +91 -0
- eval_studio_client/api/models/v1alpha_update_test_response.py +91 -0
- eval_studio_client/api/models/v1alpha_who_am_i_response.py +91 -0
- eval_studio_client/api/rest.py +257 -0
- eval_studio_client/api/test/__init__.py +0 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +79 -0
- eval_studio_client/api/test/test_document_service_api.py +73 -0
- eval_studio_client/api/test/test_evaluation_service_api.py +55 -0
- eval_studio_client/api/test/test_evaluator_service_api.py +61 -0
- eval_studio_client/api/test/test_info_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +103 -0
- eval_studio_client/api/test/test_model_service_api.py +97 -0
- eval_studio_client/api/test/test_operation_progress_service_api.py +37 -0
- eval_studio_client/api/test/test_operation_service_api.py +61 -0
- eval_studio_client/api/test/test_perturbation_service_api.py +37 -0
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +79 -0
- eval_studio_client/api/test/test_perturbator_service_api.py +43 -0
- eval_studio_client/api/test/test_protobuf_any.py +51 -0
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +64 -0
- eval_studio_client/api/test/test_required_the_document_to_update.py +59 -0
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +115 -0
- eval_studio_client/api/test/test_required_the_model_to_update.py +63 -0
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +71 -0
- eval_studio_client/api/test/test_required_the_operation_to_update.py +71 -0
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +63 -0
- eval_studio_client/api/test/test_required_the_test_to_update.py +65 -0
- eval_studio_client/api/test/test_rpc_status.py +57 -0
- eval_studio_client/api/test/test_test_case_service_api.py +73 -0
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +53 -0
- eval_studio_client/api/test/test_test_class_service_api.py +43 -0
- eval_studio_client/api/test/test_test_lab_service_api.py +43 -0
- eval_studio_client/api/test/test_test_service_api.py +91 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +58 -0
- eval_studio_client/api/test/test_v1alpha_batch_create_leaderboards_request.py +119 -0
- eval_studio_client/api/test/test_v1alpha_batch_create_leaderboards_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_dashboards_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_documents_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_documents_response.py +63 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_evaluators_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_evaluators_response.py +91 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_leaderboards_request.py +54 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_leaderboards_response.py +116 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_models_request.py +53 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_test_cases_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_tests_request.py +54 -0
- eval_studio_client/api/test/test_v1alpha_batch_delete_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_documents_response.py +63 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_leaderboards_response.py +116 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_operations_response.py +73 -0
- eval_studio_client/api/test/test_v1alpha_batch_get_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_leaderboard_request.py +61 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_leaderboard_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_tests_request.py +54 -0
- eval_studio_client/api/test/test_v1alpha_batch_import_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_check_base_models_response.py +52 -0
- eval_studio_client/api/test/test_v1alpha_collection_info.py +54 -0
- eval_studio_client/api/test/test_v1alpha_create_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_create_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_create_evaluation_request.py +107 -0
- eval_studio_client/api/test/test_v1alpha_create_evaluator_response.py +89 -0
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_request.py +114 -0
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_create_leaderboard_without_cache_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_create_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_create_perturbation_response.py +51 -0
- eval_studio_client/api/test/test_v1alpha_create_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_create_test_lab_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_create_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_dashboard.py +65 -0
- eval_studio_client/api/test/test_v1alpha_dashboard_status.py +33 -0
- eval_studio_client/api/test/test_v1alpha_delete_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_delete_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_delete_evaluator_response.py +89 -0
- eval_studio_client/api/test/test_v1alpha_delete_leaderboard_response.py +114 -0
- eval_studio_client/api/test/test_v1alpha_delete_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_delete_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_delete_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_document.py +60 -0
- eval_studio_client/api/test/test_v1alpha_evaluation_test.py +76 -0
- eval_studio_client/api/test/test_v1alpha_evaluator.py +91 -0
- eval_studio_client/api/test/test_v1alpha_evaluator_param_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_evaluator_parameter.py +68 -0
- eval_studio_client/api/test/test_v1alpha_evaluator_view.py +33 -0
- eval_studio_client/api/test/test_v1alpha_finalize_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_find_all_test_cases_by_id_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_find_test_lab_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_get_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_get_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_get_evaluator_response.py +89 -0
- eval_studio_client/api/test/test_v1alpha_get_info_response.py +60 -0
- eval_studio_client/api/test/test_v1alpha_get_leaderboard_response.py +114 -0
- eval_studio_client/api/test/test_v1alpha_get_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_get_operation_progress_by_parent_response.py +55 -0
- eval_studio_client/api/test/test_v1alpha_get_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_get_perturbator_response.py +64 -0
- eval_studio_client/api/test/test_v1alpha_get_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_get_test_class_response.py +70 -0
- eval_studio_client/api/test/test_v1alpha_get_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_import_evaluation_request.py +73 -0
- eval_studio_client/api/test/test_v1alpha_import_leaderboard_request.py +59 -0
- eval_studio_client/api/test/test_v1alpha_import_leaderboard_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_info.py +59 -0
- eval_studio_client/api/test/test_v1alpha_insight.py +67 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard.py +116 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard_status.py +33 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_leaderboard_view.py +33 -0
- eval_studio_client/api/test/test_v1alpha_list_base_models_response.py +53 -0
- eval_studio_client/api/test/test_v1alpha_list_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_list_documents_response.py +63 -0
- eval_studio_client/api/test/test_v1alpha_list_evaluators_response.py +91 -0
- eval_studio_client/api/test/test_v1alpha_list_leaderboards_response.py +117 -0
- eval_studio_client/api/test/test_v1alpha_list_llm_models_response.py +53 -0
- eval_studio_client/api/test/test_v1alpha_list_model_collections_response.py +57 -0
- eval_studio_client/api/test/test_v1alpha_list_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_dashboards_response.py +68 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_leaderboards_response.py +116 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_models_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_list_most_recent_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_list_operations_response.py +73 -0
- eval_studio_client/api/test/test_v1alpha_list_perturbators_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_list_rag_collections_response.py +57 -0
- eval_studio_client/api/test/test_v1alpha_list_test_cases_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_list_test_classes_response.py +72 -0
- eval_studio_client/api/test/test_v1alpha_list_tests_response.py +69 -0
- eval_studio_client/api/test/test_v1alpha_model.py +64 -0
- eval_studio_client/api/test/test_v1alpha_model_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_operation.py +72 -0
- eval_studio_client/api/test/test_v1alpha_operation_progress.py +54 -0
- eval_studio_client/api/test/test_v1alpha_perturb_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_perturbator.py +63 -0
- eval_studio_client/api/test/test_v1alpha_perturbator_configuration.py +53 -0
- eval_studio_client/api/test/test_v1alpha_perturbator_intensity.py +33 -0
- eval_studio_client/api/test/test_v1alpha_problem_and_action.py +65 -0
- eval_studio_client/api/test/test_v1alpha_test.py +66 -0
- eval_studio_client/api/test/test_v1alpha_test_case.py +64 -0
- eval_studio_client/api/test/test_v1alpha_test_case_relationship.py +53 -0
- eval_studio_client/api/test/test_v1alpha_test_class.py +69 -0
- eval_studio_client/api/test/test_v1alpha_test_class_type.py +33 -0
- eval_studio_client/api/test/test_v1alpha_test_lab.py +67 -0
- eval_studio_client/api/test/test_v1alpha_update_dashboard_response.py +66 -0
- eval_studio_client/api/test/test_v1alpha_update_document_response.py +61 -0
- eval_studio_client/api/test/test_v1alpha_update_leaderboard_response.py +114 -0
- eval_studio_client/api/test/test_v1alpha_update_model_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_update_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1alpha_update_test_case_response.py +65 -0
- eval_studio_client/api/test/test_v1alpha_update_test_response.py +67 -0
- eval_studio_client/api/test/test_v1alpha_who_am_i_response.py +53 -0
- eval_studio_client/api/test/test_who_am_i_service_api.py +38 -0
- eval_studio_client/client.py +98 -0
- eval_studio_client/dashboards.py +187 -0
- eval_studio_client/documents.py +95 -0
- eval_studio_client/evaluators.py +65 -0
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +6043 -0
- eval_studio_client/insights.py +35 -0
- eval_studio_client/leaderboards.py +207 -0
- eval_studio_client/models.py +522 -0
- eval_studio_client/perturbators.py +101 -0
- eval_studio_client/problems.py +50 -0
- eval_studio_client/test_labs.py +319 -0
- eval_studio_client/tests.py +369 -0
- eval_studio_client-0.7.0.dist-info/METADATA +18 -0
- eval_studio_client-0.7.0.dist-info/RECORD +470 -0
- eval_studio_client-0.7.0.dist-info/WHEEL +4 -0
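The generated `eval_studio_client.api` package listed above follows the standard openapi-generator layout: a `Configuration` object, an `ApiClient` context manager, and one `*ServiceApi` class per service, alongside the higher-level wrappers in `eval_studio_client/*.py`. As a quick orientation before the per-method docs below, here is a minimal sketch of constructing the low-level client; it assumes the wheel is installed as `eval-studio-client` and that an Eval Studio server is reachable at the placeholder host.

```python
# Minimal sketch (not part of the package docs): construct the generated client.
# Assumes `pip install eval-studio-client` and a reachable Eval Studio server;
# "http://localhost" is a placeholder host.
import eval_studio_client.api

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    # One service class per API; EvaluationServiceApi is documented below.
    evaluations = eval_studio_client.api.EvaluationServiceApi(api_client)
    # Every parameter of this list call is optional per its documented signature.
    print(evaluations.evaluation_service_list_llm_models(retries=3))
```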
@@ -0,0 +1,332 @@

# eval_studio_client.api.EvaluationServiceApi

All URIs are relative to *http://localhost*

Method | HTTP request | Description
------------- | ------------- | -------------
[**evaluation_service_create_evaluation**](EvaluationServiceApi.md#evaluation_service_create_evaluation) | **POST** /v1alpha/evaluations |
[**evaluation_service_import_evaluation**](EvaluationServiceApi.md#evaluation_service_import_evaluation) | **POST** /v1alpha/evaluations:import |
[**evaluation_service_list_llm_models**](EvaluationServiceApi.md#evaluation_service_list_llm_models) | **GET** /v1alpha/evaluations:llm_models |
[**evaluation_service_list_rag_collections**](EvaluationServiceApi.md#evaluation_service_list_rag_collections) | **GET** /v1alpha/evaluations:rag_collections |

# **evaluation_service_create_evaluation**
> V1alphaOperation evaluation_service_create_evaluation(body)

### Example

```python
import eval_studio_client.api
from eval_studio_client.api.models.v1alpha_create_evaluation_request import V1alphaCreateEvaluationRequest
from eval_studio_client.api.models.v1alpha_operation import V1alphaOperation
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)

# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.EvaluationServiceApi(api_client)
    body = eval_studio_client.api.V1alphaCreateEvaluationRequest() # V1alphaCreateEvaluationRequest |

    try:
        api_response = api_instance.evaluation_service_create_evaluation(body)
        print("The response of EvaluationServiceApi->evaluation_service_create_evaluation:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling EvaluationServiceApi->evaluation_service_create_evaluation: %s\n" % e)
```

### Parameters

Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
 **body** | [**V1alphaCreateEvaluationRequest**](V1alphaCreateEvaluationRequest.md)|  |

### Return type

[**V1alphaOperation**](V1alphaOperation.md)

### Authorization

No authorization required

### HTTP request headers

 - **Content-Type**: application/json
 - **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | A successful response. |  -  |
**0** | An unexpected error response. |  -  |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
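Note that the generated example above imports `ApiException` but catches a bare `Exception`. To branch on HTTP errors specifically, a sketch along the following lines should work; it assumes this client follows the usual openapi-generator convention of exposing `status`, `reason`, and `body` on `ApiException` (those attribute names are an assumption, not confirmed by this diff).

```python
import eval_studio_client.api
from eval_studio_client.api.rest import ApiException

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    api_instance = eval_studio_client.api.EvaluationServiceApi(api_client)
    body = eval_studio_client.api.V1alphaCreateEvaluationRequest()

    try:
        operation = api_instance.evaluation_service_create_evaluation(body)
        print(operation)  # a V1alphaOperation describing the evaluation run
    except ApiException as e:
        # Typical openapi-generator exception attributes; assumed, not confirmed here.
        print("Create evaluation failed: HTTP %s %s" % (e.status, e.reason))
        print(e.body)
```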

# **evaluation_service_import_evaluation**
> V1alphaOperation evaluation_service_import_evaluation(body)

### Example

```python
import eval_studio_client.api
from eval_studio_client.api.models.v1alpha_import_evaluation_request import V1alphaImportEvaluationRequest
from eval_studio_client.api.models.v1alpha_operation import V1alphaOperation
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)

# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.EvaluationServiceApi(api_client)
    body = eval_studio_client.api.V1alphaImportEvaluationRequest() # V1alphaImportEvaluationRequest |

    try:
        api_response = api_instance.evaluation_service_import_evaluation(body)
        print("The response of EvaluationServiceApi->evaluation_service_import_evaluation:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling EvaluationServiceApi->evaluation_service_import_evaluation: %s\n" % e)
```

### Parameters

Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
 **body** | [**V1alphaImportEvaluationRequest**](V1alphaImportEvaluationRequest.md)|  |

### Return type

[**V1alphaOperation**](V1alphaOperation.md)

### Authorization

No authorization required

### HTTP request headers

 - **Content-Type**: application/json
 - **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | A successful response. |  -  |
**0** | An unexpected error response. |  -  |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

# **evaluation_service_list_llm_models**
> V1alphaListLLMModelsResponse evaluation_service_list_llm_models(model_name=model_name, model_create_time=model_create_time, model_creator=model_creator, model_update_time=model_update_time, model_updater=model_updater, model_delete_time=model_delete_time, model_deleter=model_deleter, model_display_name=model_display_name, model_description=model_description, model_url=model_url, model_api_key=model_api_key, model_type=model_type, model_parameters=model_parameters, model_demo=model_demo, retries=retries)

### Example

```python
import eval_studio_client.api
from eval_studio_client.api.models.v1alpha_list_llm_models_response import V1alphaListLLMModelsResponse
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)

# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.EvaluationServiceApi(api_client)
    model_name = 'model_name_example' # str | Output only. Name of the Model resource. e.g.: \"models/<UUID>\" (optional)
    model_create_time = '2013-10-20T19:20:30+01:00' # datetime | Output only. Timestamp when the Model was created. (optional)
    model_creator = 'model_creator_example' # str | Output only. Name of the user or service that requested creation of the Model. (optional)
    model_update_time = '2013-10-20T19:20:30+01:00' # datetime | Output only. Optional. Timestamp when the Model was last updated. (optional)
    model_updater = 'model_updater_example' # str | Output only. Optional. Name of the user or service that requested update of the Model. (optional)
    model_delete_time = '2013-10-20T19:20:30+01:00' # datetime | Output only. Optional. Set when the Model is deleted. When set Model should be considered as deleted. (optional)
    model_deleter = 'model_deleter_example' # str | Output only. Optional. Name of the user or service that requested deletion of the Model. (optional)
    model_display_name = 'model_display_name_example' # str | Human readable name of the Model. (optional)
    model_description = 'model_description_example' # str | Optional. Arbitrary description of the Model. (optional)
    model_url = 'model_url_example' # str | Optional. Immutable. Absolute URL to the Model. (optional)
    model_api_key = 'model_api_key_example' # str | Optional. API key used to access the Model. Not set for read calls (i.e. get, list) by public clients (front-end). Set only for internal (server-to-worker) communication. (optional)
    model_type = 'MODEL_TYPE_UNSPECIFIED' # str | Immutable. Type of this Model. - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock. (optional) (default to 'MODEL_TYPE_UNSPECIFIED')
    model_parameters = 'model_parameters_example' # str | Optional. Model specific parameters in JSON format. (optional)
    model_demo = True # bool | Output only. Whether the Model is a demo resource or not. Demo resources are read only. (optional)
    retries = 56 # int | Optional. The number of retries to attempt when querying the model for available LLM models. Defaults to 5. (optional)

    try:
        api_response = api_instance.evaluation_service_list_llm_models(model_name=model_name, model_create_time=model_create_time, model_creator=model_creator, model_update_time=model_update_time, model_updater=model_updater, model_delete_time=model_delete_time, model_deleter=model_deleter, model_display_name=model_display_name, model_description=model_description, model_url=model_url, model_api_key=model_api_key, model_type=model_type, model_parameters=model_parameters, model_demo=model_demo, retries=retries)
        print("The response of EvaluationServiceApi->evaluation_service_list_llm_models:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling EvaluationServiceApi->evaluation_service_list_llm_models: %s\n" % e)
```

### Parameters

Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
 **model_name** | **str**| Output only. Name of the Model resource. e.g.: \"models/<UUID>\" | [optional]
 **model_create_time** | **datetime**| Output only. Timestamp when the Model was created. | [optional]
 **model_creator** | **str**| Output only. Name of the user or service that requested creation of the Model. | [optional]
 **model_update_time** | **datetime**| Output only. Optional. Timestamp when the Model was last updated. | [optional]
 **model_updater** | **str**| Output only. Optional. Name of the user or service that requested update of the Model. | [optional]
 **model_delete_time** | **datetime**| Output only. Optional. Set when the Model is deleted. When set Model should be considered as deleted. | [optional]
 **model_deleter** | **str**| Output only. Optional. Name of the user or service that requested deletion of the Model. | [optional]
 **model_display_name** | **str**| Human readable name of the Model. | [optional]
 **model_description** | **str**| Optional. Arbitrary description of the Model. | [optional]
 **model_url** | **str**| Optional. Immutable. Absolute URL to the Model. | [optional]
 **model_api_key** | **str**| Optional. API key used to access the Model. Not set for read calls (i.e. get, list) by public clients (front-end). Set only for internal (server-to-worker) communication. | [optional]
 **model_type** | **str**| Immutable. Type of this Model. - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock. | [optional] [default to 'MODEL_TYPE_UNSPECIFIED']
 **model_parameters** | **str**| Optional. Model specific parameters in JSON format. | [optional]
 **model_demo** | **bool**| Output only. Whether the Model is a demo resource or not. Demo resources are read only. | [optional]
 **retries** | **int**| Optional. The number of retries to attempt when querying the model for available LLM models. Defaults to 5. | [optional]

### Return type

[**V1alphaListLLMModelsResponse**](V1alphaListLLMModelsResponse.md)

### Authorization

No authorization required

### HTTP request headers

 - **Content-Type**: Not defined
 - **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | A successful response. |  -  |
**0** | An unexpected error response. |  -  |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
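All parameters of `evaluation_service_list_llm_models` are optional keyword arguments, so in practice you pass only the fields needed to identify the target model rather than every field shown in the generated example above. A trimmed sketch with placeholder values, assuming the model URL, API key, and type are enough for the server to resolve the model:

```python
import eval_studio_client.api
from pprint import pprint

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    api_instance = eval_studio_client.api.EvaluationServiceApi(api_client)
    # Pass only the keyword arguments you need; every one is optional per the signature above.
    api_response = api_instance.evaluation_service_list_llm_models(
        model_url="https://h2ogpte.example.com",  # placeholder URL
        model_api_key="sk-placeholder",           # placeholder key
        model_type="MODEL_TYPE_H2OGPTE_RAG",      # one of the documented enum values
        retries=3,
    )
    pprint(api_response)
```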
|
|
240
|
+
|
|
241
|
+
# **evaluation_service_list_rag_collections**
|
|
242
|
+
> V1alphaListRAGCollectionsResponse evaluation_service_list_rag_collections(model_name=model_name, model_create_time=model_create_time, model_creator=model_creator, model_update_time=model_update_time, model_updater=model_updater, model_delete_time=model_delete_time, model_deleter=model_deleter, model_display_name=model_display_name, model_description=model_description, model_url=model_url, model_api_key=model_api_key, model_type=model_type, model_parameters=model_parameters, model_demo=model_demo)
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
### Example
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
```python
|
|
250
|
+
import eval_studio_client.api
|
|
251
|
+
from eval_studio_client.api.models.v1alpha_list_rag_collections_response import V1alphaListRAGCollectionsResponse
|
|
252
|
+
from eval_studio_client.api.rest import ApiException
|
|
253
|
+
from pprint import pprint
|
|
254
|
+
|
|
255
|
+
# Defining the host is optional and defaults to http://localhost
|
|
256
|
+
# See configuration.py for a list of all supported configuration parameters.
|
|
257
|
+
configuration = eval_studio_client.api.Configuration(
|
|
258
|
+
host = "http://localhost"
|
|
259
|
+
)
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
# Enter a context with an instance of the API client
|
|
263
|
+
with eval_studio_client.api.ApiClient(configuration) as api_client:
|
|
264
|
+
# Create an instance of the API class
|
|
265
|
+
api_instance = eval_studio_client.api.EvaluationServiceApi(api_client)
|
|
266
|
+
model_name = 'model_name_example' # str | Output only. Name of the Model resource. e.g.: \"models/<UUID>\" (optional)
|
|
267
|
+
model_create_time = '2013-10-20T19:20:30+01:00' # datetime | Output only. Timestamp when the Model was created. (optional)
|
|
268
|
+
model_creator = 'model_creator_example' # str | Output only. Name of the user or service that requested creation of the Model. (optional)
|
|
269
|
+
model_update_time = '2013-10-20T19:20:30+01:00' # datetime | Output only. Optional. Timestamp when the Model was last updated. (optional)
|
|
270
|
+
model_updater = 'model_updater_example' # str | Output only. Optional. Name of the user or service that requested update of the Model. (optional)
|
|
271
|
+
model_delete_time = '2013-10-20T19:20:30+01:00' # datetime | Output only. Optional. Set when the Model is deleted. When set Model should be considered as deleted. (optional)
|
|
272
|
+
model_deleter = 'model_deleter_example' # str | Output only. Optional. Name of the user or service that requested deletion of the Model. (optional)
|
|
273
|
+
model_display_name = 'model_display_name_example' # str | Human readable name of the Model. (optional)
|
|
274
|
+
model_description = 'model_description_example' # str | Optional. Arbitrary description of the Model. (optional)
|
|
275
|
+
model_url = 'model_url_example' # str | Optional. Immutable. Absolute URL to the Model. (optional)
|
|
276
|
+
model_api_key = 'model_api_key_example' # str | Optional. API key used to access the Model. Not set for read calls (i.e. get, list) by public clients (front-end). Set only for internal (server-to-worker) communication. (optional)
|
|
277
|
+
model_type = 'MODEL_TYPE_UNSPECIFIED' # str | Immutable. Type of this Model. - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock. (optional) (default to 'MODEL_TYPE_UNSPECIFIED')
|
|
278
|
+
model_parameters = 'model_parameters_example' # str | Optional. Model specific parameters in JSON format. (optional)
|
|
279
|
+
model_demo = True # bool | Output only. Whether the Model is a demo resource or not. Demo resources are read only. (optional)
|
|
280
|
+
|
|
281
|
+
try:
|
|
282
|
+
api_response = api_instance.evaluation_service_list_rag_collections(model_name=model_name, model_create_time=model_create_time, model_creator=model_creator, model_update_time=model_update_time, model_updater=model_updater, model_delete_time=model_delete_time, model_deleter=model_deleter, model_display_name=model_display_name, model_description=model_description, model_url=model_url, model_api_key=model_api_key, model_type=model_type, model_parameters=model_parameters, model_demo=model_demo)
|
|
283
|
+
print("The response of EvaluationServiceApi->evaluation_service_list_rag_collections:\n")
|
|
284
|
+
pprint(api_response)
|
|
285
|
+
except Exception as e:
|
|
286
|
+
print("Exception when calling EvaluationServiceApi->evaluation_service_list_rag_collections: %s\n" % e)
|
|
287
|
+
```
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
### Parameters
|
|
292
|
+
|
|
293
|
+
|
|
294
|
+
Name | Type | Description | Notes
|
|
295
|
+
------------- | ------------- | ------------- | -------------
|
|
296
|
+
**model_name** | **str**| Output only. Name of the Model resource. e.g.: \"models/<UUID>\" | [optional]
|
|
297
|
+
**model_create_time** | **datetime**| Output only. Timestamp when the Model was created. | [optional]
|
|
298
|
+
**model_creator** | **str**| Output only. Name of the user or service that requested creation of the Model. | [optional]
|
|
299
|
+
**model_update_time** | **datetime**| Output only. Optional. Timestamp when the Model was last updated. | [optional]
|
|
300
|
+
**model_updater** | **str**| Output only. Optional. Name of the user or service that requested update of the Model. | [optional]
|
|
301
|
+
**model_delete_time** | **datetime**| Output only. Optional. Set when the Model is deleted. When set Model should be considered as deleted. | [optional]
|
|
302
|
+
**model_deleter** | **str**| Output only. Optional. Name of the user or service that requested deletion of the Model. | [optional]
|
|
303
|
+
**model_display_name** | **str**| Human readable name of the Model. | [optional]
|
|
304
|
+
**model_description** | **str**| Optional. Arbitrary description of the Model. | [optional]
|
|
305
|
+
**model_url** | **str**| Optional. Immutable. Absolute URL to the Model. | [optional]
|
|
306
|
+
**model_api_key** | **str**| Optional. API key used to access the Model. Not set for read calls (i.e. get, list) by public clients (front-end). Set only for internal (server-to-worker) communication. | [optional]
|
|
307
|
+
**model_type** | **str**| Immutable. Type of this Model. - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock. | [optional] [default to 'MODEL_TYPE_UNSPECIFIED']
|
|
308
|
+
**model_parameters** | **str**| Optional. Model specific parameters in JSON format. | [optional]
|
|
309
|
+
**model_demo** | **bool**| Output only. Whether the Model is a demo resource or not. Demo resources are read only. | [optional]

### Return type

[**V1alphaListRAGCollectionsResponse**](V1alphaListRAGCollectionsResponse.md)

### Authorization

No authorization required

### HTTP request headers

 - **Content-Type**: Not defined
 - **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | A successful response. | - |
**0** | An unexpected error response. | - |
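
The `0` status row is the catch-all for error responses; in the generated Python client these surface as an `ApiException` raised by the call. A minimal sketch of inspecting it, assuming the usual openapi-generator exception attributes (`status`, `reason`, `body`) and the `api_instance` from the example above:

```python
from eval_studio_client.api.rest import ApiException

try:
    api_response = api_instance.evaluation_service_list_rag_collections()
except ApiException as e:
    # The body carries the serialized error payload rather than a
    # V1alphaListRAGCollectionsResponse.
    print("HTTP status:", e.status)
    print("Reason:", e.reason)
    print("Error body:", e.body)
```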

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

@@ -0,0 +1,345 @@

# eval_studio_client.api.EvaluatorServiceApi

All URIs are relative to *http://localhost*

Method | HTTP request | Description
------------- | ------------- | -------------
[**evaluator_service_batch_delete_evaluators**](EvaluatorServiceApi.md#evaluator_service_batch_delete_evaluators) | **POST** /v1alpha/evaluators:batchDelete | 
[**evaluator_service_create_evaluator**](EvaluatorServiceApi.md#evaluator_service_create_evaluator) | **POST** /v1alpha/evaluators | 
[**evaluator_service_delete_evaluator**](EvaluatorServiceApi.md#evaluator_service_delete_evaluator) | **DELETE** /v1alpha/{name_2} | 
[**evaluator_service_get_evaluator**](EvaluatorServiceApi.md#evaluator_service_get_evaluator) | **GET** /v1alpha/{name_2} | 
[**evaluator_service_list_evaluators**](EvaluatorServiceApi.md#evaluator_service_list_evaluators) | **GET** /v1alpha/evaluators | 

# **evaluator_service_batch_delete_evaluators**
> V1alphaBatchDeleteEvaluatorsResponse evaluator_service_batch_delete_evaluators(body)


### Example


```python
import eval_studio_client.api
from eval_studio_client.api.models.v1alpha_batch_delete_evaluators_request import V1alphaBatchDeleteEvaluatorsRequest
from eval_studio_client.api.models.v1alpha_batch_delete_evaluators_response import V1alphaBatchDeleteEvaluatorsResponse
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)


# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.EvaluatorServiceApi(api_client)
    body = eval_studio_client.api.V1alphaBatchDeleteEvaluatorsRequest() # V1alphaBatchDeleteEvaluatorsRequest | 

    try:
        api_response = api_instance.evaluator_service_batch_delete_evaluators(body)
        print("The response of EvaluatorServiceApi->evaluator_service_batch_delete_evaluators:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling EvaluatorServiceApi->evaluator_service_batch_delete_evaluators: %s\n" % e)
```

### Parameters


Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**body** | [**V1alphaBatchDeleteEvaluatorsRequest**](V1alphaBatchDeleteEvaluatorsRequest.md)| |
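
The example above constructs an empty `V1alphaBatchDeleteEvaluatorsRequest()`; a real request has to identify which Evaluators to delete. A sketch only: the `names` field and the `evaluators/<UUID>` name format are assumptions, not confirmed by this page; see [V1alphaBatchDeleteEvaluatorsRequest](V1alphaBatchDeleteEvaluatorsRequest.md) for the actual schema:

```python
# Assumed field name and resource-name format; replace with the real schema
# from V1alphaBatchDeleteEvaluatorsRequest.md and real Evaluator names.
body = eval_studio_client.api.V1alphaBatchDeleteEvaluatorsRequest(
    names=[
        "evaluators/00000000-0000-0000-0000-000000000001",
        "evaluators/00000000-0000-0000-0000-000000000002",
    ]
)
api_response = api_instance.evaluator_service_batch_delete_evaluators(body)
```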

### Return type

[**V1alphaBatchDeleteEvaluatorsResponse**](V1alphaBatchDeleteEvaluatorsResponse.md)

### Authorization

No authorization required

### HTTP request headers

 - **Content-Type**: application/json
 - **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | A successful response. | - |
**0** | An unexpected error response. | - |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

# **evaluator_service_create_evaluator**
> V1alphaCreateEvaluatorResponse evaluator_service_create_evaluator(evaluator)


### Example


```python
import eval_studio_client.api
from eval_studio_client.api.models.v1alpha_create_evaluator_response import V1alphaCreateEvaluatorResponse
from eval_studio_client.api.models.v1alpha_evaluator import V1alphaEvaluator
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)


# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.EvaluatorServiceApi(api_client)
    evaluator = eval_studio_client.api.V1alphaEvaluator() # V1alphaEvaluator | Required. The Evaluator to create.

    try:
        api_response = api_instance.evaluator_service_create_evaluator(evaluator)
        print("The response of EvaluatorServiceApi->evaluator_service_create_evaluator:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling EvaluatorServiceApi->evaluator_service_create_evaluator: %s\n" % e)
```

### Parameters


Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**evaluator** | [**V1alphaEvaluator**](V1alphaEvaluator.md)| Required. The Evaluator to create. | 

### Return type

[**V1alphaCreateEvaluatorResponse**](V1alphaCreateEvaluatorResponse.md)

### Authorization

No authorization required

### HTTP request headers

 - **Content-Type**: application/json
 - **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | A successful response. | - |
**0** | An unexpected error response. | - |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

# **evaluator_service_delete_evaluator**
> V1alphaDeleteEvaluatorResponse evaluator_service_delete_evaluator(name_2)


### Example


```python
import eval_studio_client.api
from eval_studio_client.api.models.v1alpha_delete_evaluator_response import V1alphaDeleteEvaluatorResponse
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)


# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.EvaluatorServiceApi(api_client)
    name_2 = 'name_2_example' # str | Required. The name of the Evaluator to delete.

    try:
        api_response = api_instance.evaluator_service_delete_evaluator(name_2)
        print("The response of EvaluatorServiceApi->evaluator_service_delete_evaluator:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling EvaluatorServiceApi->evaluator_service_delete_evaluator: %s\n" % e)
```

### Parameters


Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**name_2** | **str**| Required. The name of the Evaluator to delete. | 

### Return type

[**V1alphaDeleteEvaluatorResponse**](V1alphaDeleteEvaluatorResponse.md)

### Authorization

No authorization required

### HTTP request headers

 - **Content-Type**: Not defined
 - **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | A successful response. | - |
**0** | An unexpected error response. | - |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

# **evaluator_service_get_evaluator**
> V1alphaGetEvaluatorResponse evaluator_service_get_evaluator(name_2)


### Example


```python
import eval_studio_client.api
from eval_studio_client.api.models.v1alpha_get_evaluator_response import V1alphaGetEvaluatorResponse
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)


# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.EvaluatorServiceApi(api_client)
    name_2 = 'name_2_example' # str | Required. The name of the Evaluator to retrieve.

    try:
        api_response = api_instance.evaluator_service_get_evaluator(name_2)
        print("The response of EvaluatorServiceApi->evaluator_service_get_evaluator:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling EvaluatorServiceApi->evaluator_service_get_evaluator: %s\n" % e)
```

### Parameters


Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**name_2** | **str**| Required. The name of the Evaluator to retrieve. |
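
`name_2` is the full resource name of the Evaluator rather than a bare ID. By analogy with the `"models/<UUID>"` convention documented for Models, Evaluator names presumably look like `evaluators/<UUID>`, although this page does not spell that out. A minimal sketch, reusing the `api_instance` from the example above:

```python
# Assumed resource-name format; substitute a name returned by
# evaluator_service_list_evaluators.
name_2 = "evaluators/123e4567-e89b-12d3-a456-426614174000"
api_response = api_instance.evaluator_service_get_evaluator(name_2)
pprint(api_response)
```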

### Return type

[**V1alphaGetEvaluatorResponse**](V1alphaGetEvaluatorResponse.md)

### Authorization

No authorization required

### HTTP request headers

 - **Content-Type**: Not defined
 - **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | A successful response. | - |
**0** | An unexpected error response. | - |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

# **evaluator_service_list_evaluators**
> V1alphaListEvaluatorsResponse evaluator_service_list_evaluators(view=view)


### Example


```python
import eval_studio_client.api
from eval_studio_client.api.models.v1alpha_list_evaluators_response import V1alphaListEvaluatorsResponse
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)


# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.EvaluatorServiceApi(api_client)
    view = 'EVALUATOR_VIEW_UNSPECIFIED' # str | Optional. View specifies the amount of information included in the Evaluator's description. Brief view includes only short descriptions, which can significantly decrease the amount of data transferred. - EVALUATOR_VIEW_UNSPECIFIED: The default / unset value. The API will default to the EVALUATOR_VIEW_BRIEF. - EVALUATOR_VIEW_BRIEF: Brief view of the Evaluator, which doesn't include the long description, only the brief one. - EVALUATOR_VIEW_FULL: Full view of the evaluator, including brief and full description. (optional) (default to 'EVALUATOR_VIEW_UNSPECIFIED')

    try:
        api_response = api_instance.evaluator_service_list_evaluators(view=view)
        print("The response of EvaluatorServiceApi->evaluator_service_list_evaluators:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling EvaluatorServiceApi->evaluator_service_list_evaluators: %s\n" % e)
```

### Parameters


Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**view** | **str**| Optional. View specifies the amount of information included in the Evaluator's description. Brief view includes only short descriptions, which can significantly decrease the amount of data transferred. - EVALUATOR_VIEW_UNSPECIFIED: The default / unset value. The API will default to the EVALUATOR_VIEW_BRIEF. - EVALUATOR_VIEW_BRIEF: Brief view of the Evaluator, which doesn't include the long description, only the brief one. - EVALUATOR_VIEW_FULL: Full view of the evaluator, including brief and full description. | [optional] [default to 'EVALUATOR_VIEW_UNSPECIFIED']
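
Because `EVALUATOR_VIEW_UNSPECIFIED` falls back to the brief view, the long descriptions are only returned when the full view is requested explicitly. A minimal sketch, reusing the `api_instance` from the example above:

```python
# Request the full view so each Evaluator includes both its brief and its
# long description.
api_response = api_instance.evaluator_service_list_evaluators(view="EVALUATOR_VIEW_FULL")
pprint(api_response)
```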

### Return type

[**V1alphaListEvaluatorsResponse**](V1alphaListEvaluatorsResponse.md)

### Authorization

No authorization required

### HTTP request headers

 - **Content-Type**: Not defined
 - **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | A successful response. | - |
**0** | An unexpected error response. | - |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)