eval-studio-client 1.0.0a1__py3-none-any.whl → 1.1.0a5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/api/__init__.py +79 -1
- eval_studio_client/api/api/__init__.py +9 -0
- eval_studio_client/api/api/adversarial_inputs_service_api.py +321 -0
- eval_studio_client/api/api/dashboard_service_api.py +1 -1
- eval_studio_client/api/api/document_service_api.py +1 -1
- eval_studio_client/api/api/evaluation_service_api.py +1 -1
- eval_studio_client/api/api/evaluator_service_api.py +1 -1
- eval_studio_client/api/api/generated_questions_validation_service_api.py +321 -0
- eval_studio_client/api/api/human_calibration_service_api.py +304 -0
- eval_studio_client/api/api/info_service_api.py +1 -1
- eval_studio_client/api/api/leaderboard_report_service_api.py +292 -0
- eval_studio_client/api/api/leaderboard_service_api.py +17 -17
- eval_studio_client/api/api/model_service_api.py +17 -17
- eval_studio_client/api/api/operation_progress_service_api.py +1 -1
- eval_studio_client/api/api/operation_service_api.py +272 -17
- eval_studio_client/api/api/perturbation_service_api.py +1 -1
- eval_studio_client/api/api/perturbator_service_api.py +285 -18
- eval_studio_client/api/api/prompt_generation_service_api.py +1 -1
- eval_studio_client/api/api/prompt_library_service_api.py +669 -0
- eval_studio_client/api/api/test_case_relationship_service_api.py +292 -0
- eval_studio_client/api/api/test_case_service_api.py +17 -17
- eval_studio_client/api/api/test_class_service_api.py +17 -17
- eval_studio_client/api/api/test_lab_service_api.py +1 -1
- eval_studio_client/api/api/test_service_api.py +1238 -102
- eval_studio_client/api/api/who_am_i_service_api.py +1 -1
- eval_studio_client/api/api/workflow_edge_service_api.py +835 -0
- eval_studio_client/api/api/workflow_node_service_api.py +2431 -0
- eval_studio_client/api/api/workflow_service_api.py +1893 -0
- eval_studio_client/api/api_client.py +1 -1
- eval_studio_client/api/configuration.py +1 -1
- eval_studio_client/api/docs/AdversarialInputsServiceApi.md +78 -0
- eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +45 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceApi.md +78 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.md +30 -0
- eval_studio_client/api/docs/HumanCalibrationServiceApi.md +77 -0
- eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +5 -5
- eval_studio_client/api/docs/ModelServiceApi.md +5 -5
- eval_studio_client/api/docs/OperationServiceApi.md +72 -5
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +1 -0
- eval_studio_client/api/docs/PerturbatorServiceApi.md +38 -8
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +4 -2
- eval_studio_client/api/docs/PromptLibraryServiceApi.md +155 -0
- eval_studio_client/api/docs/ProtobufNullValue.md +12 -0
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +3 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +47 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflowNode.md +44 -0
- eval_studio_client/api/docs/TestCaseRelationshipServiceApi.md +75 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +5 -5
- eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
- eval_studio_client/api/docs/TestServiceApi.md +285 -5
- eval_studio_client/api/docs/TestServiceCloneTestRequest.md +30 -0
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +3 -1
- eval_studio_client/api/docs/TestServiceImportTestCasesFromLibraryRequest.md +32 -0
- eval_studio_client/api/docs/TestServiceListTestCaseLibraryItemsRequest.md +35 -0
- eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md +30 -0
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +1 -0
- eval_studio_client/api/docs/V1AbortOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetWorkflowEdgesResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetWorkflowNodesResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneTestResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Context.md +32 -0
- eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1CreateWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1EstimateThresholdRequest.md +33 -0
- eval_studio_client/api/docs/V1GeneratedTestCase.md +30 -0
- eval_studio_client/api/docs/V1GetLeaderboardReportResponse.md +29 -0
- eval_studio_client/api/docs/V1GetWorkflowNodePrerequisitesResponse.md +30 -0
- eval_studio_client/api/docs/V1GetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1GetWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1ImportEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1ImportTestCasesFromLibraryResponse.md +29 -0
- eval_studio_client/api/docs/V1ImportTestCasesRequest.md +33 -0
- eval_studio_client/api/docs/V1Info.md +3 -0
- eval_studio_client/api/docs/V1InitWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1LabeledTestCase.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReport.md +32 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputData.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluator.md +42 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluatorParameter.md +38 -0
- eval_studio_client/api/docs/V1LeaderboardReportExplanation.md +34 -0
- eval_studio_client/api/docs/V1LeaderboardReportMetricsMetaEntry.md +41 -0
- eval_studio_client/api/docs/V1LeaderboardReportModel.md +39 -0
- eval_studio_client/api/docs/V1LeaderboardReportResult.md +45 -0
- eval_studio_client/api/docs/V1LeaderboardReportResultRelationship.md +32 -0
- eval_studio_client/api/docs/V1ListPromptLibraryItemsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCaseLibraryItemsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCaseRelationshipsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListWorkflowsResponse.md +29 -0
- eval_studio_client/api/docs/V1MetricScore.md +31 -0
- eval_studio_client/api/docs/V1MetricScores.md +29 -0
- eval_studio_client/api/docs/V1PerturbTestInPlaceResponse.md +29 -0
- eval_studio_client/api/docs/V1ProcessWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1PromptLibraryItem.md +42 -0
- eval_studio_client/api/docs/V1RepeatedString.md +29 -0
- eval_studio_client/api/docs/V1ResetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1TestCase.md +3 -0
- eval_studio_client/api/docs/V1TestSuiteEvaluates.md +11 -0
- eval_studio_client/api/docs/V1UpdateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Workflow.md +49 -0
- eval_studio_client/api/docs/V1WorkflowEdge.md +40 -0
- eval_studio_client/api/docs/V1WorkflowEdgeType.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNode.md +46 -0
- eval_studio_client/api/docs/V1WorkflowNodeArtifact.md +40 -0
- eval_studio_client/api/docs/V1WorkflowNodeArtifacts.md +29 -0
- eval_studio_client/api/docs/V1WorkflowNodeAttributes.md +30 -0
- eval_studio_client/api/docs/V1WorkflowNodeStatus.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNodeType.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNodeView.md +12 -0
- eval_studio_client/api/docs/V1WorkflowType.md +12 -0
- eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +215 -0
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +632 -0
- eval_studio_client/api/docs/WorkflowServiceApi.md +488 -0
- eval_studio_client/api/docs/WorkflowServiceCloneWorkflowRequest.md +33 -0
- eval_studio_client/api/exceptions.py +1 -1
- eval_studio_client/api/models/__init__.py +70 -1
- eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +143 -0
- eval_studio_client/api/models/generated_questions_validation_service_validate_generated_questions_request.py +97 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +9 -3
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +17 -6
- eval_studio_client/api/models/protobuf_any.py +1 -1
- eval_studio_client/api/models/protobuf_null_value.py +36 -0
- eval_studio_client/api/models/required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_document_to_update.py +1 -1
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_model_to_update.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_case_to_update.py +14 -3
- eval_studio_client/api/models/required_the_test_to_update.py +1 -1
- eval_studio_client/api/models/required_the_updated_workflow.py +160 -0
- eval_studio_client/api/models/required_the_updated_workflow_node.py +152 -0
- eval_studio_client/api/models/rpc_status.py +1 -1
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/models/test_service_clone_test_request.py +89 -0
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +8 -4
- eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +93 -0
- eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +99 -0
- eval_studio_client/api/models/test_service_perturb_test_in_place_request.py +97 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +5 -3
- eval_studio_client/api/models/v1_abort_operation_response.py +91 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_request.py +87 -0
- eval_studio_client/api/models/v1_batch_delete_workflows_response.py +95 -0
- eval_studio_client/api/models/v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +95 -0
- eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +95 -0
- eval_studio_client/api/models/v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/models/v1_check_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_clone_test_response.py +91 -0
- eval_studio_client/api/models/v1_clone_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_collection_info.py +1 -1
- eval_studio_client/api/models/v1_context.py +93 -0
- eval_studio_client/api/models/v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_document_response.py +1 -1
- eval_studio_client/api/models/v1_create_evaluation_request.py +8 -3
- eval_studio_client/api/models/v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/models/v1_create_model_response.py +1 -1
- eval_studio_client/api/models/v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_response.py +1 -1
- eval_studio_client/api/models/v1_create_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_dashboard.py +1 -1
- eval_studio_client/api/models/v1_dashboard_status.py +1 -1
- eval_studio_client/api/models/v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_document_response.py +1 -1
- eval_studio_client/api/models/v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_model_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_document.py +1 -1
- eval_studio_client/api/models/v1_estimate_threshold_request.py +103 -0
- eval_studio_client/api/models/v1_evaluation_test.py +1 -1
- eval_studio_client/api/models/v1_evaluator.py +1 -1
- eval_studio_client/api/models/v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/models/v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/models/v1_evaluator_view.py +1 -1
- eval_studio_client/api/models/v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/models/v1_find_all_test_cases_by_id_response.py +1 -1
- eval_studio_client/api/models/v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_generated_test_case.py +101 -0
- eval_studio_client/api/models/v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_document_response.py +1 -1
- eval_studio_client/api/models/v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_get_info_response.py +1 -1
- eval_studio_client/api/models/v1_get_leaderboard_report_response.py +91 -0
- eval_studio_client/api/models/v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_model_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_response.py +1 -1
- eval_studio_client/api/models/v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_class_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +89 -0
- eval_studio_client/api/models/v1_get_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_get_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_import_evaluation_request.py +8 -3
- eval_studio_client/api/models/v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +91 -0
- eval_studio_client/api/models/v1_import_test_cases_request.py +95 -0
- eval_studio_client/api/models/v1_info.py +10 -4
- eval_studio_client/api/models/v1_init_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_insight.py +1 -1
- eval_studio_client/api/models/v1_labeled_test_case.py +91 -0
- eval_studio_client/api/models/v1_leaderboard.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_report.py +115 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_data.py +93 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +101 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator.py +155 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator_parameter.py +109 -0
- eval_studio_client/api/models/v1_leaderboard_report_explanation.py +103 -0
- eval_studio_client/api/models/v1_leaderboard_report_metrics_meta_entry.py +129 -0
- eval_studio_client/api/models/v1_leaderboard_report_model.py +121 -0
- eval_studio_client/api/models/v1_leaderboard_report_result.py +175 -0
- eval_studio_client/api/models/v1_leaderboard_report_result_relationship.py +97 -0
- eval_studio_client/api/models/v1_leaderboard_status.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_type.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_view.py +1 -1
- eval_studio_client/api/models/v1_list_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_documents_response.py +1 -1
- eval_studio_client/api/models/v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_operations_response.py +1 -1
- eval_studio_client/api/models/v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/models/v1_list_prompt_library_items_response.py +95 -0
- eval_studio_client/api/models/v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_library_items_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_case_relationships_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/models/v1_list_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_workflows_response.py +95 -0
- eval_studio_client/api/models/v1_metric_score.py +89 -0
- eval_studio_client/api/models/v1_metric_scores.py +95 -0
- eval_studio_client/api/models/v1_model.py +1 -1
- eval_studio_client/api/models/v1_model_type.py +1 -1
- eval_studio_client/api/models/v1_operation.py +1 -1
- eval_studio_client/api/models/v1_operation_progress.py +1 -1
- eval_studio_client/api/models/v1_perturb_test_in_place_response.py +91 -0
- eval_studio_client/api/models/v1_perturb_test_response.py +1 -1
- eval_studio_client/api/models/v1_perturbator.py +1 -1
- eval_studio_client/api/models/v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/models/v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/models/v1_problem_and_action.py +1 -1
- eval_studio_client/api/models/v1_process_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_prompt_library_item.py +129 -0
- eval_studio_client/api/models/v1_repeated_string.py +87 -0
- eval_studio_client/api/models/v1_reset_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_test.py +1 -1
- eval_studio_client/api/models/v1_test_case.py +14 -3
- eval_studio_client/api/models/v1_test_case_relationship.py +1 -1
- eval_studio_client/api/models/v1_test_cases_generator.py +1 -1
- eval_studio_client/api/models/v1_test_class.py +1 -1
- eval_studio_client/api/models/v1_test_class_type.py +1 -1
- eval_studio_client/api/models/v1_test_lab.py +1 -1
- eval_studio_client/api/models/v1_test_suite_evaluates.py +39 -0
- eval_studio_client/api/models/v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_document_response.py +1 -1
- eval_studio_client/api/models/v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_model_response.py +1 -1
- eval_studio_client/api/models/v1_update_operation_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_update_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_who_am_i_response.py +1 -1
- eval_studio_client/api/models/v1_workflow.py +164 -0
- eval_studio_client/api/models/v1_workflow_edge.py +123 -0
- eval_studio_client/api/models/v1_workflow_edge_type.py +37 -0
- eval_studio_client/api/models/v1_workflow_node.py +156 -0
- eval_studio_client/api/models/v1_workflow_node_artifact.py +122 -0
- eval_studio_client/api/models/v1_workflow_node_artifacts.py +97 -0
- eval_studio_client/api/models/v1_workflow_node_attributes.py +87 -0
- eval_studio_client/api/models/v1_workflow_node_status.py +40 -0
- eval_studio_client/api/models/v1_workflow_node_type.py +44 -0
- eval_studio_client/api/models/v1_workflow_node_view.py +38 -0
- eval_studio_client/api/models/v1_workflow_type.py +37 -0
- eval_studio_client/api/models/workflow_service_clone_workflow_request.py +95 -0
- eval_studio_client/api/rest.py +1 -1
- eval_studio_client/api/test/test_adversarial_inputs_service_api.py +37 -0
- eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +128 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
- eval_studio_client/api/test/test_document_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
- eval_studio_client/api/test/test_generated_questions_validation_service_api.py +37 -0
- eval_studio_client/api/test/test_generated_questions_validation_service_validate_generated_questions_request.py +83 -0
- eval_studio_client/api/test/test_human_calibration_service_api.py +38 -0
- eval_studio_client/api/test/test_info_service_api.py +1 -1
- eval_studio_client/api/test/test_leaderboard_report_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
- eval_studio_client/api/test/test_model_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_service_api.py +7 -1
- eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +25 -3
- eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +13 -5
- eval_studio_client/api/test/test_prompt_library_service_api.py +43 -0
- eval_studio_client/api/test/test_protobuf_any.py +1 -1
- eval_studio_client/api/test/test_protobuf_null_value.py +33 -0
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +9 -2
- eval_studio_client/api/test/test_required_the_test_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_updated_workflow.py +91 -0
- eval_studio_client/api/test/test_required_the_updated_workflow_node.py +80 -0
- eval_studio_client/api/test/test_rpc_status.py +1 -1
- eval_studio_client/api/test/test_test_case_relationship_service_api.py +37 -0
- eval_studio_client/api/test/test_test_case_service_api.py +1 -1
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_test_class_service_api.py +1 -1
- eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
- eval_studio_client/api/test/test_test_service_api.py +25 -1
- eval_studio_client/api/test/test_test_service_clone_test_request.py +52 -0
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +8 -2
- eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +56 -0
- eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +63 -0
- eval_studio_client/api/test/test_test_service_perturb_test_in_place_request.py +59 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +5 -2
- eval_studio_client/api/test/test_v1_abort_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +9 -2
- eval_studio_client/api/test/test_v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +53 -0
- eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +95 -0
- eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +64 -0
- eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +84 -0
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_check_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_clone_test_response.py +67 -0
- eval_studio_client/api/test/test_v1_clone_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_collection_info.py +1 -1
- eval_studio_client/api/test/test_v1_context.py +54 -0
- eval_studio_client/api/test/test_v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +25 -3
- eval_studio_client/api/test/test_v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_create_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_create_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_dashboard.py +1 -1
- eval_studio_client/api/test/test_v1_dashboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_delete_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_delete_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_document.py +1 -1
- eval_studio_client/api/test/test_v1_estimate_threshold_request.py +60 -0
- eval_studio_client/api/test/test_v1_evaluation_test.py +9 -2
- eval_studio_client/api/test/test_v1_evaluator.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_view.py +1 -1
- eval_studio_client/api/test/test_v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +9 -2
- eval_studio_client/api/test/test_v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/test/test_v1_generated_test_case.py +79 -0
- eval_studio_client/api/test/test_v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_info_response.py +7 -2
- eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +175 -0
- eval_studio_client/api/test/test_v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_get_test_class_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +56 -0
- eval_studio_client/api/test/test_v1_get_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_get_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_import_evaluation_request.py +17 -2
- eval_studio_client/api/test/test_v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +71 -0
- eval_studio_client/api/test/test_v1_import_test_cases_request.py +57 -0
- eval_studio_client/api/test/test_v1_info.py +7 -2
- eval_studio_client/api/test/test_v1_init_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_insight.py +1 -1
- eval_studio_client/api/test/test_v1_labeled_test_case.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_report.py +174 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py +52 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +56 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py +114 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py +63 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py +58 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py +66 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_model.py +62 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result.py +92 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_type.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_view.py +1 -1
- eval_studio_client/api/test/test_v1_list_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +71 -0
- eval_studio_client/api/test/test_v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +71 -0
- eval_studio_client/api/test/test_v1_list_test_case_relationships_response.py +56 -0
- eval_studio_client/api/test/test_v1_list_test_cases_response.py +9 -2
- eval_studio_client/api/test/test_v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_workflows_response.py +95 -0
- eval_studio_client/api/test/test_v1_metric_score.py +52 -0
- eval_studio_client/api/test/test_v1_metric_scores.py +55 -0
- eval_studio_client/api/test/test_v1_model.py +1 -1
- eval_studio_client/api/test/test_v1_model_type.py +1 -1
- eval_studio_client/api/test/test_v1_operation.py +1 -1
- eval_studio_client/api/test/test_v1_operation_progress.py +1 -1
- eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +67 -0
- eval_studio_client/api/test/test_v1_perturb_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/test/test_v1_problem_and_action.py +1 -1
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +71 -0
- eval_studio_client/api/test/test_v1_prompt_library_item.py +68 -0
- eval_studio_client/api/test/test_v1_repeated_string.py +53 -0
- eval_studio_client/api/test/test_v1_reset_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_test.py +1 -1
- eval_studio_client/api/test/test_v1_test_case.py +9 -2
- eval_studio_client/api/test/test_v1_test_case_relationship.py +1 -1
- eval_studio_client/api/test/test_v1_test_cases_generator.py +1 -1
- eval_studio_client/api/test/test_v1_test_class.py +1 -1
- eval_studio_client/api/test/test_v1_test_class_type.py +1 -1
- eval_studio_client/api/test/test_v1_test_lab.py +1 -1
- eval_studio_client/api/test/test_v1_test_suite_evaluates.py +33 -0
- eval_studio_client/api/test/test_v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_update_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_update_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_who_am_i_response.py +1 -1
- eval_studio_client/api/test/test_v1_workflow.py +92 -0
- eval_studio_client/api/test/test_v1_workflow_edge.py +61 -0
- eval_studio_client/api/test/test_v1_workflow_edge_type.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node.py +81 -0
- eval_studio_client/api/test/test_v1_workflow_node_artifact.py +61 -0
- eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +64 -0
- eval_studio_client/api/test/test_v1_workflow_node_attributes.py +51 -0
- eval_studio_client/api/test/test_v1_workflow_node_status.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node_type.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node_view.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_type.py +33 -0
- eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
- eval_studio_client/api/test/test_workflow_edge_service_api.py +52 -0
- eval_studio_client/api/test/test_workflow_node_service_api.py +94 -0
- eval_studio_client/api/test/test_workflow_service_api.py +80 -0
- eval_studio_client/api/test/test_workflow_service_clone_workflow_request.py +55 -0
- eval_studio_client/client.py +7 -0
- eval_studio_client/dashboards.py +66 -18
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +5132 -1847
- eval_studio_client/leaderboards.py +125 -0
- eval_studio_client/models.py +3 -42
- eval_studio_client/test_labs.py +49 -21
- eval_studio_client/tests.py +323 -58
- eval_studio_client/utils.py +26 -0
- {eval_studio_client-1.0.0a1.dist-info → eval_studio_client-1.1.0a5.dist-info}/METADATA +2 -3
- eval_studio_client-1.1.0a5.dist-info/RECORD +720 -0
- {eval_studio_client-1.0.0a1.dist-info → eval_studio_client-1.1.0a5.dist-info}/WHEEL +1 -1
- eval_studio_client-1.0.0a1.dist-info/RECORD +0 -485
eval_studio_client/api/docs/AdversarialInputsServiceApi.md
ADDED
@@ -0,0 +1,78 @@

# eval_studio_client.api.AdversarialInputsServiceApi

All URIs are relative to *http://localhost*

Method | HTTP request | Description
------------- | ------------- | -------------
[**adversarial_inputs_service_test_adversarial_inputs_robustness**](AdversarialInputsServiceApi.md#adversarial_inputs_service_test_adversarial_inputs_robustness) | **POST** /v1/{test}:testAdversarialInputsRobustness |

# **adversarial_inputs_service_test_adversarial_inputs_robustness**
> V1Operation adversarial_inputs_service_test_adversarial_inputs_robustness(test, body)

### Example

```python
import eval_studio_client.api
from eval_studio_client.api.models.adversarial_inputs_service_test_adversarial_inputs_robustness_request import AdversarialInputsServiceTestAdversarialInputsRobustnessRequest
from eval_studio_client.api.models.v1_operation import V1Operation
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)

# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.AdversarialInputsServiceApi(api_client)
    test = 'test_example'  # str | Required. The Test to which the adversarial inputs TestCases will be added.
    body = eval_studio_client.api.AdversarialInputsServiceTestAdversarialInputsRobustnessRequest()  # AdversarialInputsServiceTestAdversarialInputsRobustnessRequest

    try:
        api_response = api_instance.adversarial_inputs_service_test_adversarial_inputs_robustness(test, body)
        print("The response of AdversarialInputsServiceApi->adversarial_inputs_service_test_adversarial_inputs_robustness:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling AdversarialInputsServiceApi->adversarial_inputs_service_test_adversarial_inputs_robustness: %s\n" % e)
```

### Parameters

Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**test** | **str** | Required. The Test to which the adversarial inputs TestCases will be added. |
**body** | [**AdversarialInputsServiceTestAdversarialInputsRobustnessRequest**](AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md) | |

### Return type

[**V1Operation**](V1Operation.md)

### Authorization

No authorization required

### HTTP request headers

- **Content-Type**: application/json
- **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
| **200** | A successful response. | - |
| **0** | An unexpected error response. | - |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
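The robustness test runs asynchronously: the call above returns a `V1Operation` rather than finished results. A minimal polling sketch follows; it assumes `OperationServiceApi` exposes an `operation_service_get_operation(name)` method whose response wraps the operation in an `operation` field, and that `V1Operation` carries `name` and `done` attributes. These names are assumptions inferred from the file listing, not confirmed by this diff, and all resource names are placeholders.

```python
import time

import eval_studio_client.api

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    adversarial_api = eval_studio_client.api.AdversarialInputsServiceApi(api_client)
    operations_api = eval_studio_client.api.OperationServiceApi(api_client)

    # Start the asynchronous robustness test; the server answers immediately with a V1Operation.
    operation = adversarial_api.adversarial_inputs_service_test_adversarial_inputs_robustness(
        "tests/example-test-id",  # hypothetical Test resource name
        eval_studio_client.api.AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(),
    )

    # Poll until the long-running operation reports completion.
    # Assumes a `done` flag and `name` on V1Operation, and an
    # `operation_service_get_operation` accessor returning a response with an `operation` field.
    while not operation.done:
        time.sleep(5)
        operation = operations_api.operation_service_get_operation(operation.name).operation
```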
eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md
ADDED
@@ -0,0 +1,45 @@

# AdversarialInputsServiceTestAdversarialInputsRobustnessRequest

## Properties

Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**operation** | **str** | Required. The Operation processing adversarial inputs robustness testing. | [optional]
**generator_input_types** | [**List[V1TestCasesGenerator]**](V1TestCasesGenerator.md) | Optional. The list of adversarial input types to generate. | [optional]
**generator_document_urls** | **List[str]** | Required. The document URLs which were used to generate the baseline TestCases. | [optional]
**generator_model** | [**V1Model**](V1Model.md) | | [optional]
**generator_base_llm_model** | **str** | Required. Base LLM model to use for generation of the adversarial prompts. | [optional]
**generator_count** | **int** | Required. The number of adversarial TestCases to generate. | [optional]
**generator_topics** | **List[str]** | Optional. Topics to generate questions for. If not specified, use document summarization as topic generation. | [optional]
**generator_chunks** | **List[str]** | Optional. The list of chunks to use for generation. If set, the Documents assigned to the Test and h2ogpte_collection_id are ignored. | [optional]
**generator_h2ogpte_collection_id** | **str** | Optional. ID of the h2oGPTe collection to use. If provided, documents referenced by Test and any specified chunks are ignored. This field is required if Test does not reference any documents and no chunks are provided. If this field is left empty, a temporary collection will be created. | [optional]
**evaluator_identifiers** | **List[str]** | Required. Evaluator identifiers to use for the model evaluation using the adversarial inputs. | [optional]
**evaluators_parameters** | **Dict[str, str]** | Optional. Additional evaluators configuration, for all the evaluators used in the evaluation. Key is the evaluator identifier, and the value is a JSON string containing the configuration dictionary. | [optional]
**model** | [**V1Model**](V1Model.md) | | [optional]
**base_llm_model** | **str** | Required. Base LLM model to be evaluated using the adversarial inputs. | [optional]
**model_parameters** | **str** | Optional. Parameter overrides for the Model host in JSON format. | [optional]
**default_h2ogpte_model** | [**V1Model**](V1Model.md) | | [optional]
**baseline_eval** | **str** | Required. Baseline evaluation name. | [optional]
**baseline_metrics** | [**Dict[str, V1MetricScores]**](V1MetricScores.md) | Required. Map of baseline metrics from the evaluator to the metric scores for the evaluator. | [optional]

## Example

```python
from eval_studio_client.api.models.adversarial_inputs_service_test_adversarial_inputs_robustness_request import AdversarialInputsServiceTestAdversarialInputsRobustnessRequest

# TODO update the JSON string below
json = "{}"
# create an instance of AdversarialInputsServiceTestAdversarialInputsRobustnessRequest from a JSON string
adversarial_inputs_service_test_adversarial_inputs_robustness_request_instance = AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.from_json(json)
# print the JSON string representation of the object
print(adversarial_inputs_service_test_adversarial_inputs_robustness_request_instance.to_json())

# convert the object into a dict
adversarial_inputs_service_test_adversarial_inputs_robustness_request_dict = adversarial_inputs_service_test_adversarial_inputs_robustness_request_instance.to_dict()
# create an instance of AdversarialInputsServiceTestAdversarialInputsRobustnessRequest from a dict
adversarial_inputs_service_test_adversarial_inputs_robustness_request_from_dict = AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.from_dict(adversarial_inputs_service_test_adversarial_inputs_robustness_request_dict)
```

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
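The generated example above only round-trips an empty JSON object. A minimal sketch of populating a few of the documented fields and submitting the request is shown below; it assumes the pydantic-based model accepts the documented property names as keyword arguments, and every concrete value (model identifiers, evaluator identifier, resource names) is a placeholder rather than a value taken from this diff.

```python
import eval_studio_client.api
from eval_studio_client.api.models.adversarial_inputs_service_test_adversarial_inputs_robustness_request import (
    AdversarialInputsServiceTestAdversarialInputsRobustnessRequest,
)

# Illustrative values only; field names follow the property table above.
request = AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(
    generator_count=10,
    generator_document_urls=["https://example.com/docs/handbook.pdf"],
    generator_base_llm_model="example-generator-llm",    # hypothetical model identifier
    evaluator_identifiers=["example-evaluator"],         # hypothetical evaluator identifier
    base_llm_model="example-evaluated-llm",              # hypothetical model under evaluation
    baseline_eval="example-baseline-evaluation",         # hypothetical baseline evaluation name
)

configuration = eval_studio_client.api.Configuration(host="http://localhost")
with eval_studio_client.api.ApiClient(configuration) as api_client:
    api_instance = eval_studio_client.api.AdversarialInputsServiceApi(api_client)
    # Returns a V1Operation tracking the asynchronous robustness test.
    operation = api_instance.adversarial_inputs_service_test_adversarial_inputs_robustness(
        "tests/example-test-id",  # hypothetical Test resource name
        request,
    )
```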
eval_studio_client/api/docs/GeneratedQuestionsValidationServiceApi.md
ADDED
@@ -0,0 +1,78 @@

# eval_studio_client.api.GeneratedQuestionsValidationServiceApi

All URIs are relative to *http://localhost*

Method | HTTP request | Description
------------- | ------------- | -------------
[**generated_questions_validation_service_validate_generated_questions**](GeneratedQuestionsValidationServiceApi.md#generated_questions_validation_service_validate_generated_questions) | **POST** /v1/{name}:validateGeneratedQuestions |

# **generated_questions_validation_service_validate_generated_questions**
> V1Operation generated_questions_validation_service_validate_generated_questions(name, body)

### Example

```python
import eval_studio_client.api
from eval_studio_client.api.models.generated_questions_validation_service_validate_generated_questions_request import GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest
from eval_studio_client.api.models.v1_operation import V1Operation
from eval_studio_client.api.rest import ApiException
from pprint import pprint

# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = eval_studio_client.api.Configuration(
    host = "http://localhost"
)

# Enter a context with an instance of the API client
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.GeneratedQuestionsValidationServiceApi(api_client)
    name = 'name_example'  # str | Required. The Test for which to generate TestCases.
    body = eval_studio_client.api.GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest()  # GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest

    try:
        api_response = api_instance.generated_questions_validation_service_validate_generated_questions(name, body)
        print("The response of GeneratedQuestionsValidationServiceApi->generated_questions_validation_service_validate_generated_questions:\n")
        pprint(api_response)
    except Exception as e:
        print("Exception when calling GeneratedQuestionsValidationServiceApi->generated_questions_validation_service_validate_generated_questions: %s\n" % e)
```

### Parameters

Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
**name** | **str** | Required. The Test for which to generate TestCases. |
**body** | [**GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest**](GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.md) | |

### Return type

[**V1Operation**](V1Operation.md)

### Authorization

No authorization required

### HTTP request headers

- **Content-Type**: application/json
- **Accept**: application/json

### HTTP response details

| Status code | Description | Response headers |
|-------------|-------------|------------------|
| **200** | A successful response. | - |
| **0** | An unexpected error response. | - |

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
eval_studio_client/api/docs/GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.md
ADDED
@@ -0,0 +1,30 @@

# GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest

## Properties

Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**operation** | **str** | Required. The Operation processing this question validation process. | [optional]
**test_cases** | [**List[V1GeneratedTestCase]**](V1GeneratedTestCase.md) | Required. Generated Test Cases, i.e., Test cases with context that was used for their generation. | [optional]

## Example

```python
from eval_studio_client.api.models.generated_questions_validation_service_validate_generated_questions_request import GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest

# TODO update the JSON string below
json = "{}"
# create an instance of GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest from a JSON string
generated_questions_validation_service_validate_generated_questions_request_instance = GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.from_json(json)
# print the JSON string representation of the object
print(generated_questions_validation_service_validate_generated_questions_request_instance.to_json())

# convert the object into a dict
generated_questions_validation_service_validate_generated_questions_request_dict = generated_questions_validation_service_validate_generated_questions_request_instance.to_dict()
# create an instance of GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest from a dict
generated_questions_validation_service_validate_generated_questions_request_from_dict = GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.from_dict(generated_questions_validation_service_validate_generated_questions_request_dict)
```

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
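A minimal sketch of building this request directly, rather than from a JSON string, is shown below. It assumes keyword-argument construction with the documented property names and that `V1GeneratedTestCase` can be instantiated empty; the operation resource name is a placeholder, not a value taken from this diff.

```python
from eval_studio_client.api.models.generated_questions_validation_service_validate_generated_questions_request import (
    GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest,
)
from eval_studio_client.api.models.v1_generated_test_case import V1GeneratedTestCase

# Placeholder values; real ones come from the question-generation Operation
# and the generated test cases it produced.
request = GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest(
    operation="operations/example-operation-id",  # hypothetical Operation resource name
    test_cases=[V1GeneratedTestCase()],           # populate with the generated test cases
)
print(request.to_json())
```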
@@ -0,0 +1,77 @@
+# eval_studio_client.api.HumanCalibrationServiceApi
+
+All URIs are relative to *http://localhost*
+
+Method | HTTP request | Description
+------------- | ------------- | -------------
+[**human_calibration_service_estimate_threshold**](HumanCalibrationServiceApi.md#human_calibration_service_estimate_threshold) | **POST** /v1:estimateThreshold | EstimateThreshold runs a threshold estimation process based on human labeling of randomly sampled test-cases.
+
+
+# **human_calibration_service_estimate_threshold**
+> V1Operation human_calibration_service_estimate_threshold(body)
+
+EstimateThreshold runs a threshold estimation process based on human labeling of randomly sampled test-cases.
+
+### Example
+
+
+```python
+import eval_studio_client.api
+from eval_studio_client.api.models.v1_estimate_threshold_request import V1EstimateThresholdRequest
+from eval_studio_client.api.models.v1_operation import V1Operation
+from eval_studio_client.api.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to http://localhost
+# See configuration.py for a list of all supported configuration parameters.
+configuration = eval_studio_client.api.Configuration(
+    host = "http://localhost"
+)
+
+
+# Enter a context with an instance of the API client
+with eval_studio_client.api.ApiClient(configuration) as api_client:
+    # Create an instance of the API class
+    api_instance = eval_studio_client.api.HumanCalibrationServiceApi(api_client)
+    body = eval_studio_client.api.V1EstimateThresholdRequest() # V1EstimateThresholdRequest |
+
+    try:
+        # EstimateThreshold runs a threshold estimation process based on human labeling of randomly sampled test-cases.
+        api_response = api_instance.human_calibration_service_estimate_threshold(body)
+        print("The response of HumanCalibrationServiceApi->human_calibration_service_estimate_threshold:\n")
+        pprint(api_response)
+    except Exception as e:
+        print("Exception when calling HumanCalibrationServiceApi->human_calibration_service_estimate_threshold: %s\n" % e)
+```
+
+
+
+### Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **body** | [**V1EstimateThresholdRequest**](V1EstimateThresholdRequest.md)|  |
+
+### Return type
+
+[**V1Operation**](V1Operation.md)
+
+### Authorization
+
+No authorization required
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+### HTTP response details
+
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | A successful response. |  -  |
+**0** | An unexpected error response. |  -  |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
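Because `human_calibration_service_estimate_threshold` returns a `V1Operation` rather than the finished result, callers typically poll the operation until it completes. A minimal sketch of that pattern, assuming the conventional long-running-operation shape in which `V1Operation` exposes `name` and `done`, and `V1GetOperationResponse` wraps the refreshed operation in an `operation` attribute (these field names are assumptions, not confirmed by this diff):

```python
import time

import eval_studio_client.api

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    calibration_api = eval_studio_client.api.HumanCalibrationServiceApi(api_client)
    operations_api = eval_studio_client.api.OperationServiceApi(api_client)

    # Kick off the threshold estimation; the call returns a long-running V1Operation.
    operation = calibration_api.human_calibration_service_estimate_threshold(
        eval_studio_client.api.V1EstimateThresholdRequest()
    )

    # Assumption: V1Operation carries `name` and `done`, and the get-operation
    # response wraps the refreshed operation in an `operation` attribute.
    while not operation.done:
        time.sleep(5)
        operation = operations_api.operation_service_get_operation(operation.name).operation

    print(operation)
```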
@@ -0,0 +1,75 @@
+# eval_studio_client.api.LeaderboardReportServiceApi
+
+All URIs are relative to *http://localhost*
+
+Method | HTTP request | Description
+------------- | ------------- | -------------
+[**leaderboard_report_service_get_leaderboard_report**](LeaderboardReportServiceApi.md#leaderboard_report_service_get_leaderboard_report) | **GET** /v1/{name_3} |
+
+
+# **leaderboard_report_service_get_leaderboard_report**
+> V1GetLeaderboardReportResponse leaderboard_report_service_get_leaderboard_report(name_3)
+
+
+
+### Example
+
+
+```python
+import eval_studio_client.api
+from eval_studio_client.api.models.v1_get_leaderboard_report_response import V1GetLeaderboardReportResponse
+from eval_studio_client.api.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to http://localhost
+# See configuration.py for a list of all supported configuration parameters.
+configuration = eval_studio_client.api.Configuration(
+    host = "http://localhost"
+)
+
+
+# Enter a context with an instance of the API client
+with eval_studio_client.api.ApiClient(configuration) as api_client:
+    # Create an instance of the API class
+    api_instance = eval_studio_client.api.LeaderboardReportServiceApi(api_client)
+    name_3 = 'name_3_example' # str | Required. The name of the Leaderboard to retrieve.
+
+    try:
+        api_response = api_instance.leaderboard_report_service_get_leaderboard_report(name_3)
+        print("The response of LeaderboardReportServiceApi->leaderboard_report_service_get_leaderboard_report:\n")
+        pprint(api_response)
+    except Exception as e:
+        print("Exception when calling LeaderboardReportServiceApi->leaderboard_report_service_get_leaderboard_report: %s\n" % e)
+```
+
+
+
+### Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **name_3** | **str**| Required. The name of the Leaderboard to retrieve. |
+
+### Return type
+
+[**V1GetLeaderboardReportResponse**](V1GetLeaderboardReportResponse.md)
+
+### Authorization
+
+No authorization required
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+### HTTP response details
+
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | A successful response. |  -  |
+**0** | An unexpected error response. |  -  |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
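The report endpoint takes the Leaderboard's resource name, so in practice it pairs with `leaderboard_service_list_leaderboards` from `LeaderboardServiceApi`. A minimal sketch, assuming the list call can be made without arguments and that its response exposes a `leaderboards` collection whose items carry their resource `name` (field names assumed, not confirmed by this diff):

```python
import eval_studio_client.api
from pprint import pprint

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    leaderboard_api = eval_studio_client.api.LeaderboardServiceApi(api_client)
    report_api = eval_studio_client.api.LeaderboardReportServiceApi(api_client)

    # Assumption: the list response has a `leaderboards` collection whose items
    # expose their resource `name`; adjust to the actual response model.
    leaderboards = leaderboard_api.leaderboard_service_list_leaderboards().leaderboards
    if leaderboards:
        report = report_api.leaderboard_report_service_get_leaderboard_report(leaderboards[0].name)
        pprint(report)
```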
@@ -11,7 +11,7 @@ Method | HTTP request | Description
 [**leaderboard_service_create_leaderboard**](LeaderboardServiceApi.md#leaderboard_service_create_leaderboard) | **POST** /v1/leaderboards |
 [**leaderboard_service_create_leaderboard_without_cache**](LeaderboardServiceApi.md#leaderboard_service_create_leaderboard_without_cache) | **POST** /v1/leaderboards:withoutCache |
 [**leaderboard_service_delete_leaderboard**](LeaderboardServiceApi.md#leaderboard_service_delete_leaderboard) | **DELETE** /v1/{name_3} |
-[**leaderboard_service_get_leaderboard**](LeaderboardServiceApi.md#leaderboard_service_get_leaderboard) | **GET** /v1/{
+[**leaderboard_service_get_leaderboard**](LeaderboardServiceApi.md#leaderboard_service_get_leaderboard) | **GET** /v1/{name_4} |
 [**leaderboard_service_import_leaderboard**](LeaderboardServiceApi.md#leaderboard_service_import_leaderboard) | **POST** /v1/leaderboards:import |
 [**leaderboard_service_list_leaderboards**](LeaderboardServiceApi.md#leaderboard_service_list_leaderboards) | **GET** /v1/leaderboards |
 [**leaderboard_service_list_most_recent_leaderboards**](LeaderboardServiceApi.md#leaderboard_service_list_most_recent_leaderboards) | **GET** /v1/leaderboards:mostRecent |
@@ -488,7 +488,7 @@ No authorization required
 [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

 # **leaderboard_service_get_leaderboard**
-> V1GetLeaderboardResponse leaderboard_service_get_leaderboard(
+> V1GetLeaderboardResponse leaderboard_service_get_leaderboard(name_4)



@@ -512,10 +512,10 @@ configuration = eval_studio_client.api.Configuration(
 with eval_studio_client.api.ApiClient(configuration) as api_client:
     # Create an instance of the API class
     api_instance = eval_studio_client.api.LeaderboardServiceApi(api_client)
-
+    name_4 = 'name_4_example' # str | Required. The name of the Leaderboard to retrieve.

     try:
-        api_response = api_instance.leaderboard_service_get_leaderboard(
+        api_response = api_instance.leaderboard_service_get_leaderboard(name_4)
         print("The response of LeaderboardServiceApi->leaderboard_service_get_leaderboard:\n")
         pprint(api_response)
     except Exception as e:
@@ -529,7 +529,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:

 Name | Type | Description | Notes
 ------------- | ------------- | ------------- | -------------
-**
+ **name_4** | **str**| Required. The name of the Leaderboard to retrieve. |

 ### Return type

@@ -9,7 +9,7 @@ Method | HTTP request | Description
 [**model_service_check_base_models**](ModelServiceApi.md#model_service_check_base_models) | **GET** /v1/models:check_base_models |
 [**model_service_create_model**](ModelServiceApi.md#model_service_create_model) | **POST** /v1/models |
 [**model_service_delete_model**](ModelServiceApi.md#model_service_delete_model) | **DELETE** /v1/{name_4} |
-[**model_service_get_model**](ModelServiceApi.md#model_service_get_model) | **GET** /v1/{
+[**model_service_get_model**](ModelServiceApi.md#model_service_get_model) | **GET** /v1/{name_5} |
 [**model_service_list_base_models**](ModelServiceApi.md#model_service_list_base_models) | **GET** /v1/models:base_models |
 [**model_service_list_model_collections**](ModelServiceApi.md#model_service_list_model_collections) | **GET** /v1/models:collections |
 [**model_service_list_models**](ModelServiceApi.md#model_service_list_models) | **GET** /v1/models |
@@ -350,7 +350,7 @@ No authorization required
 [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

 # **model_service_get_model**
-> V1GetModelResponse model_service_get_model(
+> V1GetModelResponse model_service_get_model(name_5)



@@ -374,10 +374,10 @@ configuration = eval_studio_client.api.Configuration(
 with eval_studio_client.api.ApiClient(configuration) as api_client:
     # Create an instance of the API class
     api_instance = eval_studio_client.api.ModelServiceApi(api_client)
-
+    name_5 = 'name_5_example' # str | Required. The name of the Model to retrieve.

     try:
-        api_response = api_instance.model_service_get_model(
+        api_response = api_instance.model_service_get_model(name_5)
         print("The response of ModelServiceApi->model_service_get_model:\n")
         pprint(api_response)
     except Exception as e:
@@ -391,7 +391,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:

 Name | Type | Description | Notes
 ------------- | ------------- | ------------- | -------------
-**
+ **name_5** | **str**| Required. The name of the Model to retrieve. |

 ### Return type

@@ -4,13 +4,80 @@ All URIs are relative to *http://localhost*

 Method | HTTP request | Description
 ------------- | ------------- | -------------
+[**operation_service_abort_operation**](OperationServiceApi.md#operation_service_abort_operation) | **POST** /v1/{name}:abort |
 [**operation_service_batch_get_operations**](OperationServiceApi.md#operation_service_batch_get_operations) | **GET** /v1/operations:batchGet |
 [**operation_service_finalize_operation**](OperationServiceApi.md#operation_service_finalize_operation) | **PATCH** /v1/{operation.name}:finalize |
-[**operation_service_get_operation**](OperationServiceApi.md#operation_service_get_operation) | **GET** /v1/{
+[**operation_service_get_operation**](OperationServiceApi.md#operation_service_get_operation) | **GET** /v1/{name_6} |
 [**operation_service_list_operations**](OperationServiceApi.md#operation_service_list_operations) | **GET** /v1/operations |
 [**operation_service_update_operation**](OperationServiceApi.md#operation_service_update_operation) | **PATCH** /v1/{operation.name} |


+# **operation_service_abort_operation**
+> V1AbortOperationResponse operation_service_abort_operation(name)
+
+
+
+### Example
+
+
+```python
+import eval_studio_client.api
+from eval_studio_client.api.models.v1_abort_operation_response import V1AbortOperationResponse
+from eval_studio_client.api.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to http://localhost
+# See configuration.py for a list of all supported configuration parameters.
+configuration = eval_studio_client.api.Configuration(
+    host = "http://localhost"
+)
+
+
+# Enter a context with an instance of the API client
+with eval_studio_client.api.ApiClient(configuration) as api_client:
+    # Create an instance of the API class
+    api_instance = eval_studio_client.api.OperationServiceApi(api_client)
+    name = 'name_example' # str | Required. The name of the Operation to abort.
+
+    try:
+        api_response = api_instance.operation_service_abort_operation(name)
+        print("The response of OperationServiceApi->operation_service_abort_operation:\n")
+        pprint(api_response)
+    except Exception as e:
+        print("Exception when calling OperationServiceApi->operation_service_abort_operation: %s\n" % e)
+```
+
+
+
+### Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+ **name** | **str**| Required. The name of the Operation to abort. |
+
+### Return type
+
+[**V1AbortOperationResponse**](V1AbortOperationResponse.md)
+
+### Authorization
+
+No authorization required
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json
+
+### HTTP response details
+
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | A successful response. |  -  |
+**0** | An unexpected error response. |  -  |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
 # **operation_service_batch_get_operations**
 > V1BatchGetOperationsResponse operation_service_batch_get_operations(names=names)

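Where a long-running call needs to be cancelled, the new `operation_service_abort_operation` pairs naturally with `operation_service_get_operation`. A minimal sketch that aborts an operation still running after a deadline; it assumes `V1Operation` exposes `done` and that `V1GetOperationResponse` wraps the operation in an `operation` attribute, and the operation resource name shown is hypothetical:

```python
import time

import eval_studio_client.api

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    api_instance = eval_studio_client.api.OperationServiceApi(api_client)

    operation_name = "operations/123"   # hypothetical Operation resource name
    deadline = time.monotonic() + 600   # give the operation ten minutes

    while time.monotonic() < deadline:
        # Assumption: the response wraps the operation in an `operation`
        # attribute and the operation itself carries a `done` flag.
        operation = api_instance.operation_service_get_operation(operation_name).operation
        if operation.done:
            break
        time.sleep(10)
    else:
        # Still running after the deadline: ask the server to abort it.
        api_instance.operation_service_abort_operation(operation_name)
```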
@@ -147,7 +214,7 @@ No authorization required
 [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

 # **operation_service_get_operation**
-> V1GetOperationResponse operation_service_get_operation(
+> V1GetOperationResponse operation_service_get_operation(name_6)



@@ -171,10 +238,10 @@ configuration = eval_studio_client.api.Configuration(
 with eval_studio_client.api.ApiClient(configuration) as api_client:
     # Create an instance of the API class
     api_instance = eval_studio_client.api.OperationServiceApi(api_client)
-
+    name_6 = 'name_6_example' # str | Required. The name of the Operation to retrieve.

     try:
-        api_response = api_instance.operation_service_get_operation(
+        api_response = api_instance.operation_service_get_operation(name_6)
         print("The response of OperationServiceApi->operation_service_get_operation:\n")
         pprint(api_response)
     except Exception as e:
@@ -188,7 +255,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:

 Name | Type | Description | Notes
 ------------- | ------------- | ------------- | -------------
-**
+ **name_6** | **str**| Required. The name of the Operation to retrieve. |

 ### Return type

@@ -8,6 +8,7 @@ Name | Type | Description | Notes
 **perturbator_configurations** | [**List[V1PerturbatorConfiguration]**](V1PerturbatorConfiguration.md) | Required. PerturbatorConfiguration to apply to the parent Test. | [optional]
 **test_cases** | [**List[V1TestCase]**](V1TestCase.md) | Required. List of test cases to perturbate. These are the test cases from the parent test. TODO: breaks https://google.aip.dev/144 | [optional]
 **test_case_relationships** | [**List[V1TestCaseRelationship]**](V1TestCaseRelationship.md) | Optional. List of relationships between test cases. | [optional]
+**default_h2ogpte_model** | [**V1Model**](V1Model.md) |  | [optional]

 ## Example
