eval-studio-client 1.0.3a1__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/api/__init__.py +83 -1
- eval_studio_client/api/api/__init__.py +8 -0
- eval_studio_client/api/api/adversarial_inputs_service_api.py +321 -0
- eval_studio_client/api/api/dashboard_service_api.py +18 -1
- eval_studio_client/api/api/document_service_api.py +1 -1
- eval_studio_client/api/api/evaluation_service_api.py +1 -1
- eval_studio_client/api/api/evaluator_service_api.py +1 -1
- eval_studio_client/api/api/human_calibration_service_api.py +304 -0
- eval_studio_client/api/api/info_service_api.py +1 -1
- eval_studio_client/api/api/leaderboard_report_service_api.py +292 -0
- eval_studio_client/api/api/leaderboard_service_api.py +17 -17
- eval_studio_client/api/api/model_service_api.py +17 -17
- eval_studio_client/api/api/operation_progress_service_api.py +1 -1
- eval_studio_client/api/api/operation_service_api.py +272 -17
- eval_studio_client/api/api/perturbation_service_api.py +1 -1
- eval_studio_client/api/api/perturbator_service_api.py +285 -18
- eval_studio_client/api/api/prompt_generation_service_api.py +1 -1
- eval_studio_client/api/api/prompt_library_service_api.py +669 -0
- eval_studio_client/api/api/test_case_relationship_service_api.py +292 -0
- eval_studio_client/api/api/test_case_service_api.py +17 -17
- eval_studio_client/api/api/test_class_service_api.py +17 -17
- eval_studio_client/api/api/test_lab_service_api.py +1 -1
- eval_studio_client/api/api/test_service_api.py +1272 -102
- eval_studio_client/api/api/who_am_i_service_api.py +1 -1
- eval_studio_client/api/api/workflow_edge_service_api.py +835 -0
- eval_studio_client/api/api/workflow_node_service_api.py +2431 -0
- eval_studio_client/api/api/workflow_service_api.py +2403 -0
- eval_studio_client/api/api_client.py +1 -1
- eval_studio_client/api/configuration.py +1 -1
- eval_studio_client/api/docs/AdversarialInputsServiceApi.md +78 -0
- eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +45 -0
- eval_studio_client/api/docs/DashboardServiceApi.md +4 -2
- eval_studio_client/api/docs/HumanCalibrationServiceApi.md +77 -0
- eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +5 -5
- eval_studio_client/api/docs/ModelServiceApi.md +5 -5
- eval_studio_client/api/docs/OperationServiceApi.md +72 -5
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +1 -0
- eval_studio_client/api/docs/PerturbatorServiceApi.md +38 -8
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +4 -2
- eval_studio_client/api/docs/PromptLibraryServiceApi.md +155 -0
- eval_studio_client/api/docs/ProtobufNullValue.md +12 -0
- eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +1 -0
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +3 -0
- eval_studio_client/api/docs/RequiredTheTestToUpdate.md +1 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +47 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflowNode.md +44 -0
- eval_studio_client/api/docs/TestCaseRelationshipServiceApi.md +75 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +5 -5
- eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
- eval_studio_client/api/docs/TestServiceApi.md +293 -9
- eval_studio_client/api/docs/TestServiceCloneTestRequest.md +30 -0
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +3 -1
- eval_studio_client/api/docs/TestServiceImportTestCasesFromLibraryRequest.md +32 -0
- eval_studio_client/api/docs/TestServiceListTestCaseLibraryItemsRequest.md +35 -0
- eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md +30 -0
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +1 -0
- eval_studio_client/api/docs/V1AbortOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetWorkflowEdgesResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetWorkflowNodesResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneTestResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Context.md +37 -0
- eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1CreateWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Dashboard.md +1 -0
- eval_studio_client/api/docs/V1DashboardType.md +12 -0
- eval_studio_client/api/docs/V1DeleteWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1DependencyList.md +30 -0
- eval_studio_client/api/docs/V1EstimateThresholdRequest.md +33 -0
- eval_studio_client/api/docs/V1Evaluator.md +2 -0
- eval_studio_client/api/docs/V1GetGuardrailsConfigurationResponse.md +29 -0
- eval_studio_client/api/docs/V1GetLeaderboardReportResponse.md +29 -0
- eval_studio_client/api/docs/V1GetWorkflowNodePrerequisitesResponse.md +30 -0
- eval_studio_client/api/docs/V1GetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1GetWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1ImportEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1ImportTestCasesFromLibraryResponse.md +29 -0
- eval_studio_client/api/docs/V1ImportTestCasesRequest.md +33 -0
- eval_studio_client/api/docs/V1Info.md +3 -0
- eval_studio_client/api/docs/V1InitWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1LabeledTestCase.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReport.md +32 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputData.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluator.md +42 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluatorParameter.md +38 -0
- eval_studio_client/api/docs/V1LeaderboardReportExplanation.md +34 -0
- eval_studio_client/api/docs/V1LeaderboardReportMetricsMetaEntry.md +41 -0
- eval_studio_client/api/docs/V1LeaderboardReportModel.md +37 -0
- eval_studio_client/api/docs/V1LeaderboardReportResult.md +45 -0
- eval_studio_client/api/docs/V1LeaderboardReportResultRelationship.md +32 -0
- eval_studio_client/api/docs/V1ListPromptLibraryItemsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCaseLibraryItemsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCaseRelationshipsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListWorkflowDependenciesResponse.md +30 -0
- eval_studio_client/api/docs/V1ListWorkflowsResponse.md +29 -0
- eval_studio_client/api/docs/V1MetricScore.md +31 -0
- eval_studio_client/api/docs/V1MetricScores.md +29 -0
- eval_studio_client/api/docs/V1PerturbTestInPlaceResponse.md +29 -0
- eval_studio_client/api/docs/V1ProcessWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1PromptLibraryItem.md +42 -0
- eval_studio_client/api/docs/V1RepeatedContext.md +29 -0
- eval_studio_client/api/docs/V1RepeatedString.md +29 -0
- eval_studio_client/api/docs/V1ResetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1Test.md +1 -0
- eval_studio_client/api/docs/V1TestCase.md +3 -0
- eval_studio_client/api/docs/V1TestSuiteEvaluates.md +11 -0
- eval_studio_client/api/docs/V1TestType.md +12 -0
- eval_studio_client/api/docs/V1UpdateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Workflow.md +49 -0
- eval_studio_client/api/docs/V1WorkflowDependency.md +30 -0
- eval_studio_client/api/docs/V1WorkflowEdge.md +40 -0
- eval_studio_client/api/docs/V1WorkflowEdgeType.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNode.md +46 -0
- eval_studio_client/api/docs/V1WorkflowNodeArtifact.md +41 -0
- eval_studio_client/api/docs/V1WorkflowNodeArtifacts.md +29 -0
- eval_studio_client/api/docs/V1WorkflowNodeAttributes.md +30 -0
- eval_studio_client/api/docs/V1WorkflowNodeStatus.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNodeType.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNodeView.md +12 -0
- eval_studio_client/api/docs/V1WorkflowType.md +12 -0
- eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +215 -0
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +632 -0
- eval_studio_client/api/docs/WorkflowServiceApi.md +623 -0
- eval_studio_client/api/docs/WorkflowServiceCloneWorkflowRequest.md +33 -0
- eval_studio_client/api/exceptions.py +1 -1
- eval_studio_client/api/models/__init__.py +75 -1
- eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +143 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +9 -3
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +18 -6
- eval_studio_client/api/models/protobuf_any.py +1 -1
- eval_studio_client/api/models/protobuf_null_value.py +36 -0
- eval_studio_client/api/models/required_the_dashboard_to_update.py +6 -3
- eval_studio_client/api/models/required_the_document_to_update.py +1 -1
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_model_to_update.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_case_to_update.py +14 -3
- eval_studio_client/api/models/required_the_test_to_update.py +6 -3
- eval_studio_client/api/models/required_the_updated_workflow.py +160 -0
- eval_studio_client/api/models/required_the_updated_workflow_node.py +152 -0
- eval_studio_client/api/models/rpc_status.py +1 -1
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/models/test_service_clone_test_request.py +89 -0
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +16 -4
- eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +93 -0
- eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +99 -0
- eval_studio_client/api/models/test_service_perturb_test_in_place_request.py +97 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +5 -3
- eval_studio_client/api/models/v1_abort_operation_response.py +91 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_request.py +87 -0
- eval_studio_client/api/models/v1_batch_delete_workflows_response.py +95 -0
- eval_studio_client/api/models/v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +95 -0
- eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +95 -0
- eval_studio_client/api/models/v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/models/v1_check_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_clone_test_response.py +91 -0
- eval_studio_client/api/models/v1_clone_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_collection_info.py +1 -1
- eval_studio_client/api/models/v1_context.py +103 -0
- eval_studio_client/api/models/v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_document_response.py +1 -1
- eval_studio_client/api/models/v1_create_evaluation_request.py +8 -3
- eval_studio_client/api/models/v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/models/v1_create_model_response.py +1 -1
- eval_studio_client/api/models/v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_response.py +1 -1
- eval_studio_client/api/models/v1_create_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_dashboard.py +6 -3
- eval_studio_client/api/models/v1_dashboard_status.py +1 -1
- eval_studio_client/api/models/v1_dashboard_type.py +38 -0
- eval_studio_client/api/models/v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_document_response.py +1 -1
- eval_studio_client/api/models/v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_model_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_dependency_list.py +97 -0
- eval_studio_client/api/models/v1_document.py +1 -1
- eval_studio_client/api/models/v1_estimate_threshold_request.py +103 -0
- eval_studio_client/api/models/v1_evaluation_test.py +1 -1
- eval_studio_client/api/models/v1_evaluator.py +12 -4
- eval_studio_client/api/models/v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/models/v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/models/v1_evaluator_view.py +1 -1
- eval_studio_client/api/models/v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/models/v1_find_all_test_cases_by_id_response.py +1 -1
- eval_studio_client/api/models/v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_document_response.py +1 -1
- eval_studio_client/api/models/v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_get_guardrails_configuration_response.py +87 -0
- eval_studio_client/api/models/v1_get_info_response.py +1 -1
- eval_studio_client/api/models/v1_get_leaderboard_report_response.py +91 -0
- eval_studio_client/api/models/v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_model_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_response.py +1 -1
- eval_studio_client/api/models/v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_class_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +89 -0
- eval_studio_client/api/models/v1_get_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_get_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_import_evaluation_request.py +8 -3
- eval_studio_client/api/models/v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +91 -0
- eval_studio_client/api/models/v1_import_test_cases_request.py +95 -0
- eval_studio_client/api/models/v1_info.py +10 -4
- eval_studio_client/api/models/v1_init_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_insight.py +1 -1
- eval_studio_client/api/models/v1_labeled_test_case.py +91 -0
- eval_studio_client/api/models/v1_leaderboard.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_report.py +115 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_data.py +93 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +101 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator.py +155 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator_parameter.py +109 -0
- eval_studio_client/api/models/v1_leaderboard_report_explanation.py +103 -0
- eval_studio_client/api/models/v1_leaderboard_report_metrics_meta_entry.py +129 -0
- eval_studio_client/api/models/v1_leaderboard_report_model.py +113 -0
- eval_studio_client/api/models/v1_leaderboard_report_result.py +175 -0
- eval_studio_client/api/models/v1_leaderboard_report_result_relationship.py +97 -0
- eval_studio_client/api/models/v1_leaderboard_status.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_type.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_view.py +1 -1
- eval_studio_client/api/models/v1_list_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_documents_response.py +1 -1
- eval_studio_client/api/models/v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_operations_response.py +1 -1
- eval_studio_client/api/models/v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/models/v1_list_prompt_library_items_response.py +95 -0
- eval_studio_client/api/models/v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_library_items_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_case_relationships_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/models/v1_list_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_workflow_dependencies_response.py +105 -0
- eval_studio_client/api/models/v1_list_workflows_response.py +95 -0
- eval_studio_client/api/models/v1_metric_score.py +89 -0
- eval_studio_client/api/models/v1_metric_scores.py +95 -0
- eval_studio_client/api/models/v1_model.py +1 -1
- eval_studio_client/api/models/v1_model_type.py +1 -1
- eval_studio_client/api/models/v1_operation.py +1 -1
- eval_studio_client/api/models/v1_operation_progress.py +1 -1
- eval_studio_client/api/models/v1_perturb_test_in_place_response.py +91 -0
- eval_studio_client/api/models/v1_perturb_test_response.py +1 -1
- eval_studio_client/api/models/v1_perturbator.py +1 -1
- eval_studio_client/api/models/v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/models/v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/models/v1_problem_and_action.py +1 -1
- eval_studio_client/api/models/v1_process_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_prompt_library_item.py +129 -0
- eval_studio_client/api/models/v1_repeated_context.py +95 -0
- eval_studio_client/api/models/v1_repeated_string.py +87 -0
- eval_studio_client/api/models/v1_reset_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_test.py +6 -3
- eval_studio_client/api/models/v1_test_case.py +14 -3
- eval_studio_client/api/models/v1_test_case_relationship.py +1 -1
- eval_studio_client/api/models/v1_test_cases_generator.py +1 -1
- eval_studio_client/api/models/v1_test_class.py +1 -1
- eval_studio_client/api/models/v1_test_class_type.py +1 -1
- eval_studio_client/api/models/v1_test_lab.py +1 -1
- eval_studio_client/api/models/v1_test_suite_evaluates.py +39 -0
- eval_studio_client/api/models/v1_test_type.py +38 -0
- eval_studio_client/api/models/v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_document_response.py +1 -1
- eval_studio_client/api/models/v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_model_response.py +1 -1
- eval_studio_client/api/models/v1_update_operation_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_update_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_who_am_i_response.py +1 -1
- eval_studio_client/api/models/v1_workflow.py +164 -0
- eval_studio_client/api/models/v1_workflow_dependency.py +89 -0
- eval_studio_client/api/models/v1_workflow_edge.py +123 -0
- eval_studio_client/api/models/v1_workflow_edge_type.py +38 -0
- eval_studio_client/api/models/v1_workflow_node.py +156 -0
- eval_studio_client/api/models/v1_workflow_node_artifact.py +126 -0
- eval_studio_client/api/models/v1_workflow_node_artifacts.py +97 -0
- eval_studio_client/api/models/v1_workflow_node_attributes.py +87 -0
- eval_studio_client/api/models/v1_workflow_node_status.py +40 -0
- eval_studio_client/api/models/v1_workflow_node_type.py +44 -0
- eval_studio_client/api/models/v1_workflow_node_view.py +38 -0
- eval_studio_client/api/models/v1_workflow_type.py +37 -0
- eval_studio_client/api/models/workflow_service_clone_workflow_request.py +95 -0
- eval_studio_client/api/rest.py +1 -1
- eval_studio_client/api/test/test_adversarial_inputs_service_api.py +37 -0
- eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +128 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
- eval_studio_client/api/test/test_document_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
- eval_studio_client/api/test/test_human_calibration_service_api.py +38 -0
- eval_studio_client/api/test/test_info_service_api.py +1 -1
- eval_studio_client/api/test/test_leaderboard_report_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
- eval_studio_client/api/test/test_model_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_service_api.py +7 -1
- eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +25 -3
- eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +21 -5
- eval_studio_client/api/test/test_prompt_library_service_api.py +43 -0
- eval_studio_client/api/test/test_protobuf_any.py +1 -1
- eval_studio_client/api/test/test_protobuf_null_value.py +33 -0
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +3 -2
- eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +9 -2
- eval_studio_client/api/test/test_required_the_test_to_update.py +3 -2
- eval_studio_client/api/test/test_required_the_updated_workflow.py +92 -0
- eval_studio_client/api/test/test_required_the_updated_workflow_node.py +81 -0
- eval_studio_client/api/test/test_rpc_status.py +1 -1
- eval_studio_client/api/test/test_test_case_relationship_service_api.py +37 -0
- eval_studio_client/api/test/test_test_case_service_api.py +1 -1
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_test_class_service_api.py +1 -1
- eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
- eval_studio_client/api/test/test_test_service_api.py +25 -1
- eval_studio_client/api/test/test_test_service_clone_test_request.py +52 -0
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +17 -2
- eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +56 -0
- eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +63 -0
- eval_studio_client/api/test/test_test_service_perturb_test_in_place_request.py +59 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +5 -2
- eval_studio_client/api/test/test_v1_abort_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +3 -2
- eval_studio_client/api/test/test_v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +4 -2
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +9 -2
- eval_studio_client/api/test/test_v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +53 -0
- eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +95 -0
- eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +3 -2
- eval_studio_client/api/test/test_v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +64 -0
- eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +84 -0
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_check_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_clone_test_response.py +68 -0
- eval_studio_client/api/test/test_v1_clone_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_collection_info.py +1 -1
- eval_studio_client/api/test/test_v1_context.py +59 -0
- eval_studio_client/api/test/test_v1_create_dashboard_response.py +3 -2
- eval_studio_client/api/test/test_v1_create_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +25 -3
- eval_studio_client/api/test/test_v1_create_evaluator_response.py +4 -2
- eval_studio_client/api/test/test_v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_create_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_create_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_create_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_dashboard.py +3 -2
- eval_studio_client/api/test/test_v1_dashboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_dashboard_type.py +33 -0
- eval_studio_client/api/test/test_v1_delete_dashboard_response.py +3 -2
- eval_studio_client/api/test/test_v1_delete_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_evaluator_response.py +4 -2
- eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_delete_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_delete_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_delete_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_dependency_list.py +56 -0
- eval_studio_client/api/test/test_v1_document.py +1 -1
- eval_studio_client/api/test/test_v1_estimate_threshold_request.py +60 -0
- eval_studio_client/api/test/test_v1_evaluation_test.py +9 -2
- eval_studio_client/api/test/test_v1_evaluator.py +4 -2
- eval_studio_client/api/test/test_v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_view.py +1 -1
- eval_studio_client/api/test/test_v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +9 -2
- eval_studio_client/api/test/test_v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_dashboard_response.py +3 -2
- eval_studio_client/api/test/test_v1_get_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_evaluator_response.py +4 -2
- eval_studio_client/api/test/test_v1_get_guardrails_configuration_response.py +51 -0
- eval_studio_client/api/test/test_v1_get_info_response.py +7 -2
- eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +173 -0
- eval_studio_client/api/test/test_v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_get_test_class_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +56 -0
- eval_studio_client/api/test/test_v1_get_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_get_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_import_evaluation_request.py +17 -2
- eval_studio_client/api/test/test_v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +71 -0
- eval_studio_client/api/test/test_v1_import_test_cases_request.py +57 -0
- eval_studio_client/api/test/test_v1_info.py +7 -2
- eval_studio_client/api/test/test_v1_init_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_insight.py +1 -1
- eval_studio_client/api/test/test_v1_labeled_test_case.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_report.py +172 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py +52 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +56 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py +114 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py +63 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py +58 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py +66 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_model.py +60 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result.py +92 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_type.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_view.py +1 -1
- eval_studio_client/api/test/test_v1_list_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_dashboards_response.py +3 -2
- eval_studio_client/api/test/test_v1_list_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_evaluators_response.py +4 -2
- eval_studio_client/api/test/test_v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +3 -2
- eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_list_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +71 -0
- eval_studio_client/api/test/test_v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +71 -0
- eval_studio_client/api/test/test_v1_list_test_case_relationships_response.py +56 -0
- eval_studio_client/api/test/test_v1_list_test_cases_response.py +9 -2
- eval_studio_client/api/test/test_v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_list_workflow_dependencies_response.py +93 -0
- eval_studio_client/api/test/test_v1_list_workflows_response.py +95 -0
- eval_studio_client/api/test/test_v1_metric_score.py +52 -0
- eval_studio_client/api/test/test_v1_metric_scores.py +55 -0
- eval_studio_client/api/test/test_v1_model.py +1 -1
- eval_studio_client/api/test/test_v1_model_type.py +1 -1
- eval_studio_client/api/test/test_v1_operation.py +1 -1
- eval_studio_client/api/test/test_v1_operation_progress.py +1 -1
- eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +68 -0
- eval_studio_client/api/test/test_v1_perturb_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_perturbator.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/test/test_v1_problem_and_action.py +1 -1
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +71 -0
- eval_studio_client/api/test/test_v1_prompt_library_item.py +68 -0
- eval_studio_client/api/test/test_v1_repeated_context.py +62 -0
- eval_studio_client/api/test/test_v1_repeated_string.py +53 -0
- eval_studio_client/api/test/test_v1_reset_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_test.py +3 -2
- eval_studio_client/api/test/test_v1_test_case.py +9 -2
- eval_studio_client/api/test/test_v1_test_case_relationship.py +1 -1
- eval_studio_client/api/test/test_v1_test_cases_generator.py +1 -1
- eval_studio_client/api/test/test_v1_test_class.py +1 -1
- eval_studio_client/api/test/test_v1_test_class_type.py +1 -1
- eval_studio_client/api/test/test_v1_test_lab.py +1 -1
- eval_studio_client/api/test/test_v1_test_suite_evaluates.py +33 -0
- eval_studio_client/api/test/test_v1_test_type.py +33 -0
- eval_studio_client/api/test/test_v1_update_dashboard_response.py +3 -2
- eval_studio_client/api/test/test_v1_update_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_update_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_update_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_update_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_who_am_i_response.py +1 -1
- eval_studio_client/api/test/test_v1_workflow.py +93 -0
- eval_studio_client/api/test/test_v1_workflow_dependency.py +52 -0
- eval_studio_client/api/test/test_v1_workflow_edge.py +61 -0
- eval_studio_client/api/test/test_v1_workflow_edge_type.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node.py +82 -0
- eval_studio_client/api/test/test_v1_workflow_node_artifact.py +62 -0
- eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +65 -0
- eval_studio_client/api/test/test_v1_workflow_node_attributes.py +51 -0
- eval_studio_client/api/test/test_v1_workflow_node_status.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node_type.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node_view.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_type.py +33 -0
- eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
- eval_studio_client/api/test/test_workflow_edge_service_api.py +52 -0
- eval_studio_client/api/test/test_workflow_node_service_api.py +94 -0
- eval_studio_client/api/test/test_workflow_service_api.py +93 -0
- eval_studio_client/api/test/test_workflow_service_clone_workflow_request.py +55 -0
- eval_studio_client/client.py +7 -0
- eval_studio_client/dashboards.py +29 -0
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +5318 -1884
- eval_studio_client/leaderboards.py +123 -0
- eval_studio_client/models.py +3 -42
- eval_studio_client/test_labs.py +49 -21
- eval_studio_client/tests.py +290 -8
- {eval_studio_client-1.0.3a1.dist-info → eval_studio_client-1.1.0.dist-info}/METADATA +1 -2
- eval_studio_client-1.1.0.dist-info/RECORD +732 -0
- eval_studio_client-1.0.3a1.dist-info/RECORD +0 -486
- {eval_studio_client-1.0.3a1.dist-info → eval_studio_client-1.1.0.dist-info}/WHEEL +0 -0
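
The hunks rendered below come from `eval_studio_client/api/docs/TestServiceApi.md`, which gains clone, perturb-in-place, and test-case-library endpoints, repairs the truncated `test_service_get_test` docs, and adds an AIP-160 `filter` parameter to the list calls. For orientation, here is a minimal sketch of the new filter-aware listing call through the generated client; the host and the filter string are placeholders (the generated docs state that only the `type` field with the `=` operator is supported), and `TEST_TYPE_UNSPECIFIED` is an assumed value of the new `V1TestType` enum.

```python
from pprint import pprint

import eval_studio_client.api

# Placeholder host; point this at a real Eval Studio deployment.
configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    api_instance = eval_studio_client.api.TestServiceApi(api_client)
    # New in 1.1.0: AIP-160-style filtering; only `type` with '=' is documented.
    api_response = api_instance.test_service_list_tests(
        filter='type = "TEST_TYPE_UNSPECIFIED"',  # assumed V1TestType value
        order_by="create_time",
    )
    pprint(api_response)
```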
|
@@ -7,13 +7,17 @@ Method | HTTP request | Description
|
|
|
7
7
|
[**test_service_batch_delete_tests**](TestServiceApi.md#test_service_batch_delete_tests) | **POST** /v1/tests:batchDelete |
|
|
8
8
|
[**test_service_batch_get_tests**](TestServiceApi.md#test_service_batch_get_tests) | **GET** /v1/tests:batchGet |
|
|
9
9
|
[**test_service_batch_import_tests**](TestServiceApi.md#test_service_batch_import_tests) | **POST** /v1/tests:batchImport |
|
|
10
|
+
[**test_service_clone_test**](TestServiceApi.md#test_service_clone_test) | **POST** /v1/{name}:clone |
|
|
10
11
|
[**test_service_create_test**](TestServiceApi.md#test_service_create_test) | **POST** /v1/tests |
|
|
11
12
|
[**test_service_delete_test**](TestServiceApi.md#test_service_delete_test) | **DELETE** /v1/{name_6} |
|
|
12
13
|
[**test_service_generate_test_cases**](TestServiceApi.md#test_service_generate_test_cases) | **POST** /v1/{name}:generateTestCases |
|
|
13
|
-
[**test_service_get_test**](TestServiceApi.md#test_service_get_test) | **GET** /v1/{
|
|
14
|
+
[**test_service_get_test**](TestServiceApi.md#test_service_get_test) | **GET** /v1/{name_10} |
|
|
15
|
+
[**test_service_import_test_cases_from_library**](TestServiceApi.md#test_service_import_test_cases_from_library) | **POST** /v1/{name}:importTestCasesFromLibrary |
|
|
14
16
|
[**test_service_list_most_recent_tests**](TestServiceApi.md#test_service_list_most_recent_tests) | **GET** /v1/tests:mostRecent |
|
|
17
|
+
[**test_service_list_test_case_library_items**](TestServiceApi.md#test_service_list_test_case_library_items) | **POST** /v1/{name}:listTestCaseLibraryItems |
|
|
15
18
|
[**test_service_list_tests**](TestServiceApi.md#test_service_list_tests) | **GET** /v1/tests |
|
|
16
19
|
[**test_service_perturb_test**](TestServiceApi.md#test_service_perturb_test) | **POST** /v1/{name}:perturb |
|
|
20
|
+
[**test_service_perturb_test_in_place**](TestServiceApi.md#test_service_perturb_test_in_place) | **POST** /v1/{name}:perturbInPlace |
|
|
17
21
|
[**test_service_update_test**](TestServiceApi.md#test_service_update_test) | **PATCH** /v1/{test.name} |
|
|
18
22
|
|
|
19
23
|
|
|
@@ -217,6 +221,75 @@ No authorization required
|
|
|
217
221
|
|
|
218
222
|
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
|
219
223
|
|
|
224
|
+
# **test_service_clone_test**
|
|
225
|
+
> V1CloneTestResponse test_service_clone_test(name, body)
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
### Example
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
```python
|
|
233
|
+
import eval_studio_client.api
|
|
234
|
+
from eval_studio_client.api.models.test_service_clone_test_request import TestServiceCloneTestRequest
|
|
235
|
+
from eval_studio_client.api.models.v1_clone_test_response import V1CloneTestResponse
|
|
236
|
+
from eval_studio_client.api.rest import ApiException
|
|
237
|
+
from pprint import pprint
|
|
238
|
+
|
|
239
|
+
# Defining the host is optional and defaults to http://localhost
|
|
240
|
+
# See configuration.py for a list of all supported configuration parameters.
|
|
241
|
+
configuration = eval_studio_client.api.Configuration(
|
|
242
|
+
host = "http://localhost"
|
|
243
|
+
)
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
# Enter a context with an instance of the API client
|
|
247
|
+
with eval_studio_client.api.ApiClient(configuration) as api_client:
|
|
248
|
+
# Create an instance of the API class
|
|
249
|
+
api_instance = eval_studio_client.api.TestServiceApi(api_client)
|
|
250
|
+
name = 'name_example' # str | Required. The name of the Test to clone.
|
|
251
|
+
body = eval_studio_client.api.TestServiceCloneTestRequest() # TestServiceCloneTestRequest |
|
|
252
|
+
|
|
253
|
+
try:
|
|
254
|
+
api_response = api_instance.test_service_clone_test(name, body)
|
|
255
|
+
print("The response of TestServiceApi->test_service_clone_test:\n")
|
|
256
|
+
pprint(api_response)
|
|
257
|
+
except Exception as e:
|
|
258
|
+
print("Exception when calling TestServiceApi->test_service_clone_test: %s\n" % e)
|
|
259
|
+
```
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
### Parameters
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
Name | Type | Description | Notes
|
|
267
|
+
------------- | ------------- | ------------- | -------------
|
|
268
|
+
**name** | **str**| Required. The name of the Test to clone. |
|
|
269
|
+
**body** | [**TestServiceCloneTestRequest**](TestServiceCloneTestRequest.md)| |
|
|
270
|
+
|
|
271
|
+
### Return type
|
|
272
|
+
|
|
273
|
+
[**V1CloneTestResponse**](V1CloneTestResponse.md)
|
|
274
|
+
|
|
275
|
+
### Authorization
|
|
276
|
+
|
|
277
|
+
No authorization required
|
|
278
|
+
|
|
279
|
+
### HTTP request headers
|
|
280
|
+
|
|
281
|
+
- **Content-Type**: application/json
|
|
282
|
+
- **Accept**: application/json
|
|
283
|
+
|
|
284
|
+
### HTTP response details
|
|
285
|
+
|
|
286
|
+
| Status code | Description | Response headers |
|
|
287
|
+
|-------------|-------------|------------------|
|
|
288
|
+
**200** | A successful response. | - |
|
|
289
|
+
**0** | An unexpected error response. | - |
|
|
290
|
+
|
|
291
|
+
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
|
292
|
+
|
|
220
293
|
# **test_service_create_test**
|
|
221
294
|
> V1CreateTestResponse test_service_create_test(test)
|
|
222
295
|
|
|
@@ -422,7 +495,7 @@ No authorization required
|
|
|
422
495
|
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
|
423
496
|
|
|
424
497
|
# **test_service_get_test**
|
|
425
|
-
> V1GetTestResponse test_service_get_test(
|
|
498
|
+
> V1GetTestResponse test_service_get_test(name_10)
|
|
426
499
|
|
|
427
500
|
|
|
428
501
|
|
|
@@ -446,10 +519,10 @@ configuration = eval_studio_client.api.Configuration(
|
|
|
446
519
|
with eval_studio_client.api.ApiClient(configuration) as api_client:
|
|
447
520
|
# Create an instance of the API class
|
|
448
521
|
api_instance = eval_studio_client.api.TestServiceApi(api_client)
|
|
449
|
-
|
|
522
|
+
name_10 = 'name_10_example' # str | Required. The name of the Test to retrieve.
|
|
450
523
|
|
|
451
524
|
try:
|
|
452
|
-
api_response = api_instance.test_service_get_test(
|
|
525
|
+
api_response = api_instance.test_service_get_test(name_10)
|
|
453
526
|
print("The response of TestServiceApi->test_service_get_test:\n")
|
|
454
527
|
pprint(api_response)
|
|
455
528
|
except Exception as e:
|
|
@@ -463,7 +536,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:
|
|
|
463
536
|
|
|
464
537
|
Name | Type | Description | Notes
|
|
465
538
|
------------- | ------------- | ------------- | -------------
|
|
466
|
-
**
|
|
539
|
+
**name_10** | **str**| Required. The name of the Test to retrieve. |
|
|
467
540
|
|
|
468
541
|
### Return type
|
|
469
542
|
|
|
@@ -487,8 +560,77 @@ No authorization required
|
|
|
487
560
|
|
|
488
561
|
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
|
489
562
|
|
|
563
|
+
# **test_service_import_test_cases_from_library**
|
|
564
|
+
> V1ImportTestCasesFromLibraryResponse test_service_import_test_cases_from_library(name, body)
|
|
565
|
+
|
|
566
|
+
|
|
567
|
+
|
|
568
|
+
### Example
|
|
569
|
+
|
|
570
|
+
|
|
571
|
+
```python
|
|
572
|
+
import eval_studio_client.api
|
|
573
|
+
from eval_studio_client.api.models.test_service_import_test_cases_from_library_request import TestServiceImportTestCasesFromLibraryRequest
|
|
574
|
+
from eval_studio_client.api.models.v1_import_test_cases_from_library_response import V1ImportTestCasesFromLibraryResponse
|
|
575
|
+
from eval_studio_client.api.rest import ApiException
|
|
576
|
+
from pprint import pprint
|
|
577
|
+
|
|
578
|
+
# Defining the host is optional and defaults to http://localhost
|
|
579
|
+
# See configuration.py for a list of all supported configuration parameters.
|
|
580
|
+
configuration = eval_studio_client.api.Configuration(
|
|
581
|
+
host = "http://localhost"
|
|
582
|
+
)
|
|
583
|
+
|
|
584
|
+
|
|
585
|
+
# Enter a context with an instance of the API client
|
|
586
|
+
with eval_studio_client.api.ApiClient(configuration) as api_client:
|
|
587
|
+
# Create an instance of the API class
|
|
588
|
+
api_instance = eval_studio_client.api.TestServiceApi(api_client)
|
|
589
|
+
name = 'name_example' # str | Required. The Test for which to get TestCases.
|
|
590
|
+
body = eval_studio_client.api.TestServiceImportTestCasesFromLibraryRequest() # TestServiceImportTestCasesFromLibraryRequest |
|
|
591
|
+
|
|
592
|
+
try:
|
|
593
|
+
api_response = api_instance.test_service_import_test_cases_from_library(name, body)
|
|
594
|
+
print("The response of TestServiceApi->test_service_import_test_cases_from_library:\n")
|
|
595
|
+
pprint(api_response)
|
|
596
|
+
except Exception as e:
|
|
597
|
+
print("Exception when calling TestServiceApi->test_service_import_test_cases_from_library: %s\n" % e)
|
|
598
|
+
```
|
|
599
|
+
|
|
600
|
+
|
|
601
|
+
|
|
602
|
+
### Parameters
|
|
603
|
+
|
|
604
|
+
|
|
605
|
+
Name | Type | Description | Notes
|
|
606
|
+
------------- | ------------- | ------------- | -------------
|
|
607
|
+
**name** | **str**| Required. The Test for which to get TestCases. |
|
|
608
|
+
**body** | [**TestServiceImportTestCasesFromLibraryRequest**](TestServiceImportTestCasesFromLibraryRequest.md)| |
|
|
609
|
+
|
|
610
|
+
### Return type
|
|
611
|
+
|
|
612
|
+
[**V1ImportTestCasesFromLibraryResponse**](V1ImportTestCasesFromLibraryResponse.md)
|
|
613
|
+
|
|
614
|
+
### Authorization
|
|
615
|
+
|
|
616
|
+
No authorization required
|
|
617
|
+
|
|
618
|
+
### HTTP request headers
|
|
619
|
+
|
|
620
|
+
- **Content-Type**: application/json
|
|
621
|
+
- **Accept**: application/json
|
|
622
|
+
|
|
623
|
+
### HTTP response details
|
|
624
|
+
|
|
625
|
+
| Status code | Description | Response headers |
|
|
626
|
+
|-------------|-------------|------------------|
|
|
627
|
+
**200** | A successful response. | - |
|
|
628
|
+
**0** | An unexpected error response. | - |
|
|
629
|
+
|
|
630
|
+
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
|
631
|
+
|
|
490
632
|
# **test_service_list_most_recent_tests**
|
|
491
|
-
> V1ListMostRecentTestsResponse test_service_list_most_recent_tests(limit=limit)
|
|
633
|
+
> V1ListMostRecentTestsResponse test_service_list_most_recent_tests(limit=limit, filter=filter)
|
|
492
634
|
|
|
493
635
|
|
|
494
636
|
|
|
@@ -513,9 +655,10 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:
|
|
|
513
655
|
# Create an instance of the API class
|
|
514
656
|
api_instance = eval_studio_client.api.TestServiceApi(api_client)
|
|
515
657
|
limit = 56 # int | Optional. The max number of the most recent Tests to retrieve. Use -1 to retrieve all. Defaults to 3. (optional)
|
|
658
|
+
filter = 'filter_example' # str | Optional. If specified, only leaderboards matching the filter will be returned. Attempts to implement AIP-160 (https://aip.dev/160), although not all fields, operators and features are supported. Supported fields: - type - only '=' operator is supported (optional)
|
|
516
659
|
|
|
517
660
|
try:
|
|
518
|
-
api_response = api_instance.test_service_list_most_recent_tests(limit=limit)
|
|
661
|
+
api_response = api_instance.test_service_list_most_recent_tests(limit=limit, filter=filter)
|
|
519
662
|
print("The response of TestServiceApi->test_service_list_most_recent_tests:\n")
|
|
520
663
|
pprint(api_response)
|
|
521
664
|
except Exception as e:
|
|
@@ -530,6 +673,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:
|
|
|
530
673
|
Name | Type | Description | Notes
|
|
531
674
|
------------- | ------------- | ------------- | -------------
|
|
532
675
|
**limit** | **int**| Optional. The max number of the most recent Tests to retrieve. Use -1 to retrieve all. Defaults to 3. | [optional]
|
|
676
|
+
**filter** | **str**| Optional. If specified, only leaderboards matching the filter will be returned. Attempts to implement AIP-160 (https://aip.dev/160), although not all fields, operators and features are supported. Supported fields: - type - only '=' operator is supported | [optional]
|
|
533
677
|
|
|
534
678
|
### Return type
|
|
535
679
|
|
|
@@ -553,8 +697,77 @@ No authorization required

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

+# **test_service_list_test_case_library_items**
+> V1ListTestCaseLibraryItemsResponse test_service_list_test_case_library_items(name, body)
+
+
+
+### Example
+
+
+```python
+import eval_studio_client.api
+from eval_studio_client.api.models.test_service_list_test_case_library_items_request import TestServiceListTestCaseLibraryItemsRequest
+from eval_studio_client.api.models.v1_list_test_case_library_items_response import V1ListTestCaseLibraryItemsResponse
+from eval_studio_client.api.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to http://localhost
+# See configuration.py for a list of all supported configuration parameters.
+configuration = eval_studio_client.api.Configuration(
+    host = "http://localhost"
+)
+
+
+# Enter a context with an instance of the API client
+with eval_studio_client.api.ApiClient(configuration) as api_client:
+    # Create an instance of the API class
+    api_instance = eval_studio_client.api.TestServiceApi(api_client)
+    name = 'name_example' # str | Required. The Test for which to list the items.
+    body = eval_studio_client.api.TestServiceListTestCaseLibraryItemsRequest() # TestServiceListTestCaseLibraryItemsRequest |
+
+    try:
+        api_response = api_instance.test_service_list_test_case_library_items(name, body)
+        print("The response of TestServiceApi->test_service_list_test_case_library_items:\n")
+        pprint(api_response)
+    except Exception as e:
+        print("Exception when calling TestServiceApi->test_service_list_test_case_library_items: %s\n" % e)
+```
+
+
+### Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+**name** | **str**| Required. The Test for which to list the items. |
+**body** | [**TestServiceListTestCaseLibraryItemsRequest**](TestServiceListTestCaseLibraryItemsRequest.md)| |
+
+### Return type
+
+[**V1ListTestCaseLibraryItemsResponse**](V1ListTestCaseLibraryItemsResponse.md)
+
+### Authorization
+
+No authorization required
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+### HTTP response details
+
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | A successful response. | - |
+**0** | An unexpected error response. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
# **test_service_list_tests**
-> V1ListTestsResponse test_service_list_tests(order_by=order_by)
+> V1ListTestsResponse test_service_list_tests(filter=filter, order_by=order_by)


@@ -578,10 +791,11 @@ configuration = eval_studio_client.api.Configuration(
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.TestServiceApi(api_client)
+    filter = 'filter_example' # str | Optional. If specified, only leaderboards matching the filter will be returned. Attempts to implement AIP-160 (https://aip.dev/160), although not all fields, operators and features are supported. Supported fields: - type - only '=' operator is supported (optional)
    order_by = 'order_by_example' # str | If specified, the returned tests will be ordered by the specified field. Attempts to implement AIP-130 (https://google.aip.dev/132#ordering), although not all features are supported yet. Supported fields: - create_time - update_time (optional)

    try:
-        api_response = api_instance.test_service_list_tests(order_by=order_by)
+        api_response = api_instance.test_service_list_tests(filter=filter, order_by=order_by)
        print("The response of TestServiceApi->test_service_list_tests:\n")
        pprint(api_response)
    except Exception as e:
@@ -595,6 +809,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:

Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
+**filter** | **str**| Optional. If specified, only leaderboards matching the filter will be returned. Attempts to implement AIP-160 (https://aip.dev/160), although not all fields, operators and features are supported. Supported fields: - type - only '=' operator is supported | [optional]
**order_by** | **str**| If specified, the returned tests will be ordered by the specified field. Attempts to implement AIP-130 (https://google.aip.dev/132#ordering), although not all features are supported yet. Supported fields: - create_time - update_time | [optional]

### Return type
@@ -688,6 +903,75 @@ No authorization required

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

+# **test_service_perturb_test_in_place**
+> V1PerturbTestInPlaceResponse test_service_perturb_test_in_place(name, body)
+
+
+
+### Example
+
+
+```python
+import eval_studio_client.api
+from eval_studio_client.api.models.test_service_perturb_test_in_place_request import TestServicePerturbTestInPlaceRequest
+from eval_studio_client.api.models.v1_perturb_test_in_place_response import V1PerturbTestInPlaceResponse
+from eval_studio_client.api.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to http://localhost
+# See configuration.py for a list of all supported configuration parameters.
+configuration = eval_studio_client.api.Configuration(
+    host = "http://localhost"
+)
+
+
+# Enter a context with an instance of the API client
+with eval_studio_client.api.ApiClient(configuration) as api_client:
+    # Create an instance of the API class
+    api_instance = eval_studio_client.api.TestServiceApi(api_client)
+    name = 'name_example' # str | Required. The name of the Test to perturb.
+    body = eval_studio_client.api.TestServicePerturbTestInPlaceRequest() # TestServicePerturbTestInPlaceRequest |
+
+    try:
+        api_response = api_instance.test_service_perturb_test_in_place(name, body)
+        print("The response of TestServiceApi->test_service_perturb_test_in_place:\n")
+        pprint(api_response)
+    except Exception as e:
+        print("Exception when calling TestServiceApi->test_service_perturb_test_in_place: %s\n" % e)
+```
+
+
+### Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+**name** | **str**| Required. The name of the Test to perturb. |
+**body** | [**TestServicePerturbTestInPlaceRequest**](TestServicePerturbTestInPlaceRequest.md)| |
+
+### Return type
+
+[**V1PerturbTestInPlaceResponse**](V1PerturbTestInPlaceResponse.md)
+
+### Authorization
+
+No authorization required
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+### HTTP response details
+
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | A successful response. | - |
+**0** | An unexpected error response. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
# **test_service_update_test**
> V1UpdateTestResponse test_service_update_test(test_name, test)

@@ -0,0 +1,30 @@
+# TestServiceCloneTestRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**new_test_display_name** | **str** | Optional. Name of the newly created test. | [optional]
+**new_test_description** | **str** | Optional. Description of the newly created Test. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.test_service_clone_test_request import TestServiceCloneTestRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TestServiceCloneTestRequest from a JSON string
+test_service_clone_test_request_instance = TestServiceCloneTestRequest.from_json(json)
+# print the JSON string representation of the object
+print(TestServiceCloneTestRequest.to_json())
+
+# convert the object into a dict
+test_service_clone_test_request_dict = test_service_clone_test_request_instance.to_dict()
+# create an instance of TestServiceCloneTestRequest from a dict
+test_service_clone_test_request_from_dict = TestServiceCloneTestRequest.from_dict(test_service_clone_test_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
@@ -9,7 +9,9 @@ Name | Type | Description | Notes
**model** | **str** | Optional. The Model to use for generating TestCases. If not specified, the default RAG h2oGPTe will be used. Error is returned, if no default model is specified and this field is not set. | [optional]
**base_llm_model** | **str** | Optional. The base LLM model to use for generating the prompts. Selected automatically if not specified. | [optional]
**generators** | [**List[V1TestCasesGenerator]**](V1TestCasesGenerator.md) | Optional. Generators to use for generation. If not specified, all generators are selected. | [optional]
-**h2ogpte_collection_id** | **str** | Optional.
+**h2ogpte_collection_id** | **str** | Optional. ID of the h2oGPTe collection to use. If provided, documents referenced by Test and any specified chunks are ignored. This field is required if Test does not reference any documents and no chunks are provided. If this field is left empty, a temporary collection will be created. | [optional]
+**topics** | **List[str]** | Optional. Topics to generate questions for. If not specified, use document summarization as topic generation. | [optional]
+**chunks** | [**List[V1Context]**](V1Context.md) | Optional. The list of chunks to use for generation. If set, the Documents assigned to the Test and h2ogpte_collection_id are ignored. | [optional]

## Example

@@ -0,0 +1,32 @@
+# TestServiceImportTestCasesFromLibraryRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**operation** | **str** | Required. The Operation processing this prompt library retrieval process. | [optional]
+**test_suite_url** | **str** | Required. The URL of the library test suite to get TestCases from (sample). | [optional]
+**count** | **int** | Required. The number of TestCases to get from the library. | [optional]
+**test_document_urls** | **List[str]** | Optional. The list of target Test corpus document URLs to skip when returning library TestCases corpus. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.test_service_import_test_cases_from_library_request import TestServiceImportTestCasesFromLibraryRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TestServiceImportTestCasesFromLibraryRequest from a JSON string
+test_service_import_test_cases_from_library_request_instance = TestServiceImportTestCasesFromLibraryRequest.from_json(json)
+# print the JSON string representation of the object
+print(TestServiceImportTestCasesFromLibraryRequest.to_json())
+
+# convert the object into a dict
+test_service_import_test_cases_from_library_request_dict = test_service_import_test_cases_from_library_request_instance.to_dict()
+# create an instance of TestServiceImportTestCasesFromLibraryRequest from a dict
+test_service_import_test_cases_from_library_request_from_dict = TestServiceImportTestCasesFromLibraryRequest.from_dict(test_service_import_test_cases_from_library_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
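A hedged sketch of a populated import request, assuming the generated model accepts its documented properties as keyword arguments; every value below is a placeholder rather than anything shipped with this package:

```python
from eval_studio_client.api.models.test_service_import_test_cases_from_library_request import (
    TestServiceImportTestCasesFromLibraryRequest,
)

# Placeholder values only; keyword-argument construction is assumed to mirror the
# documented properties (operation, test_suite_url, count, test_document_urls).
request = TestServiceImportTestCasesFromLibraryRequest(
    operation="operation_example",
    test_suite_url="https://example.com/library/test-suite.json",
    count=10,
    test_document_urls=["https://example.com/docs/already-in-corpus.pdf"],
)
print(request.to_json())
```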
@@ -0,0 +1,35 @@
+# TestServiceListTestCaseLibraryItemsRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**filter_by_categories** | **List[str]** | Optional. Filter by categories. | [optional]
+**filter_by_purposes** | **List[str]** | Optional. Filter by purposes. | [optional]
+**filter_by_evaluates** | **List[str]** | Optional. Filter by evaluates. | [optional]
+**filter_by_origin** | **str** | Optional. Filter by origin. | [optional]
+**filter_by_test_case_count** | **int** | Optional. Filter by test case count. | [optional]
+**filter_by_test_count** | **int** | Optional. Filter by test count. | [optional]
+**filter_by_fts** | **str** | Optional. Filter by FTS. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.test_service_list_test_case_library_items_request import TestServiceListTestCaseLibraryItemsRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TestServiceListTestCaseLibraryItemsRequest from a JSON string
+test_service_list_test_case_library_items_request_instance = TestServiceListTestCaseLibraryItemsRequest.from_json(json)
+# print the JSON string representation of the object
+print(TestServiceListTestCaseLibraryItemsRequest.to_json())
+
+# convert the object into a dict
+test_service_list_test_case_library_items_request_dict = test_service_list_test_case_library_items_request_instance.to_dict()
+# create an instance of TestServiceListTestCaseLibraryItemsRequest from a dict
+test_service_list_test_case_library_items_request_from_dict = TestServiceListTestCaseLibraryItemsRequest.from_dict(test_service_list_test_case_library_items_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
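To complement the generated round-trip snippet above, a minimal sketch of a populated request passed to the `test_service_list_test_case_library_items` endpoint documented earlier in this diff; keyword-argument construction and all field values are assumptions, not package contents:

```python
import eval_studio_client.api
from eval_studio_client.api.models.test_service_list_test_case_library_items_request import (
    TestServiceListTestCaseLibraryItemsRequest,
)

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    api_instance = eval_studio_client.api.TestServiceApi(api_client)
    name = 'name_example'  # str | Required. The Test for which to list the items.
    # Placeholder filter values; keyword-argument construction is assumed to mirror
    # the properties documented above.
    body = TestServiceListTestCaseLibraryItemsRequest(
        filter_by_categories=["category_example"],
        filter_by_fts="full-text search example",
    )
    api_response = api_instance.test_service_list_test_case_library_items(name, body)
    print(api_response)
```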
@@ -0,0 +1,30 @@
+# TestServicePerturbTestInPlaceRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**perturbator_configurations** | [**List[V1PerturbatorConfiguration]**](V1PerturbatorConfiguration.md) | Required. PerturbatorConfigurations to apply to the Test. | [optional]
+**test_case_names** | **List[str]** | Optional. Perturbation apply only to selected testCases. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.test_service_perturb_test_in_place_request import TestServicePerturbTestInPlaceRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TestServicePerturbTestInPlaceRequest from a JSON string
+test_service_perturb_test_in_place_request_instance = TestServicePerturbTestInPlaceRequest.from_json(json)
+# print the JSON string representation of the object
+print(TestServicePerturbTestInPlaceRequest.to_json())
+
+# convert the object into a dict
+test_service_perturb_test_in_place_request_dict = test_service_perturb_test_in_place_request_instance.to_dict()
+# create an instance of TestServicePerturbTestInPlaceRequest from a dict
+test_service_perturb_test_in_place_request_from_dict = TestServicePerturbTestInPlaceRequest.from_dict(test_service_perturb_test_in_place_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
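A hedged sketch tying this model to the `test_service_perturb_test_in_place` endpoint documented earlier in this diff; keyword-argument construction and the test case name are assumptions, and the required `perturbator_configurations` entries are left unset because `V1PerturbatorConfiguration`'s fields do not appear in this diff:

```python
import eval_studio_client.api
from eval_studio_client.api.models.test_service_perturb_test_in_place_request import (
    TestServicePerturbTestInPlaceRequest,
)

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    api_instance = eval_studio_client.api.TestServiceApi(api_client)
    name = 'name_example'  # str | Required. The name of the Test to perturb.
    # Placeholder test case name; perturbator_configurations is documented as required,
    # but V1PerturbatorConfiguration's fields are not part of this diff, so it is
    # omitted from this sketch and must be filled in before a real call succeeds.
    body = TestServicePerturbTestInPlaceRequest(
        test_case_names=["test_case_example"],
    )
    api_response = api_instance.test_service_perturb_test_in_place(name, body)
    print(api_response)
```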
@@ -8,6 +8,7 @@ Name | Type | Description | Notes
**perturbator_configurations** | [**List[V1PerturbatorConfiguration]**](V1PerturbatorConfiguration.md) | Required. PerturbatorConfigurations to apply to the Test. | [optional]
**new_test_display_name** | **str** | Required. Name of the newly created test. | [optional]
**new_test_description** | **str** | Optional. Description of the newly created Test. | [optional]
+**test_case_names** | **List[str]** | Optional. Perturbation apply only to selected testCases. | [optional]

## Example

@@ -0,0 +1,29 @@
+# V1AbortOperationResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**operation** | [**V1Operation**](V1Operation.md) | | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_abort_operation_response import V1AbortOperationResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1AbortOperationResponse from a JSON string
+v1_abort_operation_response_instance = V1AbortOperationResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1AbortOperationResponse.to_json())
+
+# convert the object into a dict
+v1_abort_operation_response_dict = v1_abort_operation_response_instance.to_dict()
+# create an instance of V1AbortOperationResponse from a dict
+v1_abort_operation_response_from_dict = V1AbortOperationResponse.from_dict(v1_abort_operation_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
@@ -0,0 +1,29 @@
+# V1BatchDeleteWorkflowsRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**names** | **List[str]** | Required. The names of the Workflows to delete. A maximum of 1000 can be specified. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_delete_workflows_request import V1BatchDeleteWorkflowsRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchDeleteWorkflowsRequest from a JSON string
+v1_batch_delete_workflows_request_instance = V1BatchDeleteWorkflowsRequest.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchDeleteWorkflowsRequest.to_json())
+
+# convert the object into a dict
+v1_batch_delete_workflows_request_dict = v1_batch_delete_workflows_request_instance.to_dict()
+# create an instance of V1BatchDeleteWorkflowsRequest from a dict
+v1_batch_delete_workflows_request_from_dict = V1BatchDeleteWorkflowsRequest.from_dict(v1_batch_delete_workflows_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
@@ -0,0 +1,29 @@
+# V1BatchDeleteWorkflowsResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**workflows** | [**List[V1Workflow]**](V1Workflow.md) | The deleted Workflows. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_delete_workflows_response import V1BatchDeleteWorkflowsResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchDeleteWorkflowsResponse from a JSON string
+v1_batch_delete_workflows_response_instance = V1BatchDeleteWorkflowsResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchDeleteWorkflowsResponse.to_json())
+
+# convert the object into a dict
+v1_batch_delete_workflows_response_dict = v1_batch_delete_workflows_response_instance.to_dict()
+# create an instance of V1BatchDeleteWorkflowsResponse from a dict
+v1_batch_delete_workflows_response_from_dict = V1BatchDeleteWorkflowsResponse.from_dict(v1_batch_delete_workflows_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
@@ -0,0 +1,29 @@
+# V1BatchGetWorkflowEdgesResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**edges** | [**List[V1WorkflowEdge]**](V1WorkflowEdge.md) | The WorkflowEdges requested. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_batch_get_workflow_edges_response import V1BatchGetWorkflowEdgesResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1BatchGetWorkflowEdgesResponse from a JSON string
+v1_batch_get_workflow_edges_response_instance = V1BatchGetWorkflowEdgesResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1BatchGetWorkflowEdgesResponse.to_json())
+
+# convert the object into a dict
+v1_batch_get_workflow_edges_response_dict = v1_batch_get_workflow_edges_response_instance.to_dict()
+# create an instance of V1BatchGetWorkflowEdgesResponse from a dict
+v1_batch_get_workflow_edges_response_from_dict = V1BatchGetWorkflowEdgesResponse.from_dict(v1_batch_get_workflow_edges_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+