eval-studio-client 1.0.1__py3-none-any.whl → 1.1.0a5__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- eval_studio_client/api/__init__.py +36 -1
- eval_studio_client/api/api/__init__.py +4 -0
- eval_studio_client/api/api/adversarial_inputs_service_api.py +321 -0
- eval_studio_client/api/api/dashboard_service_api.py +1 -1
- eval_studio_client/api/api/document_service_api.py +1 -1
- eval_studio_client/api/api/evaluation_service_api.py +1 -1
- eval_studio_client/api/api/evaluator_service_api.py +1 -1
- eval_studio_client/api/api/generated_questions_validation_service_api.py +321 -0
- eval_studio_client/api/api/human_calibration_service_api.py +1 -1
- eval_studio_client/api/api/info_service_api.py +1 -1
- eval_studio_client/api/api/leaderboard_report_service_api.py +292 -0
- eval_studio_client/api/api/leaderboard_service_api.py +17 -17
- eval_studio_client/api/api/model_service_api.py +17 -17
- eval_studio_client/api/api/operation_progress_service_api.py +1 -1
- eval_studio_client/api/api/operation_service_api.py +272 -17
- eval_studio_client/api/api/perturbation_service_api.py +1 -1
- eval_studio_client/api/api/perturbator_service_api.py +17 -17
- eval_studio_client/api/api/prompt_generation_service_api.py +1 -1
- eval_studio_client/api/api/prompt_library_service_api.py +1 -1
- eval_studio_client/api/api/test_case_relationship_service_api.py +292 -0
- eval_studio_client/api/api/test_case_service_api.py +17 -17
- eval_studio_client/api/api/test_class_service_api.py +17 -17
- eval_studio_client/api/api/test_lab_service_api.py +1 -1
- eval_studio_client/api/api/test_service_api.py +585 -17
- eval_studio_client/api/api/who_am_i_service_api.py +1 -1
- eval_studio_client/api/api/workflow_edge_service_api.py +541 -2
- eval_studio_client/api/api/workflow_node_service_api.py +923 -126
- eval_studio_client/api/api/workflow_service_api.py +317 -33
- eval_studio_client/api/api_client.py +1 -1
- eval_studio_client/api/configuration.py +1 -1
- eval_studio_client/api/docs/AdversarialInputsServiceApi.md +78 -0
- eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +45 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceApi.md +78 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.md +30 -0
- eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +5 -5
- eval_studio_client/api/docs/ModelServiceApi.md +5 -5
- eval_studio_client/api/docs/OperationServiceApi.md +72 -5
- eval_studio_client/api/docs/PerturbatorServiceApi.md +5 -5
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +2 -1
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +2 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +3 -0
- eval_studio_client/api/docs/TestCaseRelationshipServiceApi.md +75 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +5 -5
- eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
- eval_studio_client/api/docs/TestServiceApi.md +145 -5
- eval_studio_client/api/docs/TestServiceCloneTestRequest.md +30 -0
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +3 -2
- eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md +30 -0
- eval_studio_client/api/docs/V1AbortOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneTestResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Context.md +32 -0
- eval_studio_client/api/docs/V1CreateWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1GeneratedTestCase.md +30 -0
- eval_studio_client/api/docs/V1GetLeaderboardReportResponse.md +29 -0
- eval_studio_client/api/docs/V1Info.md +3 -0
- eval_studio_client/api/docs/V1InitWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1LeaderboardReport.md +32 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputData.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluator.md +42 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluatorParameter.md +38 -0
- eval_studio_client/api/docs/V1LeaderboardReportExplanation.md +34 -0
- eval_studio_client/api/docs/V1LeaderboardReportMetricsMetaEntry.md +41 -0
- eval_studio_client/api/docs/V1LeaderboardReportModel.md +39 -0
- eval_studio_client/api/docs/V1LeaderboardReportResult.md +45 -0
- eval_studio_client/api/docs/V1LeaderboardReportResultRelationship.md +32 -0
- eval_studio_client/api/docs/V1ListTestCaseRelationshipsResponse.md +29 -0
- eval_studio_client/api/docs/V1MetricScore.md +31 -0
- eval_studio_client/api/docs/V1MetricScores.md +29 -0
- eval_studio_client/api/docs/V1PerturbTestInPlaceResponse.md +29 -0
- eval_studio_client/api/docs/V1RepeatedString.md +29 -0
- eval_studio_client/api/docs/V1ResetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1TestCase.md +2 -0
- eval_studio_client/api/docs/V1Workflow.md +3 -0
- eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +139 -0
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +221 -12
- eval_studio_client/api/docs/WorkflowServiceApi.md +81 -10
- eval_studio_client/api/docs/WorkflowServiceCloneWorkflowRequest.md +33 -0
- eval_studio_client/api/exceptions.py +1 -1
- eval_studio_client/api/models/__init__.py +32 -1
- eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +143 -0
- eval_studio_client/api/models/generated_questions_validation_service_validate_generated_questions_request.py +97 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +1 -1
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +13 -4
- eval_studio_client/api/models/protobuf_any.py +1 -1
- eval_studio_client/api/models/protobuf_null_value.py +1 -1
- eval_studio_client/api/models/required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_document_to_update.py +1 -1
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_model_to_update.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_case_to_update.py +10 -3
- eval_studio_client/api/models/required_the_test_to_update.py +1 -1
- eval_studio_client/api/models/required_the_updated_workflow.py +11 -3
- eval_studio_client/api/models/required_the_updated_workflow_node.py +1 -1
- eval_studio_client/api/models/rpc_status.py +1 -1
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/models/test_service_clone_test_request.py +89 -0
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +7 -5
- eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +1 -1
- eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +1 -1
- eval_studio_client/api/models/test_service_perturb_test_in_place_request.py +97 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +1 -1
- eval_studio_client/api/models/v1_abort_operation_response.py +91 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/models/v1_check_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_clone_test_response.py +91 -0
- eval_studio_client/api/models/v1_clone_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_collection_info.py +1 -1
- eval_studio_client/api/models/v1_context.py +93 -0
- eval_studio_client/api/models/v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_document_response.py +1 -1
- eval_studio_client/api/models/v1_create_evaluation_request.py +1 -1
- eval_studio_client/api/models/v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/models/v1_create_model_response.py +1 -1
- eval_studio_client/api/models/v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_response.py +1 -1
- eval_studio_client/api/models/v1_create_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_dashboard.py +1 -1
- eval_studio_client/api/models/v1_dashboard_status.py +1 -1
- eval_studio_client/api/models/v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_document_response.py +1 -1
- eval_studio_client/api/models/v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_model_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_document.py +1 -1
- eval_studio_client/api/models/v1_estimate_threshold_request.py +1 -1
- eval_studio_client/api/models/v1_evaluation_test.py +1 -1
- eval_studio_client/api/models/v1_evaluator.py +1 -1
- eval_studio_client/api/models/v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/models/v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/models/v1_evaluator_view.py +1 -1
- eval_studio_client/api/models/v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/models/v1_find_all_test_cases_by_id_response.py +1 -1
- eval_studio_client/api/models/v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_generated_test_case.py +101 -0
- eval_studio_client/api/models/v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_document_response.py +1 -1
- eval_studio_client/api/models/v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_get_info_response.py +1 -1
- eval_studio_client/api/models/v1_get_leaderboard_report_response.py +91 -0
- eval_studio_client/api/models/v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_model_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_response.py +1 -1
- eval_studio_client/api/models/v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_class_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_import_evaluation_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_request.py +1 -1
- eval_studio_client/api/models/v1_info.py +10 -4
- eval_studio_client/api/models/v1_init_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_insight.py +1 -1
- eval_studio_client/api/models/v1_labeled_test_case.py +1 -1
- eval_studio_client/api/models/v1_leaderboard.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_report.py +115 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_data.py +93 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +101 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator.py +155 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator_parameter.py +109 -0
- eval_studio_client/api/models/v1_leaderboard_report_explanation.py +103 -0
- eval_studio_client/api/models/v1_leaderboard_report_metrics_meta_entry.py +129 -0
- eval_studio_client/api/models/v1_leaderboard_report_model.py +121 -0
- eval_studio_client/api/models/v1_leaderboard_report_result.py +175 -0
- eval_studio_client/api/models/v1_leaderboard_report_result_relationship.py +97 -0
- eval_studio_client/api/models/v1_leaderboard_status.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_type.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_view.py +1 -1
- eval_studio_client/api/models/v1_list_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_documents_response.py +1 -1
- eval_studio_client/api/models/v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_operations_response.py +1 -1
- eval_studio_client/api/models/v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/models/v1_list_prompt_library_items_response.py +1 -1
- eval_studio_client/api/models/v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_library_items_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_relationships_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/models/v1_list_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_workflows_response.py +1 -1
- eval_studio_client/api/models/v1_metric_score.py +89 -0
- eval_studio_client/api/models/v1_metric_scores.py +95 -0
- eval_studio_client/api/models/v1_model.py +1 -1
- eval_studio_client/api/models/v1_model_type.py +1 -1
- eval_studio_client/api/models/v1_operation.py +1 -1
- eval_studio_client/api/models/v1_operation_progress.py +1 -1
- eval_studio_client/api/models/v1_perturb_test_in_place_response.py +91 -0
- eval_studio_client/api/models/v1_perturb_test_response.py +1 -1
- eval_studio_client/api/models/v1_perturbator.py +1 -1
- eval_studio_client/api/models/v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/models/v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/models/v1_problem_and_action.py +1 -1
- eval_studio_client/api/models/v1_process_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_prompt_library_item.py +1 -1
- eval_studio_client/api/models/v1_repeated_string.py +87 -0
- eval_studio_client/api/models/v1_reset_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_test.py +1 -1
- eval_studio_client/api/models/v1_test_case.py +10 -3
- eval_studio_client/api/models/v1_test_case_relationship.py +1 -1
- eval_studio_client/api/models/v1_test_cases_generator.py +1 -1
- eval_studio_client/api/models/v1_test_class.py +1 -1
- eval_studio_client/api/models/v1_test_class_type.py +1 -1
- eval_studio_client/api/models/v1_test_lab.py +1 -1
- eval_studio_client/api/models/v1_test_suite_evaluates.py +1 -1
- eval_studio_client/api/models/v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_document_response.py +1 -1
- eval_studio_client/api/models/v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_model_response.py +1 -1
- eval_studio_client/api/models/v1_update_operation_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_who_am_i_response.py +1 -1
- eval_studio_client/api/models/v1_workflow.py +11 -3
- eval_studio_client/api/models/v1_workflow_edge.py +1 -1
- eval_studio_client/api/models/v1_workflow_edge_type.py +1 -1
- eval_studio_client/api/models/v1_workflow_node.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_artifact.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_artifacts.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_attributes.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_status.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_type.py +4 -1
- eval_studio_client/api/models/v1_workflow_node_view.py +1 -1
- eval_studio_client/api/models/v1_workflow_type.py +1 -1
- eval_studio_client/api/models/workflow_service_clone_workflow_request.py +95 -0
- eval_studio_client/api/rest.py +1 -1
- eval_studio_client/api/test/test_adversarial_inputs_service_api.py +37 -0
- eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +128 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
- eval_studio_client/api/test/test_document_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
- eval_studio_client/api/test/test_generated_questions_validation_service_api.py +37 -0
- eval_studio_client/api/test/test_generated_questions_validation_service_validate_generated_questions_request.py +83 -0
- eval_studio_client/api/test/test_human_calibration_service_api.py +1 -1
- eval_studio_client/api/test/test_info_service_api.py +1 -1
- eval_studio_client/api/test/test_leaderboard_report_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
- eval_studio_client/api/test/test_model_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_service_api.py +7 -1
- eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +6 -2
- eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +9 -4
- eval_studio_client/api/test/test_prompt_library_service_api.py +1 -1
- eval_studio_client/api/test/test_protobuf_any.py +1 -1
- eval_studio_client/api/test/test_protobuf_null_value.py +1 -1
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +6 -2
- eval_studio_client/api/test/test_required_the_test_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_updated_workflow.py +5 -2
- eval_studio_client/api/test/test_required_the_updated_workflow_node.py +1 -1
- eval_studio_client/api/test/test_rpc_status.py +1 -1
- eval_studio_client/api/test/test_test_case_relationship_service_api.py +37 -0
- eval_studio_client/api/test/test_test_case_service_api.py +1 -1
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_test_class_service_api.py +1 -1
- eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
- eval_studio_client/api/test/test_test_service_api.py +13 -1
- eval_studio_client/api/test/test_test_service_clone_test_request.py +52 -0
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +4 -1
- eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +1 -1
- eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +1 -1
- eval_studio_client/api/test/test_test_service_perturb_test_in_place_request.py +59 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +1 -1
- eval_studio_client/api/test/test_v1_abort_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +6 -2
- eval_studio_client/api/test/test_v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +5 -2
- eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_check_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_clone_test_response.py +67 -0
- eval_studio_client/api/test/test_v1_clone_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_collection_info.py +1 -1
- eval_studio_client/api/test/test_v1_context.py +54 -0
- eval_studio_client/api/test/test_v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +6 -2
- eval_studio_client/api/test/test_v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_create_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_create_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_dashboard.py +1 -1
- eval_studio_client/api/test/test_v1_dashboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_delete_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_document.py +1 -1
- eval_studio_client/api/test/test_v1_estimate_threshold_request.py +1 -1
- eval_studio_client/api/test/test_v1_evaluation_test.py +6 -2
- eval_studio_client/api/test/test_v1_evaluator.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_view.py +1 -1
- eval_studio_client/api/test/test_v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +6 -2
- eval_studio_client/api/test/test_v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/test/test_v1_generated_test_case.py +79 -0
- eval_studio_client/api/test/test_v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_info_response.py +7 -2
- eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +175 -0
- eval_studio_client/api/test/test_v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_get_test_class_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_import_evaluation_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_v1_info.py +7 -2
- eval_studio_client/api/test/test_v1_init_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_insight.py +1 -1
- eval_studio_client/api/test/test_v1_labeled_test_case.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_report.py +174 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py +52 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +56 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py +114 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py +63 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py +58 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py +66 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_model.py +62 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result.py +92 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_type.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_view.py +1 -1
- eval_studio_client/api/test/test_v1_list_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_relationships_response.py +56 -0
- eval_studio_client/api/test/test_v1_list_test_cases_response.py +6 -2
- eval_studio_client/api/test/test_v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_workflows_response.py +5 -2
- eval_studio_client/api/test/test_v1_metric_score.py +52 -0
- eval_studio_client/api/test/test_v1_metric_scores.py +55 -0
- eval_studio_client/api/test/test_v1_model.py +1 -1
- eval_studio_client/api/test/test_v1_model_type.py +1 -1
- eval_studio_client/api/test/test_v1_operation.py +1 -1
- eval_studio_client/api/test/test_v1_operation_progress.py +1 -1
- eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +67 -0
- eval_studio_client/api/test/test_v1_perturb_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/test/test_v1_problem_and_action.py +1 -1
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_prompt_library_item.py +1 -1
- eval_studio_client/api/test/test_v1_repeated_string.py +53 -0
- eval_studio_client/api/test/test_v1_reset_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_test.py +1 -1
- eval_studio_client/api/test/test_v1_test_case.py +6 -2
- eval_studio_client/api/test/test_v1_test_case_relationship.py +1 -1
- eval_studio_client/api/test/test_v1_test_cases_generator.py +1 -1
- eval_studio_client/api/test/test_v1_test_class.py +1 -1
- eval_studio_client/api/test/test_v1_test_class_type.py +1 -1
- eval_studio_client/api/test/test_v1_test_lab.py +1 -1
- eval_studio_client/api/test/test_v1_test_suite_evaluates.py +1 -1
- eval_studio_client/api/test/test_v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_update_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_who_am_i_response.py +1 -1
- eval_studio_client/api/test/test_v1_workflow.py +5 -2
- eval_studio_client/api/test/test_v1_workflow_edge.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_edge_type.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_artifact.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_attributes.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_status.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_type.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_view.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_type.py +1 -1
- eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
- eval_studio_client/api/test/test_workflow_edge_service_api.py +15 -1
- eval_studio_client/api/test/test_workflow_node_service_api.py +23 -2
- eval_studio_client/api/test/test_workflow_service_api.py +8 -1
- eval_studio_client/api/test/test_workflow_service_clone_workflow_request.py +55 -0
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +1633 -219
- eval_studio_client/tests.py +103 -8
- {eval_studio_client-1.0.1.dist-info → eval_studio_client-1.1.0a5.dist-info}/METADATA +2 -2
- eval_studio_client-1.1.0a5.dist-info/RECORD +720 -0
- {eval_studio_client-1.0.1.dist-info → eval_studio_client-1.1.0a5.dist-info}/WHEEL +1 -1
- eval_studio_client-1.0.1.dist-info/RECORD +0 -615
eval_studio_client/api/docs/TestServiceApi.md

@@ -7,15 +7,17 @@ Method | HTTP request | Description
[**test_service_batch_delete_tests**](TestServiceApi.md#test_service_batch_delete_tests) | **POST** /v1/tests:batchDelete |
[**test_service_batch_get_tests**](TestServiceApi.md#test_service_batch_get_tests) | **GET** /v1/tests:batchGet |
[**test_service_batch_import_tests**](TestServiceApi.md#test_service_batch_import_tests) | **POST** /v1/tests:batchImport |
+[**test_service_clone_test**](TestServiceApi.md#test_service_clone_test) | **POST** /v1/{name}:clone |
[**test_service_create_test**](TestServiceApi.md#test_service_create_test) | **POST** /v1/tests |
[**test_service_delete_test**](TestServiceApi.md#test_service_delete_test) | **DELETE** /v1/{name_6} |
[**test_service_generate_test_cases**](TestServiceApi.md#test_service_generate_test_cases) | **POST** /v1/{name}:generateTestCases |
-[**test_service_get_test**](TestServiceApi.md#test_service_get_test) | **GET** /v1/{
+[**test_service_get_test**](TestServiceApi.md#test_service_get_test) | **GET** /v1/{name_10} |
[**test_service_import_test_cases_from_library**](TestServiceApi.md#test_service_import_test_cases_from_library) | **POST** /v1/{name}:importTestCasesFromLibrary |
[**test_service_list_most_recent_tests**](TestServiceApi.md#test_service_list_most_recent_tests) | **GET** /v1/tests:mostRecent |
[**test_service_list_test_case_library_items**](TestServiceApi.md#test_service_list_test_case_library_items) | **POST** /v1/{name}:listTestCaseLibraryItems |
[**test_service_list_tests**](TestServiceApi.md#test_service_list_tests) | **GET** /v1/tests |
[**test_service_perturb_test**](TestServiceApi.md#test_service_perturb_test) | **POST** /v1/{name}:perturb |
+[**test_service_perturb_test_in_place**](TestServiceApi.md#test_service_perturb_test_in_place) | **POST** /v1/{name}:perturbInPlace |
[**test_service_update_test**](TestServiceApi.md#test_service_update_test) | **PATCH** /v1/{test.name} |
@@ -219,6 +221,75 @@ No authorization required

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

+# **test_service_clone_test**
+> V1CloneTestResponse test_service_clone_test(name, body)
+
+
+
+### Example
+
+
+```python
+import eval_studio_client.api
+from eval_studio_client.api.models.test_service_clone_test_request import TestServiceCloneTestRequest
+from eval_studio_client.api.models.v1_clone_test_response import V1CloneTestResponse
+from eval_studio_client.api.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to http://localhost
+# See configuration.py for a list of all supported configuration parameters.
+configuration = eval_studio_client.api.Configuration(
+    host = "http://localhost"
+)
+
+
+# Enter a context with an instance of the API client
+with eval_studio_client.api.ApiClient(configuration) as api_client:
+    # Create an instance of the API class
+    api_instance = eval_studio_client.api.TestServiceApi(api_client)
+    name = 'name_example' # str | Required. The name of the Test to clone.
+    body = eval_studio_client.api.TestServiceCloneTestRequest() # TestServiceCloneTestRequest |
+
+    try:
+        api_response = api_instance.test_service_clone_test(name, body)
+        print("The response of TestServiceApi->test_service_clone_test:\n")
+        pprint(api_response)
+    except Exception as e:
+        print("Exception when calling TestServiceApi->test_service_clone_test: %s\n" % e)
+```
+
+
+
+### Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+**name** | **str**| Required. The name of the Test to clone. |
+**body** | [**TestServiceCloneTestRequest**](TestServiceCloneTestRequest.md)| |
+
+### Return type
+
+[**V1CloneTestResponse**](V1CloneTestResponse.md)
+
+### Authorization
+
+No authorization required
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+### HTTP response details
+
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | A successful response. | - |
+**0** | An unexpected error response. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
# **test_service_create_test**
> V1CreateTestResponse test_service_create_test(test)
@@ -424,7 +495,7 @@ No authorization required
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

# **test_service_get_test**
-> V1GetTestResponse test_service_get_test(
+> V1GetTestResponse test_service_get_test(name_10)
@@ -448,10 +519,10 @@ configuration = eval_studio_client.api.Configuration(
with eval_studio_client.api.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = eval_studio_client.api.TestServiceApi(api_client)
-
+    name_10 = 'name_10_example' # str | Required. The name of the Test to retrieve.

    try:
-        api_response = api_instance.test_service_get_test(
+        api_response = api_instance.test_service_get_test(name_10)
        print("The response of TestServiceApi->test_service_get_test:\n")
        pprint(api_response)
    except Exception as e:
@@ -465,7 +536,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:

Name | Type | Description | Notes
------------- | ------------- | ------------- | -------------
-**
+**name_10** | **str**| Required. The name of the Test to retrieve. |

### Return type
@@ -828,6 +899,75 @@ No authorization required

[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)

+# **test_service_perturb_test_in_place**
+> V1PerturbTestInPlaceResponse test_service_perturb_test_in_place(name, body)
+
+
+
+### Example
+
+
+```python
+import eval_studio_client.api
+from eval_studio_client.api.models.test_service_perturb_test_in_place_request import TestServicePerturbTestInPlaceRequest
+from eval_studio_client.api.models.v1_perturb_test_in_place_response import V1PerturbTestInPlaceResponse
+from eval_studio_client.api.rest import ApiException
+from pprint import pprint
+
+# Defining the host is optional and defaults to http://localhost
+# See configuration.py for a list of all supported configuration parameters.
+configuration = eval_studio_client.api.Configuration(
+    host = "http://localhost"
+)
+
+
+# Enter a context with an instance of the API client
+with eval_studio_client.api.ApiClient(configuration) as api_client:
+    # Create an instance of the API class
+    api_instance = eval_studio_client.api.TestServiceApi(api_client)
+    name = 'name_example' # str | Required. The name of the Test to perturb.
+    body = eval_studio_client.api.TestServicePerturbTestInPlaceRequest() # TestServicePerturbTestInPlaceRequest |
+
+    try:
+        api_response = api_instance.test_service_perturb_test_in_place(name, body)
+        print("The response of TestServiceApi->test_service_perturb_test_in_place:\n")
+        pprint(api_response)
+    except Exception as e:
+        print("Exception when calling TestServiceApi->test_service_perturb_test_in_place: %s\n" % e)
+```
+
+
+
+### Parameters
+
+
+Name | Type | Description | Notes
+------------- | ------------- | ------------- | -------------
+**name** | **str**| Required. The name of the Test to perturb. |
+**body** | [**TestServicePerturbTestInPlaceRequest**](TestServicePerturbTestInPlaceRequest.md)| |
+
+### Return type
+
+[**V1PerturbTestInPlaceResponse**](V1PerturbTestInPlaceResponse.md)
+
+### Authorization
+
+No authorization required
+
+### HTTP request headers
+
+ - **Content-Type**: application/json
+ - **Accept**: application/json
+
+### HTTP response details
+
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+**200** | A successful response. | - |
+**0** | An unexpected error response. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
# **test_service_update_test**
> V1UpdateTestResponse test_service_update_test(test_name, test)
eval_studio_client/api/docs/TestServiceCloneTestRequest.md (new file)

@@ -0,0 +1,30 @@
+# TestServiceCloneTestRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**new_test_display_name** | **str** | Optional. Name of the newly created test. | [optional]
+**new_test_description** | **str** | Optional. Description of the newly created Test. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.test_service_clone_test_request import TestServiceCloneTestRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TestServiceCloneTestRequest from a JSON string
+test_service_clone_test_request_instance = TestServiceCloneTestRequest.from_json(json)
+# print the JSON string representation of the object
+print(TestServiceCloneTestRequest.to_json())
+
+# convert the object into a dict
+test_service_clone_test_request_dict = test_service_clone_test_request_instance.to_dict()
+# create an instance of TestServiceCloneTestRequest from a dict
+test_service_clone_test_request_from_dict = TestServiceCloneTestRequest.from_dict(test_service_clone_test_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
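The generated example above only round-trips an empty JSON object, so it does not show the new fields in use. Here is a minimal, hypothetical sketch of populating the clone request and calling the endpoint documented earlier; it assumes the generated pydantic-style model accepts its documented fields as keyword arguments and uses the `test_service_clone_test(name, body)` call shown in TestServiceApi.md, with placeholder values throughout:

```python
# Hypothetical usage sketch; not part of the package diff.
import eval_studio_client.api
from eval_studio_client.api.models.test_service_clone_test_request import TestServiceCloneTestRequest

configuration = eval_studio_client.api.Configuration(host="http://localhost")

with eval_studio_client.api.ApiClient(configuration) as api_client:
    api_instance = eval_studio_client.api.TestServiceApi(api_client)
    # Both fields are optional per the properties table above.
    body = TestServiceCloneTestRequest(
        new_test_display_name="My test (copy)",
        new_test_description="Clone created for a what-if experiment",
    )
    # "tests/123" is a placeholder resource name; a real value would come from a list/get call.
    response = api_instance.test_service_clone_test("tests/123", body)
    # Per V1CloneTestResponse.md, the response carries the cloned V1Test in its `test` field.
    print(response.test)
```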
eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md

@@ -9,8 +9,9 @@ Name | Type | Description | Notes
**model** | **str** | Optional. The Model to use for generating TestCases. If not specified, the default RAG h2oGPTe will be used. Error is returned, if no default model is specified and this field is not set. | [optional]
**base_llm_model** | **str** | Optional. The base LLM model to use for generating the prompts. Selected automatically if not specified. | [optional]
**generators** | [**List[V1TestCasesGenerator]**](V1TestCasesGenerator.md) | Optional. Generators to use for generation. If not specified, all generators are selected. | [optional]
-**h2ogpte_collection_id** | **str** | Optional.
-**topics** | **List[str]** | Optional.
+**h2ogpte_collection_id** | **str** | Optional. ID of the h2oGPTe collection to use. If provided, documents referenced by Test and any specified chunks are ignored. This field is required if Test does not reference any documents and no chunks are provided. If this field is left empty, a temporary collection will be created. | [optional]
+**topics** | **List[str]** | Optional. Topics to generate questions for. If not specified, use document summarization as topic generation. | [optional]
+**chunks** | **List[str]** | Optional. The list of chunks to use for generation. If set, the Documents assigned to the Test and h2ogpte_collection_id are ignored. | [optional]

## Example
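The new `chunks` field, together with the expanded `h2ogpte_collection_id` and `topics` descriptions, means test-case generation can now be driven from raw text chunks instead of the Test's Documents or an existing collection. A minimal, hypothetical sketch of building such a request (assuming the generated model accepts its documented fields as keyword arguments; the endpoint itself is `POST /v1/{name}:generateTestCases` from the method table above):

```python
# Hypothetical usage sketch; not part of the package diff.
from eval_studio_client.api.models.test_service_generate_test_cases_request import (
    TestServiceGenerateTestCasesRequest,
)

# Per the properties table: when `chunks` is set, the Documents assigned to the
# Test and `h2ogpte_collection_id` are ignored, so no collection is needed.
body = TestServiceGenerateTestCasesRequest(
    chunks=[
        "Eval Studio stores every evaluation run as a Leaderboard resource.",
        "Perturbators rewrite test prompts to probe model robustness.",
    ],
    topics=["leaderboards", "robustness"],  # optional; defaults to document-summarization topics
)
# to_json()/from_json() follow the generated-model example pattern shown in these docs.
print(body.to_json())
```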
eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md (new file)

@@ -0,0 +1,30 @@
+# TestServicePerturbTestInPlaceRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**perturbator_configurations** | [**List[V1PerturbatorConfiguration]**](V1PerturbatorConfiguration.md) | Required. PerturbatorConfigurations to apply to the Test. | [optional]
+**test_case_names** | **List[str]** | Optional. Perturbation apply only to selected testCases. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.test_service_perturb_test_in_place_request import TestServicePerturbTestInPlaceRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of TestServicePerturbTestInPlaceRequest from a JSON string
+test_service_perturb_test_in_place_request_instance = TestServicePerturbTestInPlaceRequest.from_json(json)
+# print the JSON string representation of the object
+print(TestServicePerturbTestInPlaceRequest.to_json())
+
+# convert the object into a dict
+test_service_perturb_test_in_place_request_dict = test_service_perturb_test_in_place_request_instance.to_dict()
+# create an instance of TestServicePerturbTestInPlaceRequest from a dict
+test_service_perturb_test_in_place_request_from_dict = TestServicePerturbTestInPlaceRequest.from_dict(test_service_perturb_test_in_place_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
eval_studio_client/api/docs/V1AbortOperationResponse.md (new file)

@@ -0,0 +1,29 @@
+# V1AbortOperationResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**operation** | [**V1Operation**](V1Operation.md) |  | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_abort_operation_response import V1AbortOperationResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1AbortOperationResponse from a JSON string
+v1_abort_operation_response_instance = V1AbortOperationResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1AbortOperationResponse.to_json())
+
+# convert the object into a dict
+v1_abort_operation_response_dict = v1_abort_operation_response_instance.to_dict()
+# create an instance of V1AbortOperationResponse from a dict
+v1_abort_operation_response_from_dict = V1AbortOperationResponse.from_dict(v1_abort_operation_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
eval_studio_client/api/docs/V1CloneTestResponse.md (new file)

@@ -0,0 +1,29 @@
+# V1CloneTestResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**test** | [**V1Test**](V1Test.md) |  | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_clone_test_response import V1CloneTestResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1CloneTestResponse from a JSON string
+v1_clone_test_response_instance = V1CloneTestResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1CloneTestResponse.to_json())
+
+# convert the object into a dict
+v1_clone_test_response_dict = v1_clone_test_response_instance.to_dict()
+# create an instance of V1CloneTestResponse from a dict
+v1_clone_test_response_from_dict = V1CloneTestResponse.from_dict(v1_clone_test_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
eval_studio_client/api/docs/V1CloneWorkflowResponse.md (new file)

@@ -0,0 +1,29 @@
+# V1CloneWorkflowResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**workflow** | [**V1Workflow**](V1Workflow.md) |  | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_clone_workflow_response import V1CloneWorkflowResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1CloneWorkflowResponse from a JSON string
+v1_clone_workflow_response_instance = V1CloneWorkflowResponse.from_json(json)
+# print the JSON string representation of the object
+print(V1CloneWorkflowResponse.to_json())
+
+# convert the object into a dict
+v1_clone_workflow_response_dict = v1_clone_workflow_response_instance.to_dict()
+# create an instance of V1CloneWorkflowResponse from a dict
+v1_clone_workflow_response_from_dict = V1CloneWorkflowResponse.from_dict(v1_clone_workflow_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
@@ -0,0 +1,32 @@
+# V1Context
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**collection_id** | **str** | Collection Id. | [optional]
+**chunk_id** | **int** | Chunk Id. | [optional]
+**score** | **float** | Chunk score. | [optional]
+**content** | **str** | Content. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_context import V1Context
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1Context from a JSON string
+v1_context_instance = V1Context.from_json(json)
+# print the JSON string representation of the object
+print(v1_context_instance.to_json())
+
+# convert the object into a dict
+v1_context_dict = v1_context_instance.to_dict()
+# create an instance of V1Context from a dict
+v1_context_from_dict = V1Context.from_dict(v1_context_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
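The generated example above only round-trips an empty JSON object. As a complementary, hand-written sketch (not part of the released package), the snippet below builds a populated V1Context, assuming the generated pydantic model accepts the snake_case property names listed above as keyword arguments; all identifiers and values are made up.

```python
from eval_studio_client.api.models.v1_context import V1Context

# Illustrative values only; real collection and chunk identifiers come from
# your own Eval Studio instance.
ctx = V1Context(
    collection_id="my-collection",
    chunk_id=42,
    score=0.87,
    content="Retrieved document chunk used to ground a generated question.",
)

# Round-trip through the dict representation, as in the generated example.
ctx_dict = ctx.to_dict()
assert V1Context.from_dict(ctx_dict).to_dict() == ctx_dict
```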
@@ -0,0 +1,29 @@
+# V1CreateWorkflowEdgeResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**edge** | [**V1WorkflowEdge**](V1WorkflowEdge.md) | | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_create_workflow_edge_response import V1CreateWorkflowEdgeResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1CreateWorkflowEdgeResponse from a JSON string
+v1_create_workflow_edge_response_instance = V1CreateWorkflowEdgeResponse.from_json(json)
+# print the JSON string representation of the object
+print(v1_create_workflow_edge_response_instance.to_json())
+
+# convert the object into a dict
+v1_create_workflow_edge_response_dict = v1_create_workflow_edge_response_instance.to_dict()
+# create an instance of V1CreateWorkflowEdgeResponse from a dict
+v1_create_workflow_edge_response_from_dict = V1CreateWorkflowEdgeResponse.from_dict(v1_create_workflow_edge_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+# V1CreateWorkflowNodeResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**node** | [**V1WorkflowNode**](V1WorkflowNode.md) | | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_create_workflow_node_response import V1CreateWorkflowNodeResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1CreateWorkflowNodeResponse from a JSON string
+v1_create_workflow_node_response_instance = V1CreateWorkflowNodeResponse.from_json(json)
+# print the JSON string representation of the object
+print(v1_create_workflow_node_response_instance.to_json())
+
+# convert the object into a dict
+v1_create_workflow_node_response_dict = v1_create_workflow_node_response_instance.to_dict()
+# create an instance of V1CreateWorkflowNodeResponse from a dict
+v1_create_workflow_node_response_from_dict = V1CreateWorkflowNodeResponse.from_dict(v1_create_workflow_node_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,29 @@
+# V1DeleteWorkflowEdgeResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**edge** | [**V1WorkflowEdge**](V1WorkflowEdge.md) | | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_delete_workflow_edge_response import V1DeleteWorkflowEdgeResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1DeleteWorkflowEdgeResponse from a JSON string
+v1_delete_workflow_edge_response_instance = V1DeleteWorkflowEdgeResponse.from_json(json)
+# print the JSON string representation of the object
+print(v1_delete_workflow_edge_response_instance.to_json())
+
+# convert the object into a dict
+v1_delete_workflow_edge_response_dict = v1_delete_workflow_edge_response_instance.to_dict()
+# create an instance of V1DeleteWorkflowEdgeResponse from a dict
+v1_delete_workflow_edge_response_from_dict = V1DeleteWorkflowEdgeResponse.from_dict(v1_delete_workflow_edge_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,30 @@
+# V1GeneratedTestCase
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**test_case** | [**V1TestCase**](V1TestCase.md) | | [optional]
+**context** | [**List[V1Context]**](V1Context.md) | Context used for test_case generation. | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_generated_test_case import V1GeneratedTestCase
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1GeneratedTestCase from a JSON string
+v1_generated_test_case_instance = V1GeneratedTestCase.from_json(json)
+# print the JSON string representation of the object
+print(v1_generated_test_case_instance.to_json())
+
+# convert the object into a dict
+v1_generated_test_case_dict = v1_generated_test_case_instance.to_dict()
+# create an instance of V1GeneratedTestCase from a dict
+v1_generated_test_case_from_dict = V1GeneratedTestCase.from_dict(v1_generated_test_case_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
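For illustration only, a hand-written sketch (not part of the released package) of how the documented fields nest: a V1GeneratedTestCase carrying the V1Context chunks that grounded it. As above, it assumes the generated models accept snake_case keyword arguments; the nested test_case is omitted here because its fields are documented separately.

```python
from eval_studio_client.api.models.v1_context import V1Context
from eval_studio_client.api.models.v1_generated_test_case import V1GeneratedTestCase

# Hypothetical content; only the fields documented above are used.
generated = V1GeneratedTestCase(
    context=[
        V1Context(collection_id="my-collection", chunk_id=1, score=0.91, content="..."),
        V1Context(collection_id="my-collection", chunk_id=7, score=0.64, content="..."),
    ],
)

# Inspect which chunks were used to generate the test case.
for ctx in generated.context or []:
    print(ctx.chunk_id, ctx.score)
```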
@@ -0,0 +1,29 @@
+# V1GetLeaderboardReportResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**leaderboard_report** | [**V1LeaderboardReport**](V1LeaderboardReport.md) | | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_get_leaderboard_report_response import V1GetLeaderboardReportResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1GetLeaderboardReportResponse from a JSON string
+v1_get_leaderboard_report_response_instance = V1GetLeaderboardReportResponse.from_json(json)
+# print the JSON string representation of the object
+print(v1_get_leaderboard_report_response_instance.to_json())
+
+# convert the object into a dict
+v1_get_leaderboard_report_response_dict = v1_get_leaderboard_report_response_instance.to_dict()
+# create an instance of V1GetLeaderboardReportResponse from a dict
+v1_get_leaderboard_report_response_from_dict = V1GetLeaderboardReportResponse.from_dict(v1_get_leaderboard_report_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -12,6 +12,9 @@ Name | Type | Description | Notes
 **h2o_gpte_allowlist** | **List[str]** | Allowlist of H2OGPTe models for UI that can be used in Eval Studio. E.g. gpt-35-turbo-1106, h2oai/h2ogpt-4096-llama2-13b-chat, h2oai/h2ogpt-4096-llama2-70b-chat-4bit, HuggingFaceH4/zephyr-7b-beta, h2oai/h2ogpt-gm-7b-mistral-chat-sft-dpo-v1, h2oai/h2ogpt-gm-experimental. | [optional]
 **h2o_gpte_client_version** | **str** | The version of h2oGPTe client used by the workers. | [optional]
 **h2o_sonar_version** | **str** | The version of H2O Sonar used by the workers. | [optional]
+**preferred_llms_for_test_generation** | **List[str]** | Ordered list of LLMs preferred for test generation. Each value may be a regular expression. | [optional]
+**h2o_cloud_url** | **str** | The URL of the H2O Cloud host. | [optional]
+**public_instance** | **bool** | Whether the Eval Studio instance is public. | [optional]
 
 ## Example
 
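Purely as an illustration of the three new configuration fields (not an excerpt from the package), plausible values might look like the dict below; the keys mirror the property names, and the regular-expression entries in preferred_llms_for_test_generation are hypothetical.

```python
# Example values only; adjust to the LLMs and cloud host of your deployment.
new_config_fields = {
    "preferred_llms_for_test_generation": ["gpt-4.*", "h2oai/h2ogpt-.*"],  # ordered, regex allowed
    "h2o_cloud_url": "https://cloud.example.com",
    "public_instance": False,
}
```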
@@ -0,0 +1,29 @@
+# V1InitWorkflowNodeResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**node** | [**V1WorkflowNode**](V1WorkflowNode.md) | | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_init_workflow_node_response import V1InitWorkflowNodeResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1InitWorkflowNodeResponse from a JSON string
+v1_init_workflow_node_response_instance = V1InitWorkflowNodeResponse.from_json(json)
+# print the JSON string representation of the object
+print(v1_init_workflow_node_response_instance.to_json())
+
+# convert the object into a dict
+v1_init_workflow_node_response_dict = v1_init_workflow_node_response_instance.to_dict()
+# create an instance of V1InitWorkflowNodeResponse from a dict
+v1_init_workflow_node_response_from_dict = V1InitWorkflowNodeResponse.from_dict(v1_init_workflow_node_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
@@ -0,0 +1,32 @@
+# V1LeaderboardReport
+
+LeaderboardReport represents the leaderboard report, which is formed by the results, models, and evaluator.
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**results** | [**List[V1LeaderboardReportResult]**](V1LeaderboardReportResult.md) | Output only. List of per test case results. | [optional] [readonly]
+**models** | [**List[V1LeaderboardReportModel]**](V1LeaderboardReportModel.md) | Output only. List of models which were used to create the results. | [optional] [readonly]
+**evaluator** | [**V1LeaderboardReportEvaluator**](V1LeaderboardReportEvaluator.md) | | [optional]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_leaderboard_report import V1LeaderboardReport
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1LeaderboardReport from a JSON string
+v1_leaderboard_report_instance = V1LeaderboardReport.from_json(json)
+# print the JSON string representation of the object
+print(v1_leaderboard_report_instance.to_json())
+
+# convert the object into a dict
+v1_leaderboard_report_dict = v1_leaderboard_report_instance.to_dict()
+# create an instance of V1LeaderboardReport from a dict
+v1_leaderboard_report_from_dict = V1LeaderboardReport.from_dict(v1_leaderboard_report_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
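A hand-written sketch (not from the generated docs) of reading the documented fields after parsing a report. It assumes the JSON string comes from a GetLeaderboardReport response; an empty object is used here so the snippet stays self-contained.

```python
from eval_studio_client.api.models.v1_leaderboard_report import V1LeaderboardReport

raw_json = "{}"  # would normally be the leaderboard_report payload from the server
report = V1LeaderboardReport.from_json(raw_json)

# The report is formed by per-test-case results, the models that produced them,
# and the evaluator; all three fields are optional and output-only.
for result in report.results or []:
    print(result)
for model in report.models or []:
    print(model)
print(report.evaluator)
```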
@@ -0,0 +1,31 @@
+# V1LeaderboardReportActualOutputData
+
+ActualOutputData represents the actual output data.
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**text** | **str** | Output only. Text fragment. | [optional] [readonly]
+**metrics** | **object** | Output only. Metrics parsed as a string-to-Value map. | [optional] [readonly]
+
+## Example
+
+```python
+from eval_studio_client.api.models.v1_leaderboard_report_actual_output_data import V1LeaderboardReportActualOutputData
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of V1LeaderboardReportActualOutputData from a JSON string
+v1_leaderboard_report_actual_output_data_instance = V1LeaderboardReportActualOutputData.from_json(json)
+# print the JSON string representation of the object
+print(v1_leaderboard_report_actual_output_data_instance.to_json())
+
+# convert the object into a dict
+v1_leaderboard_report_actual_output_data_dict = v1_leaderboard_report_actual_output_data_instance.to_dict()
+# create an instance of V1LeaderboardReportActualOutputData from a dict
+v1_leaderboard_report_actual_output_data_from_dict = V1LeaderboardReportActualOutputData.from_dict(v1_leaderboard_report_actual_output_data_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
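As a final illustrative sketch (not part of the package), parsing a hypothetical output fragment into the model. The payload shape follows the two documented fields; both are output-only, so this mimics a server response, and the metric names and values are invented.

```python
from eval_studio_client.api.models.v1_leaderboard_report_actual_output_data import (
    V1LeaderboardReportActualOutputData,
)

# Hypothetical response fragment with a text answer and its metric map.
payload = {
    "text": "Paris is the capital of France.",
    "metrics": {"accuracy": 1.0, "faithfulness": 0.97},
}

output = V1LeaderboardReportActualOutputData.from_dict(payload)
print(output.text)
print(output.metrics)
```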