eval-studio-client 1.0.1-py3-none-any.whl → 1.1.0a5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/api/__init__.py +36 -1
- eval_studio_client/api/api/__init__.py +4 -0
- eval_studio_client/api/api/adversarial_inputs_service_api.py +321 -0
- eval_studio_client/api/api/dashboard_service_api.py +1 -1
- eval_studio_client/api/api/document_service_api.py +1 -1
- eval_studio_client/api/api/evaluation_service_api.py +1 -1
- eval_studio_client/api/api/evaluator_service_api.py +1 -1
- eval_studio_client/api/api/generated_questions_validation_service_api.py +321 -0
- eval_studio_client/api/api/human_calibration_service_api.py +1 -1
- eval_studio_client/api/api/info_service_api.py +1 -1
- eval_studio_client/api/api/leaderboard_report_service_api.py +292 -0
- eval_studio_client/api/api/leaderboard_service_api.py +17 -17
- eval_studio_client/api/api/model_service_api.py +17 -17
- eval_studio_client/api/api/operation_progress_service_api.py +1 -1
- eval_studio_client/api/api/operation_service_api.py +272 -17
- eval_studio_client/api/api/perturbation_service_api.py +1 -1
- eval_studio_client/api/api/perturbator_service_api.py +17 -17
- eval_studio_client/api/api/prompt_generation_service_api.py +1 -1
- eval_studio_client/api/api/prompt_library_service_api.py +1 -1
- eval_studio_client/api/api/test_case_relationship_service_api.py +292 -0
- eval_studio_client/api/api/test_case_service_api.py +17 -17
- eval_studio_client/api/api/test_class_service_api.py +17 -17
- eval_studio_client/api/api/test_lab_service_api.py +1 -1
- eval_studio_client/api/api/test_service_api.py +585 -17
- eval_studio_client/api/api/who_am_i_service_api.py +1 -1
- eval_studio_client/api/api/workflow_edge_service_api.py +541 -2
- eval_studio_client/api/api/workflow_node_service_api.py +923 -126
- eval_studio_client/api/api/workflow_service_api.py +317 -33
- eval_studio_client/api/api_client.py +1 -1
- eval_studio_client/api/configuration.py +1 -1
- eval_studio_client/api/docs/AdversarialInputsServiceApi.md +78 -0
- eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +45 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceApi.md +78 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.md +30 -0
- eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +5 -5
- eval_studio_client/api/docs/ModelServiceApi.md +5 -5
- eval_studio_client/api/docs/OperationServiceApi.md +72 -5
- eval_studio_client/api/docs/PerturbatorServiceApi.md +5 -5
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +2 -1
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +2 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +3 -0
- eval_studio_client/api/docs/TestCaseRelationshipServiceApi.md +75 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +5 -5
- eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
- eval_studio_client/api/docs/TestServiceApi.md +145 -5
- eval_studio_client/api/docs/TestServiceCloneTestRequest.md +30 -0
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +3 -2
- eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md +30 -0
- eval_studio_client/api/docs/V1AbortOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneTestResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Context.md +32 -0
- eval_studio_client/api/docs/V1CreateWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1GeneratedTestCase.md +30 -0
- eval_studio_client/api/docs/V1GetLeaderboardReportResponse.md +29 -0
- eval_studio_client/api/docs/V1Info.md +3 -0
- eval_studio_client/api/docs/V1InitWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1LeaderboardReport.md +32 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputData.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluator.md +42 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluatorParameter.md +38 -0
- eval_studio_client/api/docs/V1LeaderboardReportExplanation.md +34 -0
- eval_studio_client/api/docs/V1LeaderboardReportMetricsMetaEntry.md +41 -0
- eval_studio_client/api/docs/V1LeaderboardReportModel.md +39 -0
- eval_studio_client/api/docs/V1LeaderboardReportResult.md +45 -0
- eval_studio_client/api/docs/V1LeaderboardReportResultRelationship.md +32 -0
- eval_studio_client/api/docs/V1ListTestCaseRelationshipsResponse.md +29 -0
- eval_studio_client/api/docs/V1MetricScore.md +31 -0
- eval_studio_client/api/docs/V1MetricScores.md +29 -0
- eval_studio_client/api/docs/V1PerturbTestInPlaceResponse.md +29 -0
- eval_studio_client/api/docs/V1RepeatedString.md +29 -0
- eval_studio_client/api/docs/V1ResetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1TestCase.md +2 -0
- eval_studio_client/api/docs/V1Workflow.md +3 -0
- eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +139 -0
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +221 -12
- eval_studio_client/api/docs/WorkflowServiceApi.md +81 -10
- eval_studio_client/api/docs/WorkflowServiceCloneWorkflowRequest.md +33 -0
- eval_studio_client/api/exceptions.py +1 -1
- eval_studio_client/api/models/__init__.py +32 -1
- eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +143 -0
- eval_studio_client/api/models/generated_questions_validation_service_validate_generated_questions_request.py +97 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +1 -1
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +13 -4
- eval_studio_client/api/models/protobuf_any.py +1 -1
- eval_studio_client/api/models/protobuf_null_value.py +1 -1
- eval_studio_client/api/models/required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_document_to_update.py +1 -1
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_model_to_update.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_case_to_update.py +10 -3
- eval_studio_client/api/models/required_the_test_to_update.py +1 -1
- eval_studio_client/api/models/required_the_updated_workflow.py +11 -3
- eval_studio_client/api/models/required_the_updated_workflow_node.py +1 -1
- eval_studio_client/api/models/rpc_status.py +1 -1
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/models/test_service_clone_test_request.py +89 -0
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +7 -5
- eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +1 -1
- eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +1 -1
- eval_studio_client/api/models/test_service_perturb_test_in_place_request.py +97 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +1 -1
- eval_studio_client/api/models/v1_abort_operation_response.py +91 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/models/v1_check_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_clone_test_response.py +91 -0
- eval_studio_client/api/models/v1_clone_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_collection_info.py +1 -1
- eval_studio_client/api/models/v1_context.py +93 -0
- eval_studio_client/api/models/v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_document_response.py +1 -1
- eval_studio_client/api/models/v1_create_evaluation_request.py +1 -1
- eval_studio_client/api/models/v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/models/v1_create_model_response.py +1 -1
- eval_studio_client/api/models/v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_response.py +1 -1
- eval_studio_client/api/models/v1_create_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_dashboard.py +1 -1
- eval_studio_client/api/models/v1_dashboard_status.py +1 -1
- eval_studio_client/api/models/v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_document_response.py +1 -1
- eval_studio_client/api/models/v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_model_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_document.py +1 -1
- eval_studio_client/api/models/v1_estimate_threshold_request.py +1 -1
- eval_studio_client/api/models/v1_evaluation_test.py +1 -1
- eval_studio_client/api/models/v1_evaluator.py +1 -1
- eval_studio_client/api/models/v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/models/v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/models/v1_evaluator_view.py +1 -1
- eval_studio_client/api/models/v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/models/v1_find_all_test_cases_by_id_response.py +1 -1
- eval_studio_client/api/models/v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_generated_test_case.py +101 -0
- eval_studio_client/api/models/v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_document_response.py +1 -1
- eval_studio_client/api/models/v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_get_info_response.py +1 -1
- eval_studio_client/api/models/v1_get_leaderboard_report_response.py +91 -0
- eval_studio_client/api/models/v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_model_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_response.py +1 -1
- eval_studio_client/api/models/v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_class_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_import_evaluation_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_request.py +1 -1
- eval_studio_client/api/models/v1_info.py +10 -4
- eval_studio_client/api/models/v1_init_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_insight.py +1 -1
- eval_studio_client/api/models/v1_labeled_test_case.py +1 -1
- eval_studio_client/api/models/v1_leaderboard.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_report.py +115 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_data.py +93 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +101 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator.py +155 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator_parameter.py +109 -0
- eval_studio_client/api/models/v1_leaderboard_report_explanation.py +103 -0
- eval_studio_client/api/models/v1_leaderboard_report_metrics_meta_entry.py +129 -0
- eval_studio_client/api/models/v1_leaderboard_report_model.py +121 -0
- eval_studio_client/api/models/v1_leaderboard_report_result.py +175 -0
- eval_studio_client/api/models/v1_leaderboard_report_result_relationship.py +97 -0
- eval_studio_client/api/models/v1_leaderboard_status.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_type.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_view.py +1 -1
- eval_studio_client/api/models/v1_list_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_documents_response.py +1 -1
- eval_studio_client/api/models/v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_operations_response.py +1 -1
- eval_studio_client/api/models/v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/models/v1_list_prompt_library_items_response.py +1 -1
- eval_studio_client/api/models/v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_library_items_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_relationships_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/models/v1_list_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_workflows_response.py +1 -1
- eval_studio_client/api/models/v1_metric_score.py +89 -0
- eval_studio_client/api/models/v1_metric_scores.py +95 -0
- eval_studio_client/api/models/v1_model.py +1 -1
- eval_studio_client/api/models/v1_model_type.py +1 -1
- eval_studio_client/api/models/v1_operation.py +1 -1
- eval_studio_client/api/models/v1_operation_progress.py +1 -1
- eval_studio_client/api/models/v1_perturb_test_in_place_response.py +91 -0
- eval_studio_client/api/models/v1_perturb_test_response.py +1 -1
- eval_studio_client/api/models/v1_perturbator.py +1 -1
- eval_studio_client/api/models/v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/models/v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/models/v1_problem_and_action.py +1 -1
- eval_studio_client/api/models/v1_process_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_prompt_library_item.py +1 -1
- eval_studio_client/api/models/v1_repeated_string.py +87 -0
- eval_studio_client/api/models/v1_reset_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_test.py +1 -1
- eval_studio_client/api/models/v1_test_case.py +10 -3
- eval_studio_client/api/models/v1_test_case_relationship.py +1 -1
- eval_studio_client/api/models/v1_test_cases_generator.py +1 -1
- eval_studio_client/api/models/v1_test_class.py +1 -1
- eval_studio_client/api/models/v1_test_class_type.py +1 -1
- eval_studio_client/api/models/v1_test_lab.py +1 -1
- eval_studio_client/api/models/v1_test_suite_evaluates.py +1 -1
- eval_studio_client/api/models/v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_document_response.py +1 -1
- eval_studio_client/api/models/v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_model_response.py +1 -1
- eval_studio_client/api/models/v1_update_operation_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_who_am_i_response.py +1 -1
- eval_studio_client/api/models/v1_workflow.py +11 -3
- eval_studio_client/api/models/v1_workflow_edge.py +1 -1
- eval_studio_client/api/models/v1_workflow_edge_type.py +1 -1
- eval_studio_client/api/models/v1_workflow_node.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_artifact.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_artifacts.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_attributes.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_status.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_type.py +4 -1
- eval_studio_client/api/models/v1_workflow_node_view.py +1 -1
- eval_studio_client/api/models/v1_workflow_type.py +1 -1
- eval_studio_client/api/models/workflow_service_clone_workflow_request.py +95 -0
- eval_studio_client/api/rest.py +1 -1
- eval_studio_client/api/test/test_adversarial_inputs_service_api.py +37 -0
- eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +128 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
- eval_studio_client/api/test/test_document_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
- eval_studio_client/api/test/test_generated_questions_validation_service_api.py +37 -0
- eval_studio_client/api/test/test_generated_questions_validation_service_validate_generated_questions_request.py +83 -0
- eval_studio_client/api/test/test_human_calibration_service_api.py +1 -1
- eval_studio_client/api/test/test_info_service_api.py +1 -1
- eval_studio_client/api/test/test_leaderboard_report_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
- eval_studio_client/api/test/test_model_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_service_api.py +7 -1
- eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +6 -2
- eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +9 -4
- eval_studio_client/api/test/test_prompt_library_service_api.py +1 -1
- eval_studio_client/api/test/test_protobuf_any.py +1 -1
- eval_studio_client/api/test/test_protobuf_null_value.py +1 -1
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +6 -2
- eval_studio_client/api/test/test_required_the_test_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_updated_workflow.py +5 -2
- eval_studio_client/api/test/test_required_the_updated_workflow_node.py +1 -1
- eval_studio_client/api/test/test_rpc_status.py +1 -1
- eval_studio_client/api/test/test_test_case_relationship_service_api.py +37 -0
- eval_studio_client/api/test/test_test_case_service_api.py +1 -1
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_test_class_service_api.py +1 -1
- eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
- eval_studio_client/api/test/test_test_service_api.py +13 -1
- eval_studio_client/api/test/test_test_service_clone_test_request.py +52 -0
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +4 -1
- eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +1 -1
- eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +1 -1
- eval_studio_client/api/test/test_test_service_perturb_test_in_place_request.py +59 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +1 -1
- eval_studio_client/api/test/test_v1_abort_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +6 -2
- eval_studio_client/api/test/test_v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +5 -2
- eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_check_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_clone_test_response.py +67 -0
- eval_studio_client/api/test/test_v1_clone_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_collection_info.py +1 -1
- eval_studio_client/api/test/test_v1_context.py +54 -0
- eval_studio_client/api/test/test_v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +6 -2
- eval_studio_client/api/test/test_v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_create_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_create_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_dashboard.py +1 -1
- eval_studio_client/api/test/test_v1_dashboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_delete_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_document.py +1 -1
- eval_studio_client/api/test/test_v1_estimate_threshold_request.py +1 -1
- eval_studio_client/api/test/test_v1_evaluation_test.py +6 -2
- eval_studio_client/api/test/test_v1_evaluator.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_view.py +1 -1
- eval_studio_client/api/test/test_v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +6 -2
- eval_studio_client/api/test/test_v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/test/test_v1_generated_test_case.py +79 -0
- eval_studio_client/api/test/test_v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_info_response.py +7 -2
- eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +175 -0
- eval_studio_client/api/test/test_v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_get_test_class_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_import_evaluation_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_v1_info.py +7 -2
- eval_studio_client/api/test/test_v1_init_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_insight.py +1 -1
- eval_studio_client/api/test/test_v1_labeled_test_case.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_report.py +174 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py +52 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +56 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py +114 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py +63 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py +58 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py +66 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_model.py +62 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result.py +92 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_type.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_view.py +1 -1
- eval_studio_client/api/test/test_v1_list_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_relationships_response.py +56 -0
- eval_studio_client/api/test/test_v1_list_test_cases_response.py +6 -2
- eval_studio_client/api/test/test_v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_workflows_response.py +5 -2
- eval_studio_client/api/test/test_v1_metric_score.py +52 -0
- eval_studio_client/api/test/test_v1_metric_scores.py +55 -0
- eval_studio_client/api/test/test_v1_model.py +1 -1
- eval_studio_client/api/test/test_v1_model_type.py +1 -1
- eval_studio_client/api/test/test_v1_operation.py +1 -1
- eval_studio_client/api/test/test_v1_operation_progress.py +1 -1
- eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +67 -0
- eval_studio_client/api/test/test_v1_perturb_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/test/test_v1_problem_and_action.py +1 -1
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_prompt_library_item.py +1 -1
- eval_studio_client/api/test/test_v1_repeated_string.py +53 -0
- eval_studio_client/api/test/test_v1_reset_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_test.py +1 -1
- eval_studio_client/api/test/test_v1_test_case.py +6 -2
- eval_studio_client/api/test/test_v1_test_case_relationship.py +1 -1
- eval_studio_client/api/test/test_v1_test_cases_generator.py +1 -1
- eval_studio_client/api/test/test_v1_test_class.py +1 -1
- eval_studio_client/api/test/test_v1_test_class_type.py +1 -1
- eval_studio_client/api/test/test_v1_test_lab.py +1 -1
- eval_studio_client/api/test/test_v1_test_suite_evaluates.py +1 -1
- eval_studio_client/api/test/test_v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_update_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_who_am_i_response.py +1 -1
- eval_studio_client/api/test/test_v1_workflow.py +5 -2
- eval_studio_client/api/test/test_v1_workflow_edge.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_edge_type.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_artifact.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_attributes.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_status.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_type.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_view.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_type.py +1 -1
- eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
- eval_studio_client/api/test/test_workflow_edge_service_api.py +15 -1
- eval_studio_client/api/test/test_workflow_node_service_api.py +23 -2
- eval_studio_client/api/test/test_workflow_service_api.py +8 -1
- eval_studio_client/api/test/test_workflow_service_clone_workflow_request.py +55 -0
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +1633 -219
- eval_studio_client/tests.py +103 -8
- {eval_studio_client-1.0.1.dist-info → eval_studio_client-1.1.0a5.dist-info}/METADATA +2 -2
- eval_studio_client-1.1.0a5.dist-info/RECORD +720 -0
- {eval_studio_client-1.0.1.dist-info → eval_studio_client-1.1.0a5.dist-info}/WHEEL +1 -1
- eval_studio_client-1.0.1.dist-info/RECORD +0 -615
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
# coding: utf-8
|
|
2
|
+
|
|
3
|
+
"""
|
|
4
|
+
ai/h2o/eval_studio/v1/insight.proto
|
|
5
|
+
|
|
6
|
+
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
|
|
7
|
+
|
|
8
|
+
The version of the OpenAPI document: version not set
|
|
9
|
+
Generated by OpenAPI Generator (https://openapi-generator.tech)
|
|
10
|
+
|
|
11
|
+
Do not edit the class manually.
|
|
12
|
+
""" # noqa: E501
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
import pprint
|
|
17
|
+
import re # noqa: F401
|
|
18
|
+
import json
|
|
19
|
+
|
|
20
|
+
from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
|
|
21
|
+
from typing import Any, ClassVar, Dict, List, Optional
|
|
22
|
+
from eval_studio_client.api.models.v1_metric_scores import V1MetricScores
|
|
23
|
+
from eval_studio_client.api.models.v1_model import V1Model
|
|
24
|
+
from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
|
|
25
|
+
from typing import Optional, Set
|
|
26
|
+
from typing_extensions import Self
|
|
27
|
+
|
|
28
|
+
class AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(BaseModel):
|
|
29
|
+
"""
|
|
30
|
+
AdversarialInputsServiceTestAdversarialInputsRobustnessRequest
|
|
31
|
+
""" # noqa: E501
|
|
32
|
+
operation: Optional[StrictStr] = Field(default=None, description="Required. The Operation processing adversarial inputs robustness testing.")
|
|
33
|
+
generator_input_types: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. The list of adversarial input types to generate.", alias="generatorInputTypes")
|
|
34
|
+
generator_document_urls: Optional[List[StrictStr]] = Field(default=None, description="Required. The document URLs which were used to generate the baseline TestCases.", alias="generatorDocumentUrls")
|
|
35
|
+
generator_model: Optional[V1Model] = Field(default=None, alias="generatorModel")
|
|
36
|
+
generator_base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to use for generation of adversarial the prompts.", alias="generatorBaseLlmModel")
|
|
37
|
+
generator_count: Optional[StrictInt] = Field(default=None, description="Required. The number of adversarial TestCases to generate.", alias="generatorCount")
|
|
38
|
+
generator_topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.", alias="generatorTopics")
|
|
39
|
+
generator_chunks: Optional[List[StrictStr]] = Field(default=None, description="Optional. The list of chunks to use for generation. If set, the Documents assigned to the Test and h2ogpte_collection_id are ignored.", alias="generatorChunks")
|
|
40
|
+
generator_h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. ID of the h2oGPTe collection to use. If provided, documents referenced by Test and any specified chunks are ignored. This field is required if Test does not reference any documents and no chunks are provided. If this field is left empty, a temporary collection will be created.", alias="generatorH2ogpteCollectionId")
|
|
41
|
+
evaluator_identifiers: Optional[List[StrictStr]] = Field(default=None, description="Required. Evaluator identifiers to use for the model evaluation using the adversarial inputs.", alias="evaluatorIdentifiers")
|
|
42
|
+
evaluators_parameters: Optional[Dict[str, StrictStr]] = Field(default=None, description="Optional. Additional evaluators configuration, for all the evaluators used in the evaluation. Key is the evaluator identifier, and the value is a JSON string containing the configuration dictionary.", alias="evaluatorsParameters")
|
|
43
|
+
model: Optional[V1Model] = None
|
|
44
|
+
base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to be evaluated using the adversarial inputs.", alias="baseLlmModel")
|
|
45
|
+
model_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Parameters overrides for the Model host in JSON format.", alias="modelParameters")
|
|
46
|
+
default_h2ogpte_model: Optional[V1Model] = Field(default=None, alias="defaultH2ogpteModel")
|
|
47
|
+
baseline_eval: Optional[StrictStr] = Field(default=None, description="Required. Baseline evaluation name.", alias="baselineEval")
|
|
48
|
+
baseline_metrics: Optional[Dict[str, V1MetricScores]] = Field(default=None, description="Required. Map of baseline metrics from the evaluator to the metric scores for the evaluator.", alias="baselineMetrics")
|
|
49
|
+
__properties: ClassVar[List[str]] = ["operation", "generatorInputTypes", "generatorDocumentUrls", "generatorModel", "generatorBaseLlmModel", "generatorCount", "generatorTopics", "generatorChunks", "generatorH2ogpteCollectionId", "evaluatorIdentifiers", "evaluatorsParameters", "model", "baseLlmModel", "modelParameters", "defaultH2ogpteModel", "baselineEval", "baselineMetrics"]
|
|
50
|
+
|
|
51
|
+
model_config = ConfigDict(
|
|
52
|
+
populate_by_name=True,
|
|
53
|
+
validate_assignment=True,
|
|
54
|
+
protected_namespaces=(),
|
|
55
|
+
)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def to_str(self) -> str:
|
|
59
|
+
"""Returns the string representation of the model using alias"""
|
|
60
|
+
return pprint.pformat(self.model_dump(by_alias=True))
|
|
61
|
+
|
|
62
|
+
def to_json(self) -> str:
|
|
63
|
+
"""Returns the JSON representation of the model using alias"""
|
|
64
|
+
# TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
|
|
65
|
+
return json.dumps(self.to_dict())
|
|
66
|
+
|
|
67
|
+
@classmethod
|
|
68
|
+
def from_json(cls, json_str: str) -> Optional[Self]:
|
|
69
|
+
"""Create an instance of AdversarialInputsServiceTestAdversarialInputsRobustnessRequest from a JSON string"""
|
|
70
|
+
return cls.from_dict(json.loads(json_str))
|
|
71
|
+
|
|
72
|
+
def to_dict(self) -> Dict[str, Any]:
|
|
73
|
+
"""Return the dictionary representation of the model using alias.
|
|
74
|
+
|
|
75
|
+
This has the following differences from calling pydantic's
|
|
76
|
+
`self.model_dump(by_alias=True)`:
|
|
77
|
+
|
|
78
|
+
* `None` is only added to the output dict for nullable fields that
|
|
79
|
+
were set at model initialization. Other fields with value `None`
|
|
80
|
+
are ignored.
|
|
81
|
+
"""
|
|
82
|
+
excluded_fields: Set[str] = set([
|
|
83
|
+
])
|
|
84
|
+
|
|
85
|
+
_dict = self.model_dump(
|
|
86
|
+
by_alias=True,
|
|
87
|
+
exclude=excluded_fields,
|
|
88
|
+
exclude_none=True,
|
|
89
|
+
)
|
|
90
|
+
# override the default output from pydantic by calling `to_dict()` of generator_model
|
|
91
|
+
if self.generator_model:
|
|
92
|
+
_dict['generatorModel'] = self.generator_model.to_dict()
|
|
93
|
+
# override the default output from pydantic by calling `to_dict()` of model
|
|
94
|
+
if self.model:
|
|
95
|
+
_dict['model'] = self.model.to_dict()
|
|
96
|
+
# override the default output from pydantic by calling `to_dict()` of default_h2ogpte_model
|
|
97
|
+
if self.default_h2ogpte_model:
|
|
98
|
+
_dict['defaultH2ogpteModel'] = self.default_h2ogpte_model.to_dict()
|
|
99
|
+
# override the default output from pydantic by calling `to_dict()` of each value in baseline_metrics (dict)
|
|
100
|
+
_field_dict = {}
|
|
101
|
+
if self.baseline_metrics:
|
|
102
|
+
for _key in self.baseline_metrics:
|
|
103
|
+
if self.baseline_metrics[_key]:
|
|
104
|
+
_field_dict[_key] = self.baseline_metrics[_key].to_dict()
|
|
105
|
+
_dict['baselineMetrics'] = _field_dict
|
|
106
|
+
return _dict
|
|
107
|
+
|
|
108
|
+
@classmethod
|
|
109
|
+
def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
|
|
110
|
+
"""Create an instance of AdversarialInputsServiceTestAdversarialInputsRobustnessRequest from a dict"""
|
|
111
|
+
if obj is None:
|
|
112
|
+
return None
|
|
113
|
+
|
|
114
|
+
if not isinstance(obj, dict):
|
|
115
|
+
return cls.model_validate(obj)
|
|
116
|
+
|
|
117
|
+
_obj = cls.model_validate({
|
|
118
|
+
"operation": obj.get("operation"),
|
|
119
|
+
"generatorInputTypes": obj.get("generatorInputTypes"),
|
|
120
|
+
"generatorDocumentUrls": obj.get("generatorDocumentUrls"),
|
|
121
|
+
"generatorModel": V1Model.from_dict(obj["generatorModel"]) if obj.get("generatorModel") is not None else None,
|
|
122
|
+
"generatorBaseLlmModel": obj.get("generatorBaseLlmModel"),
|
|
123
|
+
"generatorCount": obj.get("generatorCount"),
|
|
124
|
+
"generatorTopics": obj.get("generatorTopics"),
|
|
125
|
+
"generatorChunks": obj.get("generatorChunks"),
|
|
126
|
+
"generatorH2ogpteCollectionId": obj.get("generatorH2ogpteCollectionId"),
|
|
127
|
+
"evaluatorIdentifiers": obj.get("evaluatorIdentifiers"),
|
|
128
|
+
"evaluatorsParameters": obj.get("evaluatorsParameters"),
|
|
129
|
+
"model": V1Model.from_dict(obj["model"]) if obj.get("model") is not None else None,
|
|
130
|
+
"baseLlmModel": obj.get("baseLlmModel"),
|
|
131
|
+
"modelParameters": obj.get("modelParameters"),
|
|
132
|
+
"defaultH2ogpteModel": V1Model.from_dict(obj["defaultH2ogpteModel"]) if obj.get("defaultH2ogpteModel") is not None else None,
|
|
133
|
+
"baselineEval": obj.get("baselineEval"),
|
|
134
|
+
"baselineMetrics": dict(
|
|
135
|
+
(_k, V1MetricScores.from_dict(_v))
|
|
136
|
+
for _k, _v in obj["baselineMetrics"].items()
|
|
137
|
+
)
|
|
138
|
+
if obj.get("baselineMetrics") is not None
|
|
139
|
+
else None
|
|
140
|
+
})
|
|
141
|
+
return _obj
|
|
142
|
+
|
|
143
|
+
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
# coding: utf-8
|
|
2
|
+
|
|
3
|
+
"""
|
|
4
|
+
ai/h2o/eval_studio/v1/insight.proto
|
|
5
|
+
|
|
6
|
+
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
|
|
7
|
+
|
|
8
|
+
The version of the OpenAPI document: version not set
|
|
9
|
+
Generated by OpenAPI Generator (https://openapi-generator.tech)
|
|
10
|
+
|
|
11
|
+
Do not edit the class manually.
|
|
12
|
+
""" # noqa: E501
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
import pprint
|
|
17
|
+
import re # noqa: F401
|
|
18
|
+
import json
|
|
19
|
+
|
|
20
|
+
from pydantic import BaseModel, ConfigDict, Field, StrictStr
|
|
21
|
+
from typing import Any, ClassVar, Dict, List, Optional
|
|
22
|
+
from eval_studio_client.api.models.v1_generated_test_case import V1GeneratedTestCase
|
|
23
|
+
from typing import Optional, Set
|
|
24
|
+
from typing_extensions import Self
|
|
25
|
+
|
|
26
|
+
class GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest(BaseModel):
|
|
27
|
+
"""
|
|
28
|
+
GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest
|
|
29
|
+
""" # noqa: E501
|
|
30
|
+
operation: Optional[StrictStr] = Field(default=None, description="Required. The Operation processing this question validation process.")
|
|
31
|
+
test_cases: Optional[List[V1GeneratedTestCase]] = Field(default=None, description="Required. Generated Test Cases, i.e., Test cases with context that was used for their generation.", alias="testCases")
|
|
32
|
+
__properties: ClassVar[List[str]] = ["operation", "testCases"]
|
|
33
|
+
|
|
34
|
+
model_config = ConfigDict(
|
|
35
|
+
populate_by_name=True,
|
|
36
|
+
validate_assignment=True,
|
|
37
|
+
protected_namespaces=(),
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def to_str(self) -> str:
|
|
42
|
+
"""Returns the string representation of the model using alias"""
|
|
43
|
+
return pprint.pformat(self.model_dump(by_alias=True))
|
|
44
|
+
|
|
45
|
+
def to_json(self) -> str:
|
|
46
|
+
"""Returns the JSON representation of the model using alias"""
|
|
47
|
+
# TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
|
|
48
|
+
return json.dumps(self.to_dict())
|
|
49
|
+
|
|
50
|
+
@classmethod
|
|
51
|
+
def from_json(cls, json_str: str) -> Optional[Self]:
|
|
52
|
+
"""Create an instance of GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest from a JSON string"""
|
|
53
|
+
return cls.from_dict(json.loads(json_str))
|
|
54
|
+
|
|
55
|
+
def to_dict(self) -> Dict[str, Any]:
|
|
56
|
+
"""Return the dictionary representation of the model using alias.
|
|
57
|
+
|
|
58
|
+
This has the following differences from calling pydantic's
|
|
59
|
+
`self.model_dump(by_alias=True)`:
|
|
60
|
+
|
|
61
|
+
* `None` is only added to the output dict for nullable fields that
|
|
62
|
+
were set at model initialization. Other fields with value `None`
|
|
63
|
+
are ignored.
|
|
64
|
+
"""
|
|
65
|
+
excluded_fields: Set[str] = set([
|
|
66
|
+
])
|
|
67
|
+
|
|
68
|
+
_dict = self.model_dump(
|
|
69
|
+
by_alias=True,
|
|
70
|
+
exclude=excluded_fields,
|
|
71
|
+
exclude_none=True,
|
|
72
|
+
)
|
|
73
|
+
# override the default output from pydantic by calling `to_dict()` of each item in test_cases (list)
|
|
74
|
+
_items = []
|
|
75
|
+
if self.test_cases:
|
|
76
|
+
for _item in self.test_cases:
|
|
77
|
+
if _item:
|
|
78
|
+
_items.append(_item.to_dict())
|
|
79
|
+
_dict['testCases'] = _items
|
|
80
|
+
return _dict
|
|
81
|
+
|
|
82
|
+
@classmethod
|
|
83
|
+
def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
|
|
84
|
+
"""Create an instance of GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest from a dict"""
|
|
85
|
+
if obj is None:
|
|
86
|
+
return None
|
|
87
|
+
|
|
88
|
+
if not isinstance(obj, dict):
|
|
89
|
+
return cls.model_validate(obj)
|
|
90
|
+
|
|
91
|
+
_obj = cls.model_validate({
|
|
92
|
+
"operation": obj.get("operation"),
|
|
93
|
+
"testCases": [V1GeneratedTestCase.from_dict(_item) for _item in obj["testCases"]] if obj.get("testCases") is not None else None
|
|
94
|
+
})
|
|
95
|
+
return _obj
|
|
96
|
+
|
|
97
|
+
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
# coding: utf-8
|
|
2
2
|
|
|
3
3
|
"""
|
|
4
|
-
ai/h2o/eval_studio/v1/
|
|
4
|
+
ai/h2o/eval_studio/v1/insight.proto
|
|
5
5
|
|
|
6
6
|
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
|
|
7
7
|
|
|
@@ -20,6 +20,7 @@ import json
|
|
|
20
20
|
from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
|
|
21
21
|
from typing import Any, ClassVar, Dict, List, Optional
|
|
22
22
|
from eval_studio_client.api.models.v1_model import V1Model
|
|
23
|
+
from eval_studio_client.api.models.v1_repeated_string import V1RepeatedString
|
|
23
24
|
from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
|
|
24
25
|
from typing import Optional, Set
|
|
25
26
|
from typing_extensions import Self
|
|
@@ -32,11 +33,12 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
|
|
|
32
33
|
model: Optional[V1Model] = None
|
|
33
34
|
count: Optional[StrictInt] = Field(default=None, description="Required. The number of TestCases to generate.")
|
|
34
35
|
base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to use for generating the prompts.", alias="baseLlmModel")
|
|
35
|
-
document_urls: Optional[
|
|
36
|
+
document_urls: Optional[V1RepeatedString] = Field(default=None, alias="documentUrls")
|
|
37
|
+
chunks: Optional[V1RepeatedString] = None
|
|
36
38
|
generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. Type of questions to generate TestCases for. If not specified, all types of questions are selected.")
|
|
37
39
|
h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. The ID of the h2oGPTe collection to use. If empty, new temporary collection will be created.", alias="h2ogpteCollectionId")
|
|
38
40
|
topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.")
|
|
39
|
-
__properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "generators", "h2ogpteCollectionId", "topics"]
|
|
41
|
+
__properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "chunks", "generators", "h2ogpteCollectionId", "topics"]
|
|
40
42
|
|
|
41
43
|
model_config = ConfigDict(
|
|
42
44
|
populate_by_name=True,
|
|
@@ -80,6 +82,12 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
|
|
|
80
82
|
# override the default output from pydantic by calling `to_dict()` of model
|
|
81
83
|
if self.model:
|
|
82
84
|
_dict['model'] = self.model.to_dict()
|
|
85
|
+
# override the default output from pydantic by calling `to_dict()` of document_urls
|
|
86
|
+
if self.document_urls:
|
|
87
|
+
_dict['documentUrls'] = self.document_urls.to_dict()
|
|
88
|
+
# override the default output from pydantic by calling `to_dict()` of chunks
|
|
89
|
+
if self.chunks:
|
|
90
|
+
_dict['chunks'] = self.chunks.to_dict()
|
|
83
91
|
return _dict
|
|
84
92
|
|
|
85
93
|
@classmethod
|
|
@@ -96,7 +104,8 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
|
|
|
96
104
|
"model": V1Model.from_dict(obj["model"]) if obj.get("model") is not None else None,
|
|
97
105
|
"count": obj.get("count"),
|
|
98
106
|
"baseLlmModel": obj.get("baseLlmModel"),
|
|
99
|
-
"documentUrls": obj.get("documentUrls"),
|
|
107
|
+
"documentUrls": V1RepeatedString.from_dict(obj["documentUrls"]) if obj.get("documentUrls") is not None else None,
|
|
108
|
+
"chunks": V1RepeatedString.from_dict(obj["chunks"]) if obj.get("chunks") is not None else None,
|
|
100
109
|
"generators": obj.get("generators"),
|
|
101
110
|
"h2ogpteCollectionId": obj.get("h2ogpteCollectionId"),
|
|
102
111
|
"topics": obj.get("topics")
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
# coding: utf-8
|
|
2
2
|
|
|
3
3
|
"""
|
|
4
|
-
ai/h2o/eval_studio/v1/
|
|
4
|
+
ai/h2o/eval_studio/v1/insight.proto
|
|
5
5
|
|
|
6
6
|
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
|
|
7
7
|
|
|
@@ -20,6 +20,7 @@ import json
|
|
|
20
20
|
from datetime import datetime
|
|
21
21
|
from pydantic import BaseModel, ConfigDict, Field, StrictStr
|
|
22
22
|
from typing import Any, ClassVar, Dict, List, Optional
|
|
23
|
+
from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
|
|
23
24
|
from typing import Optional, Set
|
|
24
25
|
from typing_extensions import Self
|
|
25
26
|
|
|
@@ -39,7 +40,9 @@ class RequiredTheTestCaseToUpdate(BaseModel):
|
|
|
39
40
|
constraints: Optional[List[StrictStr]] = Field(default=None, description="Constraints on the model output.")
|
|
40
41
|
condition: Optional[StrictStr] = Field(default=None, description="Optional. Test case output condition, in a form of AIP-160 compliant filter expression.")
|
|
41
42
|
perturbed_by: Optional[List[StrictStr]] = Field(default=None, description="Output only. The list of perturbators applied to this test case.", alias="perturbedBy")
|
|
42
|
-
|
|
43
|
+
topics: Optional[List[StrictStr]] = Field(default=None, description="Output only. The list of topics used to generate this test case.")
|
|
44
|
+
generator: Optional[V1TestCasesGenerator] = None
|
|
45
|
+
__properties: ClassVar[List[str]] = ["createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "parent", "prompt", "answer", "constraints", "condition", "perturbedBy", "topics", "generator"]
|
|
43
46
|
|
|
44
47
|
model_config = ConfigDict(
|
|
45
48
|
populate_by_name=True,
|
|
@@ -78,6 +81,7 @@ class RequiredTheTestCaseToUpdate(BaseModel):
|
|
|
78
81
|
* OpenAPI `readOnly` fields are excluded.
|
|
79
82
|
* OpenAPI `readOnly` fields are excluded.
|
|
80
83
|
* OpenAPI `readOnly` fields are excluded.
|
|
84
|
+
* OpenAPI `readOnly` fields are excluded.
|
|
81
85
|
"""
|
|
82
86
|
excluded_fields: Set[str] = set([
|
|
83
87
|
"create_time",
|
|
@@ -87,6 +91,7 @@ class RequiredTheTestCaseToUpdate(BaseModel):
|
|
|
87
91
|
"delete_time",
|
|
88
92
|
"deleter",
|
|
89
93
|
"perturbed_by",
|
|
94
|
+
"topics",
|
|
90
95
|
])
|
|
91
96
|
|
|
92
97
|
_dict = self.model_dump(
|
|
@@ -117,7 +122,9 @@ class RequiredTheTestCaseToUpdate(BaseModel):
|
|
|
117
122
|
"answer": obj.get("answer"),
|
|
118
123
|
"constraints": obj.get("constraints"),
|
|
119
124
|
"condition": obj.get("condition"),
|
|
120
|
-
"perturbedBy": obj.get("perturbedBy")
|
|
125
|
+
"perturbedBy": obj.get("perturbedBy"),
|
|
126
|
+
"topics": obj.get("topics"),
|
|
127
|
+
"generator": obj.get("generator")
|
|
121
128
|
})
|
|
122
129
|
return _obj
|
|
123
130
|
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
# coding: utf-8
|
|
2
2
|
|
|
3
3
|
"""
|
|
4
|
-
ai/h2o/eval_studio/v1/
|
|
4
|
+
ai/h2o/eval_studio/v1/insight.proto
|
|
5
5
|
|
|
6
6
|
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
|
|
7
7
|
|
|
@@ -45,7 +45,10 @@ class RequiredTheUpdatedWorkflow(BaseModel):
|
|
|
45
45
|
output_artifacts: Optional[Dict[str, V1WorkflowNodeArtifacts]] = Field(default=None, description="Output only. Optional. List of the WorkflowNodeArtifacts produces by all the WorkflowNodes in the Workflow.", alias="outputArtifacts")
|
|
46
46
|
llm_model: Optional[StrictStr] = Field(default=None, description="Immutable. LLM Model to use.", alias="llmModel")
|
|
47
47
|
model_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Immutable. Model parameter overrides in JSON format.", alias="modelParameters")
|
|
48
|
-
|
|
48
|
+
document: Optional[StrictStr] = Field(default=None, description="The resource name of a Document.")
|
|
49
|
+
h2ogpte_collection: Optional[StrictStr] = Field(default=None, description="Existing h2oGPTe collection.", alias="h2ogpteCollection")
|
|
50
|
+
cloned_from_workflow: Optional[StrictStr] = Field(default=None, description="Optional. Output only. The Workflow that this Workflow was cloned from.", alias="clonedFromWorkflow")
|
|
51
|
+
__properties: ClassVar[List[str]] = ["displayName", "description", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "type", "model", "nodes", "edges", "outputs", "outputArtifacts", "llmModel", "modelParameters", "document", "h2ogpteCollection", "clonedFromWorkflow"]
|
|
49
52
|
|
|
50
53
|
model_config = ConfigDict(
|
|
51
54
|
populate_by_name=True,
|
|
@@ -87,6 +90,7 @@ class RequiredTheUpdatedWorkflow(BaseModel):
|
|
|
87
90
|
* OpenAPI `readOnly` fields are excluded.
|
|
88
91
|
* OpenAPI `readOnly` fields are excluded.
|
|
89
92
|
* OpenAPI `readOnly` fields are excluded.
|
|
93
|
+
* OpenAPI `readOnly` fields are excluded.
|
|
90
94
|
"""
|
|
91
95
|
excluded_fields: Set[str] = set([
|
|
92
96
|
"create_time",
|
|
@@ -99,6 +103,7 @@ class RequiredTheUpdatedWorkflow(BaseModel):
|
|
|
99
103
|
"edges",
|
|
100
104
|
"outputs",
|
|
101
105
|
"output_artifacts",
|
|
106
|
+
"cloned_from_workflow",
|
|
102
107
|
])
|
|
103
108
|
|
|
104
109
|
_dict = self.model_dump(
|
|
@@ -145,7 +150,10 @@ class RequiredTheUpdatedWorkflow(BaseModel):
|
|
|
145
150
|
if obj.get("outputArtifacts") is not None
|
|
146
151
|
else None,
|
|
147
152
|
"llmModel": obj.get("llmModel"),
|
|
148
|
-
"modelParameters": obj.get("modelParameters")
|
|
153
|
+
"modelParameters": obj.get("modelParameters"),
|
|
154
|
+
"document": obj.get("document"),
|
|
155
|
+
"h2ogpteCollection": obj.get("h2ogpteCollection"),
|
|
156
|
+
"clonedFromWorkflow": obj.get("clonedFromWorkflow")
|
|
149
157
|
})
|
|
150
158
|
return _obj
|
|
151
159
|
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
# coding: utf-8
|
|
2
|
+
|
|
3
|
+
"""
|
|
4
|
+
ai/h2o/eval_studio/v1/insight.proto
|
|
5
|
+
|
|
6
|
+
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
|
|
7
|
+
|
|
8
|
+
The version of the OpenAPI document: version not set
|
|
9
|
+
Generated by OpenAPI Generator (https://openapi-generator.tech)
|
|
10
|
+
|
|
11
|
+
Do not edit the class manually.
|
|
12
|
+
""" # noqa: E501
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
import pprint
|
|
17
|
+
import re # noqa: F401
|
|
18
|
+
import json
|
|
19
|
+
|
|
20
|
+
from pydantic import BaseModel, ConfigDict, Field, StrictStr
|
|
21
|
+
from typing import Any, ClassVar, Dict, List, Optional
|
|
22
|
+
from typing import Optional, Set
|
|
23
|
+
from typing_extensions import Self
|
|
24
|
+
|
|
25
|
+
class TestServiceCloneTestRequest(BaseModel):
|
|
26
|
+
"""
|
|
27
|
+
TestServiceCloneTestRequest
|
|
28
|
+
""" # noqa: E501
|
|
29
|
+
new_test_display_name: Optional[StrictStr] = Field(default=None, description="Optional. Name of the newly created test.", alias="newTestDisplayName")
|
|
30
|
+
new_test_description: Optional[StrictStr] = Field(default=None, description="Optional. Description of the newly created Test.", alias="newTestDescription")
|
|
31
|
+
__properties: ClassVar[List[str]] = ["newTestDisplayName", "newTestDescription"]
|
|
32
|
+
|
|
33
|
+
model_config = ConfigDict(
|
|
34
|
+
populate_by_name=True,
|
|
35
|
+
validate_assignment=True,
|
|
36
|
+
protected_namespaces=(),
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def to_str(self) -> str:
|
|
41
|
+
"""Returns the string representation of the model using alias"""
|
|
42
|
+
return pprint.pformat(self.model_dump(by_alias=True))
|
|
43
|
+
|
|
44
|
+
def to_json(self) -> str:
|
|
45
|
+
"""Returns the JSON representation of the model using alias"""
|
|
46
|
+
# TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
|
|
47
|
+
return json.dumps(self.to_dict())
|
|
48
|
+
|
|
49
|
+
@classmethod
|
|
50
|
+
def from_json(cls, json_str: str) -> Optional[Self]:
|
|
51
|
+
"""Create an instance of TestServiceCloneTestRequest from a JSON string"""
|
|
52
|
+
return cls.from_dict(json.loads(json_str))
|
|
53
|
+
|
|
54
|
+
def to_dict(self) -> Dict[str, Any]:
|
|
55
|
+
"""Return the dictionary representation of the model using alias.
|
|
56
|
+
|
|
57
|
+
This has the following differences from calling pydantic's
|
|
58
|
+
`self.model_dump(by_alias=True)`:
|
|
59
|
+
|
|
60
|
+
* `None` is only added to the output dict for nullable fields that
|
|
61
|
+
were set at model initialization. Other fields with value `None`
|
|
62
|
+
are ignored.
|
|
63
|
+
"""
|
|
64
|
+
excluded_fields: Set[str] = set([
|
|
65
|
+
])
|
|
66
|
+
|
|
67
|
+
_dict = self.model_dump(
|
|
68
|
+
by_alias=True,
|
|
69
|
+
exclude=excluded_fields,
|
|
70
|
+
exclude_none=True,
|
|
71
|
+
)
|
|
72
|
+
return _dict
|
|
73
|
+
|
|
74
|
+
@classmethod
|
|
75
|
+
def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
|
|
76
|
+
"""Create an instance of TestServiceCloneTestRequest from a dict"""
|
|
77
|
+
if obj is None:
|
|
78
|
+
return None
|
|
79
|
+
|
|
80
|
+
if not isinstance(obj, dict):
|
|
81
|
+
return cls.model_validate(obj)
|
|
82
|
+
|
|
83
|
+
_obj = cls.model_validate({
|
|
84
|
+
"newTestDisplayName": obj.get("newTestDisplayName"),
|
|
85
|
+
"newTestDescription": obj.get("newTestDescription")
|
|
86
|
+
})
|
|
87
|
+
return _obj
|
|
88
|
+
|
|
89
|
+
|