eval-studio-client 1.0.1__py3-none-any.whl → 1.1.0a5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/api/__init__.py +36 -1
- eval_studio_client/api/api/__init__.py +4 -0
- eval_studio_client/api/api/adversarial_inputs_service_api.py +321 -0
- eval_studio_client/api/api/dashboard_service_api.py +1 -1
- eval_studio_client/api/api/document_service_api.py +1 -1
- eval_studio_client/api/api/evaluation_service_api.py +1 -1
- eval_studio_client/api/api/evaluator_service_api.py +1 -1
- eval_studio_client/api/api/generated_questions_validation_service_api.py +321 -0
- eval_studio_client/api/api/human_calibration_service_api.py +1 -1
- eval_studio_client/api/api/info_service_api.py +1 -1
- eval_studio_client/api/api/leaderboard_report_service_api.py +292 -0
- eval_studio_client/api/api/leaderboard_service_api.py +17 -17
- eval_studio_client/api/api/model_service_api.py +17 -17
- eval_studio_client/api/api/operation_progress_service_api.py +1 -1
- eval_studio_client/api/api/operation_service_api.py +272 -17
- eval_studio_client/api/api/perturbation_service_api.py +1 -1
- eval_studio_client/api/api/perturbator_service_api.py +17 -17
- eval_studio_client/api/api/prompt_generation_service_api.py +1 -1
- eval_studio_client/api/api/prompt_library_service_api.py +1 -1
- eval_studio_client/api/api/test_case_relationship_service_api.py +292 -0
- eval_studio_client/api/api/test_case_service_api.py +17 -17
- eval_studio_client/api/api/test_class_service_api.py +17 -17
- eval_studio_client/api/api/test_lab_service_api.py +1 -1
- eval_studio_client/api/api/test_service_api.py +585 -17
- eval_studio_client/api/api/who_am_i_service_api.py +1 -1
- eval_studio_client/api/api/workflow_edge_service_api.py +541 -2
- eval_studio_client/api/api/workflow_node_service_api.py +923 -126
- eval_studio_client/api/api/workflow_service_api.py +317 -33
- eval_studio_client/api/api_client.py +1 -1
- eval_studio_client/api/configuration.py +1 -1
- eval_studio_client/api/docs/AdversarialInputsServiceApi.md +78 -0
- eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +45 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceApi.md +78 -0
- eval_studio_client/api/docs/GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.md +30 -0
- eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +5 -5
- eval_studio_client/api/docs/ModelServiceApi.md +5 -5
- eval_studio_client/api/docs/OperationServiceApi.md +72 -5
- eval_studio_client/api/docs/PerturbatorServiceApi.md +5 -5
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +2 -1
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +2 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +3 -0
- eval_studio_client/api/docs/TestCaseRelationshipServiceApi.md +75 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +5 -5
- eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
- eval_studio_client/api/docs/TestServiceApi.md +145 -5
- eval_studio_client/api/docs/TestServiceCloneTestRequest.md +30 -0
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +3 -2
- eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md +30 -0
- eval_studio_client/api/docs/V1AbortOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneTestResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Context.md +32 -0
- eval_studio_client/api/docs/V1CreateWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1GeneratedTestCase.md +30 -0
- eval_studio_client/api/docs/V1GetLeaderboardReportResponse.md +29 -0
- eval_studio_client/api/docs/V1Info.md +3 -0
- eval_studio_client/api/docs/V1InitWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1LeaderboardReport.md +32 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputData.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluator.md +42 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluatorParameter.md +38 -0
- eval_studio_client/api/docs/V1LeaderboardReportExplanation.md +34 -0
- eval_studio_client/api/docs/V1LeaderboardReportMetricsMetaEntry.md +41 -0
- eval_studio_client/api/docs/V1LeaderboardReportModel.md +39 -0
- eval_studio_client/api/docs/V1LeaderboardReportResult.md +45 -0
- eval_studio_client/api/docs/V1LeaderboardReportResultRelationship.md +32 -0
- eval_studio_client/api/docs/V1ListTestCaseRelationshipsResponse.md +29 -0
- eval_studio_client/api/docs/V1MetricScore.md +31 -0
- eval_studio_client/api/docs/V1MetricScores.md +29 -0
- eval_studio_client/api/docs/V1PerturbTestInPlaceResponse.md +29 -0
- eval_studio_client/api/docs/V1RepeatedString.md +29 -0
- eval_studio_client/api/docs/V1ResetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1TestCase.md +2 -0
- eval_studio_client/api/docs/V1Workflow.md +3 -0
- eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +139 -0
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +221 -12
- eval_studio_client/api/docs/WorkflowServiceApi.md +81 -10
- eval_studio_client/api/docs/WorkflowServiceCloneWorkflowRequest.md +33 -0
- eval_studio_client/api/exceptions.py +1 -1
- eval_studio_client/api/models/__init__.py +32 -1
- eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +143 -0
- eval_studio_client/api/models/generated_questions_validation_service_validate_generated_questions_request.py +97 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +1 -1
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +13 -4
- eval_studio_client/api/models/protobuf_any.py +1 -1
- eval_studio_client/api/models/protobuf_null_value.py +1 -1
- eval_studio_client/api/models/required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_document_to_update.py +1 -1
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_model_to_update.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_case_to_update.py +10 -3
- eval_studio_client/api/models/required_the_test_to_update.py +1 -1
- eval_studio_client/api/models/required_the_updated_workflow.py +11 -3
- eval_studio_client/api/models/required_the_updated_workflow_node.py +1 -1
- eval_studio_client/api/models/rpc_status.py +1 -1
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/models/test_service_clone_test_request.py +89 -0
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +7 -5
- eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +1 -1
- eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +1 -1
- eval_studio_client/api/models/test_service_perturb_test_in_place_request.py +97 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +1 -1
- eval_studio_client/api/models/v1_abort_operation_response.py +91 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/models/v1_check_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_clone_test_response.py +91 -0
- eval_studio_client/api/models/v1_clone_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_collection_info.py +1 -1
- eval_studio_client/api/models/v1_context.py +93 -0
- eval_studio_client/api/models/v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_document_response.py +1 -1
- eval_studio_client/api/models/v1_create_evaluation_request.py +1 -1
- eval_studio_client/api/models/v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/models/v1_create_model_response.py +1 -1
- eval_studio_client/api/models/v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_response.py +1 -1
- eval_studio_client/api/models/v1_create_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_dashboard.py +1 -1
- eval_studio_client/api/models/v1_dashboard_status.py +1 -1
- eval_studio_client/api/models/v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_document_response.py +1 -1
- eval_studio_client/api/models/v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_model_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_document.py +1 -1
- eval_studio_client/api/models/v1_estimate_threshold_request.py +1 -1
- eval_studio_client/api/models/v1_evaluation_test.py +1 -1
- eval_studio_client/api/models/v1_evaluator.py +1 -1
- eval_studio_client/api/models/v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/models/v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/models/v1_evaluator_view.py +1 -1
- eval_studio_client/api/models/v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/models/v1_find_all_test_cases_by_id_response.py +1 -1
- eval_studio_client/api/models/v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_generated_test_case.py +101 -0
- eval_studio_client/api/models/v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_document_response.py +1 -1
- eval_studio_client/api/models/v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_get_info_response.py +1 -1
- eval_studio_client/api/models/v1_get_leaderboard_report_response.py +91 -0
- eval_studio_client/api/models/v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_model_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_response.py +1 -1
- eval_studio_client/api/models/v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_class_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_import_evaluation_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_request.py +1 -1
- eval_studio_client/api/models/v1_info.py +10 -4
- eval_studio_client/api/models/v1_init_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_insight.py +1 -1
- eval_studio_client/api/models/v1_labeled_test_case.py +1 -1
- eval_studio_client/api/models/v1_leaderboard.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_report.py +115 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_data.py +93 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +101 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator.py +155 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator_parameter.py +109 -0
- eval_studio_client/api/models/v1_leaderboard_report_explanation.py +103 -0
- eval_studio_client/api/models/v1_leaderboard_report_metrics_meta_entry.py +129 -0
- eval_studio_client/api/models/v1_leaderboard_report_model.py +121 -0
- eval_studio_client/api/models/v1_leaderboard_report_result.py +175 -0
- eval_studio_client/api/models/v1_leaderboard_report_result_relationship.py +97 -0
- eval_studio_client/api/models/v1_leaderboard_status.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_type.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_view.py +1 -1
- eval_studio_client/api/models/v1_list_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_documents_response.py +1 -1
- eval_studio_client/api/models/v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_operations_response.py +1 -1
- eval_studio_client/api/models/v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/models/v1_list_prompt_library_items_response.py +1 -1
- eval_studio_client/api/models/v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_library_items_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_relationships_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/models/v1_list_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_workflows_response.py +1 -1
- eval_studio_client/api/models/v1_metric_score.py +89 -0
- eval_studio_client/api/models/v1_metric_scores.py +95 -0
- eval_studio_client/api/models/v1_model.py +1 -1
- eval_studio_client/api/models/v1_model_type.py +1 -1
- eval_studio_client/api/models/v1_operation.py +1 -1
- eval_studio_client/api/models/v1_operation_progress.py +1 -1
- eval_studio_client/api/models/v1_perturb_test_in_place_response.py +91 -0
- eval_studio_client/api/models/v1_perturb_test_response.py +1 -1
- eval_studio_client/api/models/v1_perturbator.py +1 -1
- eval_studio_client/api/models/v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/models/v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/models/v1_problem_and_action.py +1 -1
- eval_studio_client/api/models/v1_process_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_prompt_library_item.py +1 -1
- eval_studio_client/api/models/v1_repeated_string.py +87 -0
- eval_studio_client/api/models/v1_reset_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_test.py +1 -1
- eval_studio_client/api/models/v1_test_case.py +10 -3
- eval_studio_client/api/models/v1_test_case_relationship.py +1 -1
- eval_studio_client/api/models/v1_test_cases_generator.py +1 -1
- eval_studio_client/api/models/v1_test_class.py +1 -1
- eval_studio_client/api/models/v1_test_class_type.py +1 -1
- eval_studio_client/api/models/v1_test_lab.py +1 -1
- eval_studio_client/api/models/v1_test_suite_evaluates.py +1 -1
- eval_studio_client/api/models/v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_document_response.py +1 -1
- eval_studio_client/api/models/v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_model_response.py +1 -1
- eval_studio_client/api/models/v1_update_operation_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_node_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_response.py +1 -1
- eval_studio_client/api/models/v1_who_am_i_response.py +1 -1
- eval_studio_client/api/models/v1_workflow.py +11 -3
- eval_studio_client/api/models/v1_workflow_edge.py +1 -1
- eval_studio_client/api/models/v1_workflow_edge_type.py +1 -1
- eval_studio_client/api/models/v1_workflow_node.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_artifact.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_artifacts.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_attributes.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_status.py +1 -1
- eval_studio_client/api/models/v1_workflow_node_type.py +4 -1
- eval_studio_client/api/models/v1_workflow_node_view.py +1 -1
- eval_studio_client/api/models/v1_workflow_type.py +1 -1
- eval_studio_client/api/models/workflow_service_clone_workflow_request.py +95 -0
- eval_studio_client/api/rest.py +1 -1
- eval_studio_client/api/test/test_adversarial_inputs_service_api.py +37 -0
- eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +128 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
- eval_studio_client/api/test/test_document_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
- eval_studio_client/api/test/test_generated_questions_validation_service_api.py +37 -0
- eval_studio_client/api/test/test_generated_questions_validation_service_validate_generated_questions_request.py +83 -0
- eval_studio_client/api/test/test_human_calibration_service_api.py +1 -1
- eval_studio_client/api/test/test_info_service_api.py +1 -1
- eval_studio_client/api/test/test_leaderboard_report_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
- eval_studio_client/api/test/test_model_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_service_api.py +7 -1
- eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +6 -2
- eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +9 -4
- eval_studio_client/api/test/test_prompt_library_service_api.py +1 -1
- eval_studio_client/api/test/test_protobuf_any.py +1 -1
- eval_studio_client/api/test/test_protobuf_null_value.py +1 -1
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +6 -2
- eval_studio_client/api/test/test_required_the_test_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_updated_workflow.py +5 -2
- eval_studio_client/api/test/test_required_the_updated_workflow_node.py +1 -1
- eval_studio_client/api/test/test_rpc_status.py +1 -1
- eval_studio_client/api/test/test_test_case_relationship_service_api.py +37 -0
- eval_studio_client/api/test/test_test_case_service_api.py +1 -1
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_test_class_service_api.py +1 -1
- eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
- eval_studio_client/api/test/test_test_service_api.py +13 -1
- eval_studio_client/api/test/test_test_service_clone_test_request.py +52 -0
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +4 -1
- eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +1 -1
- eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +1 -1
- eval_studio_client/api/test/test_test_service_perturb_test_in_place_request.py +59 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +1 -1
- eval_studio_client/api/test/test_v1_abort_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +6 -2
- eval_studio_client/api/test/test_v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +5 -2
- eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_check_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_clone_test_response.py +67 -0
- eval_studio_client/api/test/test_v1_clone_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_collection_info.py +1 -1
- eval_studio_client/api/test/test_v1_context.py +54 -0
- eval_studio_client/api/test/test_v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +6 -2
- eval_studio_client/api/test/test_v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_create_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_create_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_dashboard.py +1 -1
- eval_studio_client/api/test/test_v1_dashboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_delete_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_document.py +1 -1
- eval_studio_client/api/test/test_v1_estimate_threshold_request.py +1 -1
- eval_studio_client/api/test/test_v1_evaluation_test.py +6 -2
- eval_studio_client/api/test/test_v1_evaluator.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_view.py +1 -1
- eval_studio_client/api/test/test_v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +6 -2
- eval_studio_client/api/test/test_v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/test/test_v1_generated_test_case.py +79 -0
- eval_studio_client/api/test/test_v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_info_response.py +7 -2
- eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +175 -0
- eval_studio_client/api/test/test_v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_get_test_class_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_import_evaluation_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_v1_info.py +7 -2
- eval_studio_client/api/test/test_v1_init_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_insight.py +1 -1
- eval_studio_client/api/test/test_v1_labeled_test_case.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_report.py +174 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py +52 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +56 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py +114 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py +63 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py +58 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py +66 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_model.py +62 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result.py +92 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_type.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_view.py +1 -1
- eval_studio_client/api/test/test_v1_list_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_relationships_response.py +56 -0
- eval_studio_client/api/test/test_v1_list_test_cases_response.py +6 -2
- eval_studio_client/api/test/test_v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_tests_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_workflows_response.py +5 -2
- eval_studio_client/api/test/test_v1_metric_score.py +52 -0
- eval_studio_client/api/test/test_v1_metric_scores.py +55 -0
- eval_studio_client/api/test/test_v1_model.py +1 -1
- eval_studio_client/api/test/test_v1_model_type.py +1 -1
- eval_studio_client/api/test/test_v1_operation.py +1 -1
- eval_studio_client/api/test/test_v1_operation_progress.py +1 -1
- eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +67 -0
- eval_studio_client/api/test/test_v1_perturb_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/test/test_v1_problem_and_action.py +1 -1
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_prompt_library_item.py +1 -1
- eval_studio_client/api/test/test_v1_repeated_string.py +53 -0
- eval_studio_client/api/test/test_v1_reset_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_test.py +1 -1
- eval_studio_client/api/test/test_v1_test_case.py +6 -2
- eval_studio_client/api/test/test_v1_test_case_relationship.py +1 -1
- eval_studio_client/api/test/test_v1_test_cases_generator.py +1 -1
- eval_studio_client/api/test/test_v1_test_class.py +1 -1
- eval_studio_client/api/test/test_v1_test_class_type.py +1 -1
- eval_studio_client/api/test/test_v1_test_lab.py +1 -1
- eval_studio_client/api/test/test_v1_test_suite_evaluates.py +1 -1
- eval_studio_client/api/test/test_v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_test_case_response.py +6 -2
- eval_studio_client/api/test/test_v1_update_test_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_workflow_node_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_workflow_response.py +5 -2
- eval_studio_client/api/test/test_v1_who_am_i_response.py +1 -1
- eval_studio_client/api/test/test_v1_workflow.py +5 -2
- eval_studio_client/api/test/test_v1_workflow_edge.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_edge_type.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_artifact.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_attributes.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_status.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_type.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_node_view.py +1 -1
- eval_studio_client/api/test/test_v1_workflow_type.py +1 -1
- eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
- eval_studio_client/api/test/test_workflow_edge_service_api.py +15 -1
- eval_studio_client/api/test/test_workflow_node_service_api.py +23 -2
- eval_studio_client/api/test/test_workflow_service_api.py +8 -1
- eval_studio_client/api/test/test_workflow_service_clone_workflow_request.py +55 -0
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +1633 -219
- eval_studio_client/tests.py +103 -8
- {eval_studio_client-1.0.1.dist-info → eval_studio_client-1.1.0a5.dist-info}/METADATA +2 -2
- eval_studio_client-1.1.0a5.dist-info/RECORD +720 -0
- {eval_studio_client-1.0.1.dist-info → eval_studio_client-1.1.0a5.dist-info}/WHEEL +1 -1
- eval_studio_client-1.0.1.dist-info/RECORD +0 -615
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
# V1LeaderboardReportActualOutputMeta
|
|
2
|
+
|
|
3
|
+
ActualOutputMeta represents the metadata about the actual output.
|
|
4
|
+
|
|
5
|
+
## Properties
|
|
6
|
+
|
|
7
|
+
Name | Type | Description | Notes
|
|
8
|
+
------------ | ------------- | ------------- | -------------
|
|
9
|
+
**tokenization** | **str** | Output only. Actual output data tokenization like sentence_level_punkt. | [optional] [readonly]
|
|
10
|
+
**data** | [**List[V1LeaderboardReportActualOutputData]**](V1LeaderboardReportActualOutputData.md) | Output only. Actual output data - list of text fragments coupled with the metric values. | [optional] [readonly]
|
|
11
|
+
|
|
12
|
+
## Example
|
|
13
|
+
|
|
14
|
+
```python
|
|
15
|
+
from eval_studio_client.api.models.v1_leaderboard_report_actual_output_meta import V1LeaderboardReportActualOutputMeta
|
|
16
|
+
|
|
17
|
+
# TODO update the JSON string below
|
|
18
|
+
json = "{}"
|
|
19
|
+
# create an instance of V1LeaderboardReportActualOutputMeta from a JSON string
|
|
20
|
+
v1_leaderboard_report_actual_output_meta_instance = V1LeaderboardReportActualOutputMeta.from_json(json)
|
|
21
|
+
# print the JSON string representation of the object
|
|
22
|
+
print(V1LeaderboardReportActualOutputMeta.to_json())
|
|
23
|
+
|
|
24
|
+
# convert the object into a dict
|
|
25
|
+
v1_leaderboard_report_actual_output_meta_dict = v1_leaderboard_report_actual_output_meta_instance.to_dict()
|
|
26
|
+
# create an instance of V1LeaderboardReportActualOutputMeta from a dict
|
|
27
|
+
v1_leaderboard_report_actual_output_meta_from_dict = V1LeaderboardReportActualOutputMeta.from_dict(v1_leaderboard_report_actual_output_meta_dict)
|
|
28
|
+
```
|
|
29
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
30
|
+
|
|
31
|
+
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# V1LeaderboardReportEvaluator
|
|
2
|
+
|
|
3
|
+
Evaluator represents the evaluator which evaluated the model outputs to create the results.
|
|
4
|
+
|
|
5
|
+
## Properties
|
|
6
|
+
|
|
7
|
+
Name | Type | Description | Notes
|
|
8
|
+
------------ | ------------- | ------------- | -------------
|
|
9
|
+
**id** | **str** | Output only. Evaluator ID. | [optional] [readonly]
|
|
10
|
+
**name** | **str** | Output only. Evaluator short name based on its class name. | [optional] [readonly]
|
|
11
|
+
**display_name** | **str** | Output only. Evaluator display name. | [optional] [readonly]
|
|
12
|
+
**tagline** | **str** | Optional. Evaluator one row description. | [optional]
|
|
13
|
+
**description** | **str** | Output only. Evaluator description. | [optional] [readonly]
|
|
14
|
+
**brief_description** | **str** | Optional. Brief description. | [optional]
|
|
15
|
+
**model_types** | **List[str]** | Output only. List of model types like rag. | [optional] [readonly]
|
|
16
|
+
**can_explain** | **List[str]** | Optional. List of experiment types the Explainer can explain like regression or multinomial. | [optional]
|
|
17
|
+
**explanation_scopes** | **List[str]** | Output only. List of explanation scopes like global or local. | [optional] [readonly]
|
|
18
|
+
**explanations** | [**List[V1LeaderboardReportExplanation]**](V1LeaderboardReportExplanation.md) | Output only. List of explanation types created by the Evaluator. | [optional] [readonly]
|
|
19
|
+
**parameters** | [**List[V1LeaderboardReportEvaluatorParameter]**](V1LeaderboardReportEvaluatorParameter.md) | Output only. List of parameter type definitions. | [optional] [readonly]
|
|
20
|
+
**keywords** | **List[str]** | Output only. List of keywords. | [optional] [readonly]
|
|
21
|
+
**metrics_meta** | [**List[V1LeaderboardReportMetricsMetaEntry]**](V1LeaderboardReportMetricsMetaEntry.md) | Output only. List of metrics metadata for metrics created by the Evaluator. | [optional] [readonly]
|
|
22
|
+
|
|
23
|
+
## Example
|
|
24
|
+
|
|
25
|
+
```python
|
|
26
|
+
from eval_studio_client.api.models.v1_leaderboard_report_evaluator import V1LeaderboardReportEvaluator
|
|
27
|
+
|
|
28
|
+
# TODO update the JSON string below
|
|
29
|
+
json = "{}"
|
|
30
|
+
# create an instance of V1LeaderboardReportEvaluator from a JSON string
|
|
31
|
+
v1_leaderboard_report_evaluator_instance = V1LeaderboardReportEvaluator.from_json(json)
|
|
32
|
+
# print the JSON string representation of the object
|
|
33
|
+
print(V1LeaderboardReportEvaluator.to_json())
|
|
34
|
+
|
|
35
|
+
# convert the object into a dict
|
|
36
|
+
v1_leaderboard_report_evaluator_dict = v1_leaderboard_report_evaluator_instance.to_dict()
|
|
37
|
+
# create an instance of V1LeaderboardReportEvaluator from a dict
|
|
38
|
+
v1_leaderboard_report_evaluator_from_dict = V1LeaderboardReportEvaluator.from_dict(v1_leaderboard_report_evaluator_dict)
|
|
39
|
+
```
|
|
40
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
41
|
+
|
|
42
|
+
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
# V1LeaderboardReportEvaluatorParameter
|
|
2
|
+
|
|
3
|
+
Evaluation parameter definition.
|
|
4
|
+
|
|
5
|
+
## Properties
|
|
6
|
+
|
|
7
|
+
Name | Type | Description | Notes
|
|
8
|
+
------------ | ------------- | ------------- | -------------
|
|
9
|
+
**name** | **str** | Output only. Evaluator parameter ID. | [optional] [readonly]
|
|
10
|
+
**description** | **str** | Output only. Parameter description. | [optional] [readonly]
|
|
11
|
+
**comment** | **str** | Optional. Parameter comment. | [optional]
|
|
12
|
+
**type** | **str** | Output only. Parameter type like float or string. | [optional] [readonly]
|
|
13
|
+
**predefined** | **List[object]** | Optional. Predefined parameter values - numeric or non-numeric enum. | [optional]
|
|
14
|
+
**tags** | **List[str]** | Optional. Parameter tags. | [optional]
|
|
15
|
+
**min** | **float** | Optional. Parameter value lower range. | [optional]
|
|
16
|
+
**max** | **float** | Optional. Parameter value upper range. | [optional]
|
|
17
|
+
**category** | **str** | Optional. Parameter category. | [optional]
|
|
18
|
+
|
|
19
|
+
## Example
|
|
20
|
+
|
|
21
|
+
```python
|
|
22
|
+
from eval_studio_client.api.models.v1_leaderboard_report_evaluator_parameter import V1LeaderboardReportEvaluatorParameter
|
|
23
|
+
|
|
24
|
+
# TODO update the JSON string below
|
|
25
|
+
json = "{}"
|
|
26
|
+
# create an instance of V1LeaderboardReportEvaluatorParameter from a JSON string
|
|
27
|
+
v1_leaderboard_report_evaluator_parameter_instance = V1LeaderboardReportEvaluatorParameter.from_json(json)
|
|
28
|
+
# print the JSON string representation of the object
|
|
29
|
+
print(V1LeaderboardReportEvaluatorParameter.to_json())
|
|
30
|
+
|
|
31
|
+
# convert the object into a dict
|
|
32
|
+
v1_leaderboard_report_evaluator_parameter_dict = v1_leaderboard_report_evaluator_parameter_instance.to_dict()
|
|
33
|
+
# create an instance of V1LeaderboardReportEvaluatorParameter from a dict
|
|
34
|
+
v1_leaderboard_report_evaluator_parameter_from_dict = V1LeaderboardReportEvaluatorParameter.from_dict(v1_leaderboard_report_evaluator_parameter_dict)
|
|
35
|
+
```
|
|
36
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
37
|
+
|
|
38
|
+
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# V1LeaderboardReportExplanation
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
## Properties
|
|
5
|
+
|
|
6
|
+
Name | Type | Description | Notes
|
|
7
|
+
------------ | ------------- | ------------- | -------------
|
|
8
|
+
**explanation_type** | **str** | Output only. Explanation type ID. | [optional] [readonly]
|
|
9
|
+
**name** | **str** | Output only. Explanation display name. | [optional] [readonly]
|
|
10
|
+
**category** | **str** | Output only. Explanation display category. | [optional] [readonly]
|
|
11
|
+
**scope** | **str** | Optional. Explanation scope like global or local. | [optional]
|
|
12
|
+
**has_local** | **str** | Optional. Local explanation type id associated with (this) global explanation. | [optional]
|
|
13
|
+
**formats** | **List[str]** | Optional. List of formats available for the explanation. | [optional]
|
|
14
|
+
|
|
15
|
+
## Example
|
|
16
|
+
|
|
17
|
+
```python
|
|
18
|
+
from eval_studio_client.api.models.v1_leaderboard_report_explanation import V1LeaderboardReportExplanation
|
|
19
|
+
|
|
20
|
+
# TODO update the JSON string below
|
|
21
|
+
json = "{}"
|
|
22
|
+
# create an instance of V1LeaderboardReportExplanation from a JSON string
|
|
23
|
+
v1_leaderboard_report_explanation_instance = V1LeaderboardReportExplanation.from_json(json)
|
|
24
|
+
# print the JSON string representation of the object
|
|
25
|
+
print(V1LeaderboardReportExplanation.to_json())
|
|
26
|
+
|
|
27
|
+
# convert the object into a dict
|
|
28
|
+
v1_leaderboard_report_explanation_dict = v1_leaderboard_report_explanation_instance.to_dict()
|
|
29
|
+
# create an instance of V1LeaderboardReportExplanation from a dict
|
|
30
|
+
v1_leaderboard_report_explanation_from_dict = V1LeaderboardReportExplanation.from_dict(v1_leaderboard_report_explanation_dict)
|
|
31
|
+
```
|
|
32
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
33
|
+
|
|
34
|
+
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# V1LeaderboardReportMetricsMetaEntry
|
|
2
|
+
|
|
3
|
+
MetricsMetaEntry represents the metadata about the metric.
|
|
4
|
+
|
|
5
|
+
## Properties
|
|
6
|
+
|
|
7
|
+
Name | Type | Description | Notes
|
|
8
|
+
------------ | ------------- | ------------- | -------------
|
|
9
|
+
**key** | **str** | Output only. Metric key. | [optional] [readonly]
|
|
10
|
+
**display_name** | **str** | Output only. Metric display name. | [optional] [readonly]
|
|
11
|
+
**data_type** | **str** | Output only. Metric data type like float or string. | [optional] [readonly]
|
|
12
|
+
**display_value** | **str** | Output only. Metric display value. | [optional] [readonly]
|
|
13
|
+
**description** | **str** | Output only. Metric description. | [optional] [readonly]
|
|
14
|
+
**value_range** | **List[float]** | Optional. Metric value range for numeric scores. | [optional]
|
|
15
|
+
**value_enum** | **List[str]** | Optional. Metric value enum for non-numeric scores. | [optional]
|
|
16
|
+
**higher_is_better** | **bool** | Output only. Metric higher is better. | [optional] [readonly]
|
|
17
|
+
**threshold** | **float** | Output only. Metric threshold. | [optional] [readonly]
|
|
18
|
+
**is_primary_metric** | **bool** | Output only. Metric is primary. | [optional] [readonly]
|
|
19
|
+
**parent_metric** | **str** | Output only. This metric parent. | [optional] [readonly]
|
|
20
|
+
**exclude** | **bool** | Output only. Whether to exclude the metric. | [optional] [readonly]
|
|
21
|
+
|
|
22
|
+
## Example
|
|
23
|
+
|
|
24
|
+
```python
|
|
25
|
+
from eval_studio_client.api.models.v1_leaderboard_report_metrics_meta_entry import V1LeaderboardReportMetricsMetaEntry
|
|
26
|
+
|
|
27
|
+
# TODO update the JSON string below
|
|
28
|
+
json = "{}"
|
|
29
|
+
# create an instance of V1LeaderboardReportMetricsMetaEntry from a JSON string
|
|
30
|
+
v1_leaderboard_report_metrics_meta_entry_instance = V1LeaderboardReportMetricsMetaEntry.from_json(json)
|
|
31
|
+
# print the JSON string representation of the object
|
|
32
|
+
print(V1LeaderboardReportMetricsMetaEntry.to_json())
|
|
33
|
+
|
|
34
|
+
# convert the object into a dict
|
|
35
|
+
v1_leaderboard_report_metrics_meta_entry_dict = v1_leaderboard_report_metrics_meta_entry_instance.to_dict()
|
|
36
|
+
# create an instance of V1LeaderboardReportMetricsMetaEntry from a dict
|
|
37
|
+
v1_leaderboard_report_metrics_meta_entry_from_dict = V1LeaderboardReportMetricsMetaEntry.from_dict(v1_leaderboard_report_metrics_meta_entry_dict)
|
|
38
|
+
```
|
|
39
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
40
|
+
|
|
41
|
+
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# V1LeaderboardReportModel
|
|
2
|
+
|
|
3
|
+
Model represents the evaluated model whose outputs were evaluated to create the results.
|
|
4
|
+
|
|
5
|
+
## Properties
|
|
6
|
+
|
|
7
|
+
Name | Type | Description | Notes
|
|
8
|
+
------------ | ------------- | ------------- | -------------
|
|
9
|
+
**connection** | **str** | Output only. Connection key. | [optional] [readonly]
|
|
10
|
+
**model_type** | **str** | Output only. Model type. | [optional] [readonly]
|
|
11
|
+
**name** | **str** | Output only. Model display name. | [optional] [readonly]
|
|
12
|
+
**collection_id** | **str** | Optional. Collection ID. | [optional]
|
|
13
|
+
**collection_name** | **str** | Optional. Collection name. | [optional]
|
|
14
|
+
**llm_model_name** | **str** | Output only. LLM model name. | [optional] [readonly]
|
|
15
|
+
**documents** | **List[str]** | Output only. List of documents. | [optional] [readonly]
|
|
16
|
+
**model_cfg** | **object** | Output only. Model configuration. | [optional] [readonly]
|
|
17
|
+
**key** | **str** | Output only. Model key. | [optional] [readonly]
|
|
18
|
+
**llm_model_meta** | **object** | Output only. LLM model metadata - recursive dictionaries with an additional info like performance stats. | [optional] [readonly]
|
|
19
|
+
|
|
20
|
+
## Example
|
|
21
|
+
|
|
22
|
+
```python
|
|
23
|
+
from eval_studio_client.api.models.v1_leaderboard_report_model import V1LeaderboardReportModel
|
|
24
|
+
|
|
25
|
+
# TODO update the JSON string below
|
|
26
|
+
json = "{}"
|
|
27
|
+
# create an instance of V1LeaderboardReportModel from a JSON string
|
|
28
|
+
v1_leaderboard_report_model_instance = V1LeaderboardReportModel.from_json(json)
|
|
29
|
+
# print the JSON string representation of the object
|
|
30
|
+
print(V1LeaderboardReportModel.to_json())
|
|
31
|
+
|
|
32
|
+
# convert the object into a dict
|
|
33
|
+
v1_leaderboard_report_model_dict = v1_leaderboard_report_model_instance.to_dict()
|
|
34
|
+
# create an instance of V1LeaderboardReportModel from a dict
|
|
35
|
+
v1_leaderboard_report_model_from_dict = V1LeaderboardReportModel.from_dict(v1_leaderboard_report_model_dict)
|
|
36
|
+
```
|
|
37
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
38
|
+
|
|
39
|
+
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
# V1LeaderboardReportResult
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
## Properties
|
|
5
|
+
|
|
6
|
+
Name | Type | Description | Notes
|
|
7
|
+
------------ | ------------- | ------------- | -------------
|
|
8
|
+
**key** | **str** | Output only. Composite unique key of the result formed by the model key and test case key. | [optional] [readonly]
|
|
9
|
+
**input** | **str** | Output only. Input prompt or text to be processed. | [optional] [readonly]
|
|
10
|
+
**corpus** | **List[str]** | Output only. Collection of corpus documents to be used during evaluation. | [optional] [readonly]
|
|
11
|
+
**context** | **List[str]** | Output only. List of contextual information or references. | [optional] [readonly]
|
|
12
|
+
**categories** | **List[str]** | Output only. List of categories or labels for classification. | [optional] [readonly]
|
|
13
|
+
**relationships** | [**List[V1LeaderboardReportResultRelationship]**](V1LeaderboardReportResultRelationship.md) | Output only. List of relationships or associations between entities. | [optional] [readonly]
|
|
14
|
+
**expected_output** | **str** | Output only. Expected output or target result. | [optional] [readonly]
|
|
15
|
+
**output_constraints** | **List[str]** | Output only. List of constraints that should be applied to the output. | [optional] [readonly]
|
|
16
|
+
**output_condition** | **str** | Output only. Condition that output should satisfy. | [optional] [readonly]
|
|
17
|
+
**actual_output** | **str** | Output only. Actual output produced by the model. | [optional] [readonly]
|
|
18
|
+
**actual_duration** | **float** | Output only. Duration of processing in seconds. | [optional] [readonly]
|
|
19
|
+
**cost** | **float** | Output only. Cost of processing in currency units. | [optional] [readonly]
|
|
20
|
+
**model_key** | **str** | Output only. Unique identifier for the model used. | [optional] [readonly]
|
|
21
|
+
**test_case_key** | **str** | Output only. Unique identifier for the test case. | [optional] [readonly]
|
|
22
|
+
**metrics** | [**List[V1MetricScore]**](V1MetricScore.md) | Optional. All metrics values for the result. | [optional]
|
|
23
|
+
**result_error_message** | **str** | Output only. Error message if processing resulted in failure. | [optional] [readonly]
|
|
24
|
+
**actual_output_meta** | [**List[V1LeaderboardReportActualOutputMeta]**](V1LeaderboardReportActualOutputMeta.md) | Output only. Additional metadata about the actual output. | [optional] [readonly]
|
|
25
|
+
|
|
26
|
+
## Example
|
|
27
|
+
|
|
28
|
+
```python
|
|
29
|
+
from eval_studio_client.api.models.v1_leaderboard_report_result import V1LeaderboardReportResult
|
|
30
|
+
|
|
31
|
+
# TODO update the JSON string below
|
|
32
|
+
json = "{}"
|
|
33
|
+
# create an instance of V1LeaderboardReportResult from a JSON string
|
|
34
|
+
v1_leaderboard_report_result_instance = V1LeaderboardReportResult.from_json(json)
|
|
35
|
+
# print the JSON string representation of the object
|
|
36
|
+
print(V1LeaderboardReportResult.to_json())
|
|
37
|
+
|
|
38
|
+
# convert the object into a dict
|
|
39
|
+
v1_leaderboard_report_result_dict = v1_leaderboard_report_result_instance.to_dict()
|
|
40
|
+
# create an instance of V1LeaderboardReportResult from a dict
|
|
41
|
+
v1_leaderboard_report_result_from_dict = V1LeaderboardReportResult.from_dict(v1_leaderboard_report_result_dict)
|
|
42
|
+
```
|
|
43
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
44
|
+
|
|
45
|
+
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# V1LeaderboardReportResultRelationship
|
|
2
|
+
|
|
3
|
+
Relationship represents the relationship between result entries.
|
|
4
|
+
|
|
5
|
+
## Properties
|
|
6
|
+
|
|
7
|
+
Name | Type | Description | Notes
|
|
8
|
+
------------ | ------------- | ------------- | -------------
|
|
9
|
+
**type** | **str** | Output only. Type of the relationship. | [optional] [readonly]
|
|
10
|
+
**target** | **str** | Output only. Source result of the relationship. | [optional] [readonly]
|
|
11
|
+
**target_type** | **str** | Output only. Target type of the relationship like test_case. | [optional] [readonly]
|
|
12
|
+
|
|
13
|
+
## Example
|
|
14
|
+
|
|
15
|
+
```python
|
|
16
|
+
from eval_studio_client.api.models.v1_leaderboard_report_result_relationship import V1LeaderboardReportResultRelationship
|
|
17
|
+
|
|
18
|
+
# TODO update the JSON string below
|
|
19
|
+
json = "{}"
|
|
20
|
+
# create an instance of V1LeaderboardReportResultRelationship from a JSON string
|
|
21
|
+
v1_leaderboard_report_result_relationship_instance = V1LeaderboardReportResultRelationship.from_json(json)
|
|
22
|
+
# print the JSON string representation of the object
|
|
23
|
+
print(V1LeaderboardReportResultRelationship.to_json())
|
|
24
|
+
|
|
25
|
+
# convert the object into a dict
|
|
26
|
+
v1_leaderboard_report_result_relationship_dict = v1_leaderboard_report_result_relationship_instance.to_dict()
|
|
27
|
+
# create an instance of V1LeaderboardReportResultRelationship from a dict
|
|
28
|
+
v1_leaderboard_report_result_relationship_from_dict = V1LeaderboardReportResultRelationship.from_dict(v1_leaderboard_report_result_relationship_dict)
|
|
29
|
+
```
|
|
30
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
31
|
+
|
|
32
|
+
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# V1ListTestCaseRelationshipsResponse
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
## Properties
|
|
5
|
+
|
|
6
|
+
Name | Type | Description | Notes
|
|
7
|
+
------------ | ------------- | ------------- | -------------
|
|
8
|
+
**test_case_relationships** | [**List[V1TestCaseRelationship]**](V1TestCaseRelationship.md) | The TestCaseRelationships that were requested. | [optional]
|
|
9
|
+
|
|
10
|
+
## Example
|
|
11
|
+
|
|
12
|
+
```python
|
|
13
|
+
from eval_studio_client.api.models.v1_list_test_case_relationships_response import V1ListTestCaseRelationshipsResponse
|
|
14
|
+
|
|
15
|
+
# TODO update the JSON string below
|
|
16
|
+
json = "{}"
|
|
17
|
+
# create an instance of V1ListTestCaseRelationshipsResponse from a JSON string
|
|
18
|
+
v1_list_test_case_relationships_response_instance = V1ListTestCaseRelationshipsResponse.from_json(json)
|
|
19
|
+
# print the JSON string representation of the object
|
|
20
|
+
print(V1ListTestCaseRelationshipsResponse.to_json())
|
|
21
|
+
|
|
22
|
+
# convert the object into a dict
|
|
23
|
+
v1_list_test_case_relationships_response_dict = v1_list_test_case_relationships_response_instance.to_dict()
|
|
24
|
+
# create an instance of V1ListTestCaseRelationshipsResponse from a dict
|
|
25
|
+
v1_list_test_case_relationships_response_from_dict = V1ListTestCaseRelationshipsResponse.from_dict(v1_list_test_case_relationships_response_dict)
|
|
26
|
+
```
|
|
27
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
28
|
+
|
|
29
|
+
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
# V1MetricScore
|
|
2
|
+
|
|
3
|
+
MetricScore represents the metric score.
|
|
4
|
+
|
|
5
|
+
## Properties
|
|
6
|
+
|
|
7
|
+
Name | Type | Description | Notes
|
|
8
|
+
------------ | ------------- | ------------- | -------------
|
|
9
|
+
**key** | **str** | Required. Metric key. | [optional]
|
|
10
|
+
**value** | **float** | Required. Metric value - consider NaN, Infinity or -Infinity for float representation. | [optional]
|
|
11
|
+
|
|
12
|
+
## Example
|
|
13
|
+
|
|
14
|
+
```python
|
|
15
|
+
from eval_studio_client.api.models.v1_metric_score import V1MetricScore
|
|
16
|
+
|
|
17
|
+
# TODO update the JSON string below
|
|
18
|
+
json = "{}"
|
|
19
|
+
# create an instance of V1MetricScore from a JSON string
|
|
20
|
+
v1_metric_score_instance = V1MetricScore.from_json(json)
|
|
21
|
+
# print the JSON string representation of the object
|
|
22
|
+
print(V1MetricScore.to_json())
|
|
23
|
+
|
|
24
|
+
# convert the object into a dict
|
|
25
|
+
v1_metric_score_dict = v1_metric_score_instance.to_dict()
|
|
26
|
+
# create an instance of V1MetricScore from a dict
|
|
27
|
+
v1_metric_score_from_dict = V1MetricScore.from_dict(v1_metric_score_dict)
|
|
28
|
+
```
|
|
29
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
30
|
+
|
|
31
|
+
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# V1MetricScores
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
## Properties
|
|
5
|
+
|
|
6
|
+
Name | Type | Description | Notes
|
|
7
|
+
------------ | ------------- | ------------- | -------------
|
|
8
|
+
**scores** | [**List[V1MetricScore]**](V1MetricScore.md) | Required. The metric scores. | [optional]
|
|
9
|
+
|
|
10
|
+
## Example
|
|
11
|
+
|
|
12
|
+
```python
|
|
13
|
+
from eval_studio_client.api.models.v1_metric_scores import V1MetricScores
|
|
14
|
+
|
|
15
|
+
# TODO update the JSON string below
|
|
16
|
+
json = "{}"
|
|
17
|
+
# create an instance of V1MetricScores from a JSON string
|
|
18
|
+
v1_metric_scores_instance = V1MetricScores.from_json(json)
|
|
19
|
+
# print the JSON string representation of the object
|
|
20
|
+
print(V1MetricScores.to_json())
|
|
21
|
+
|
|
22
|
+
# convert the object into a dict
|
|
23
|
+
v1_metric_scores_dict = v1_metric_scores_instance.to_dict()
|
|
24
|
+
# create an instance of V1MetricScores from a dict
|
|
25
|
+
v1_metric_scores_from_dict = V1MetricScores.from_dict(v1_metric_scores_dict)
|
|
26
|
+
```
|
|
27
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
28
|
+
|
|
29
|
+
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# V1PerturbTestInPlaceResponse
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
## Properties
|
|
5
|
+
|
|
6
|
+
Name | Type | Description | Notes
|
|
7
|
+
------------ | ------------- | ------------- | -------------
|
|
8
|
+
**test** | [**V1Test**](V1Test.md) | | [optional]
|
|
9
|
+
|
|
10
|
+
## Example
|
|
11
|
+
|
|
12
|
+
```python
|
|
13
|
+
from eval_studio_client.api.models.v1_perturb_test_in_place_response import V1PerturbTestInPlaceResponse
|
|
14
|
+
|
|
15
|
+
# TODO update the JSON string below
|
|
16
|
+
json = "{}"
|
|
17
|
+
# create an instance of V1PerturbTestInPlaceResponse from a JSON string
|
|
18
|
+
v1_perturb_test_in_place_response_instance = V1PerturbTestInPlaceResponse.from_json(json)
|
|
19
|
+
# print the JSON string representation of the object
|
|
20
|
+
print(V1PerturbTestInPlaceResponse.to_json())
|
|
21
|
+
|
|
22
|
+
# convert the object into a dict
|
|
23
|
+
v1_perturb_test_in_place_response_dict = v1_perturb_test_in_place_response_instance.to_dict()
|
|
24
|
+
# create an instance of V1PerturbTestInPlaceResponse from a dict
|
|
25
|
+
v1_perturb_test_in_place_response_from_dict = V1PerturbTestInPlaceResponse.from_dict(v1_perturb_test_in_place_response_dict)
|
|
26
|
+
```
|
|
27
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
28
|
+
|
|
29
|
+
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# V1RepeatedString
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
## Properties
|
|
5
|
+
|
|
6
|
+
Name | Type | Description | Notes
|
|
7
|
+
------------ | ------------- | ------------- | -------------
|
|
8
|
+
**content** | **List[str]** | | [optional]
|
|
9
|
+
|
|
10
|
+
## Example
|
|
11
|
+
|
|
12
|
+
```python
|
|
13
|
+
from eval_studio_client.api.models.v1_repeated_string import V1RepeatedString
|
|
14
|
+
|
|
15
|
+
# TODO update the JSON string below
|
|
16
|
+
json = "{}"
|
|
17
|
+
# create an instance of V1RepeatedString from a JSON string
|
|
18
|
+
v1_repeated_string_instance = V1RepeatedString.from_json(json)
|
|
19
|
+
# print the JSON string representation of the object
|
|
20
|
+
print(V1RepeatedString.to_json())
|
|
21
|
+
|
|
22
|
+
# convert the object into a dict
|
|
23
|
+
v1_repeated_string_dict = v1_repeated_string_instance.to_dict()
|
|
24
|
+
# create an instance of V1RepeatedString from a dict
|
|
25
|
+
v1_repeated_string_from_dict = V1RepeatedString.from_dict(v1_repeated_string_dict)
|
|
26
|
+
```
|
|
27
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
28
|
+
|
|
29
|
+
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# V1ResetWorkflowNodeResponse
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
## Properties
|
|
5
|
+
|
|
6
|
+
Name | Type | Description | Notes
|
|
7
|
+
------------ | ------------- | ------------- | -------------
|
|
8
|
+
**node** | [**V1WorkflowNode**](V1WorkflowNode.md) | | [optional]
|
|
9
|
+
|
|
10
|
+
## Example
|
|
11
|
+
|
|
12
|
+
```python
|
|
13
|
+
from eval_studio_client.api.models.v1_reset_workflow_node_response import V1ResetWorkflowNodeResponse
|
|
14
|
+
|
|
15
|
+
# TODO update the JSON string below
|
|
16
|
+
json = "{}"
|
|
17
|
+
# create an instance of V1ResetWorkflowNodeResponse from a JSON string
|
|
18
|
+
v1_reset_workflow_node_response_instance = V1ResetWorkflowNodeResponse.from_json(json)
|
|
19
|
+
# print the JSON string representation of the object
|
|
20
|
+
print(V1ResetWorkflowNodeResponse.to_json())
|
|
21
|
+
|
|
22
|
+
# convert the object into a dict
|
|
23
|
+
v1_reset_workflow_node_response_dict = v1_reset_workflow_node_response_instance.to_dict()
|
|
24
|
+
# create an instance of V1ResetWorkflowNodeResponse from a dict
|
|
25
|
+
v1_reset_workflow_node_response_from_dict = V1ResetWorkflowNodeResponse.from_dict(v1_reset_workflow_node_response_dict)
|
|
26
|
+
```
|
|
27
|
+
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
|
|
28
|
+
|
|
29
|
+
|
|
@@ -18,6 +18,8 @@ Name | Type | Description | Notes
|
|
|
18
18
|
**constraints** | **List[str]** | Constraints on the model output. | [optional]
|
|
19
19
|
**condition** | **str** | Optional. Test case output condition, in a form of AIP-160 compliant filter expression. | [optional]
|
|
20
20
|
**perturbed_by** | **List[str]** | Output only. The list of perturbators applied to this test case. | [optional] [readonly]
|
|
21
|
+
**topics** | **List[str]** | Output only. The list of topics used to generate this test case. | [optional] [readonly]
|
|
22
|
+
**generator** | [**V1TestCasesGenerator**](V1TestCasesGenerator.md) | | [optional]
|
|
21
23
|
|
|
22
24
|
## Example
|
|
23
25
|
|
|
@@ -23,6 +23,9 @@ Name | Type | Description | Notes
|
|
|
23
23
|
**output_artifacts** | [**Dict[str, V1WorkflowNodeArtifacts]**](V1WorkflowNodeArtifacts.md) | Output only. Optional. List of the WorkflowNodeArtifacts produced by all the WorkflowNodes in the Workflow. | [optional] [readonly]
|
|
24
24
|
**llm_model** | **str** | Immutable. LLM Model to use. | [optional]
|
|
25
25
|
**model_parameters** | **str** | Optional. Immutable. Model parameter overrides in JSON format. | [optional]
|
|
26
|
+
**document** | **str** | The resource name of a Document. | [optional]
|
|
27
|
+
**h2ogpte_collection** | **str** | Existing h2oGPTe collection. | [optional]
|
|
28
|
+
**cloned_from_workflow** | **str** | Optional. Output only. The Workflow that this Workflow was cloned from. | [optional] [readonly]
|
|
26
29
|
|
|
27
30
|
## Example
|
|
28
31
|
|
|
@@ -5,6 +5,8 @@ All URIs are relative to *http://localhost*
|
|
|
5
5
|
Method | HTTP request | Description
|
|
6
6
|
------------- | ------------- | -------------
|
|
7
7
|
[**workflow_edge_service_batch_get_workflow_edges**](WorkflowEdgeServiceApi.md#workflow_edge_service_batch_get_workflow_edges) | **GET** /v1/workflows/*/edges:batchGet | Retrieves all WorkflowEdges with the specified resource names. If any of the WorkflowEdges do not exist an error is returned. The order of resource names in the request and the returned WorkflowEdges might differ.
|
|
8
|
+
[**workflow_edge_service_create_workflow_edge**](WorkflowEdgeServiceApi.md#workflow_edge_service_create_workflow_edge) | **POST** /v1/{parent}/edges | CreateWorkflowEdge creates a new WorkflowEdge.
|
|
9
|
+
[**workflow_edge_service_delete_workflow_edge**](WorkflowEdgeServiceApi.md#workflow_edge_service_delete_workflow_edge) | **DELETE** /v1/{name_7} | DeleteWorkflowEdge deletes a WorkflowEdge. If the WorkflowEdge does not exist, an error is returned.
|
|
8
10
|
|
|
9
11
|
|
|
10
12
|
# **workflow_edge_service_batch_get_workflow_edges**
|
|
@@ -74,3 +76,140 @@ No authorization required
|
|
|
74
76
|
|
|
75
77
|
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
|
76
78
|
|
|
79
|
+
# **workflow_edge_service_create_workflow_edge**
|
|
80
|
+
> V1CreateWorkflowEdgeResponse workflow_edge_service_create_workflow_edge(parent, edge)
|
|
81
|
+
|
|
82
|
+
CreateWorkflowEdge creates a new WorkflowEdge.
|
|
83
|
+
|
|
84
|
+
### Example
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
```python
|
|
88
|
+
import eval_studio_client.api
|
|
89
|
+
from eval_studio_client.api.models.v1_create_workflow_edge_response import V1CreateWorkflowEdgeResponse
|
|
90
|
+
from eval_studio_client.api.models.v1_workflow_edge import V1WorkflowEdge
|
|
91
|
+
from eval_studio_client.api.rest import ApiException
|
|
92
|
+
from pprint import pprint
|
|
93
|
+
|
|
94
|
+
# Defining the host is optional and defaults to http://localhost
|
|
95
|
+
# See configuration.py for a list of all supported configuration parameters.
|
|
96
|
+
configuration = eval_studio_client.api.Configuration(
|
|
97
|
+
host = "http://localhost"
|
|
98
|
+
)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
# Enter a context with an instance of the API client
|
|
102
|
+
with eval_studio_client.api.ApiClient(configuration) as api_client:
|
|
103
|
+
# Create an instance of the API class
|
|
104
|
+
api_instance = eval_studio_client.api.WorkflowEdgeServiceApi(api_client)
|
|
105
|
+
parent = 'parent_example' # str | Required. The parent Workflow in format of `workflow/{workflow_id}`.
|
|
106
|
+
edge = eval_studio_client.api.V1WorkflowEdge() # V1WorkflowEdge | Required. The WorkflowEdge to create.
|
|
107
|
+
|
|
108
|
+
try:
|
|
109
|
+
# CreateWorkflowEdge creates a new WorkflowEdge.
|
|
110
|
+
api_response = api_instance.workflow_edge_service_create_workflow_edge(parent, edge)
|
|
111
|
+
print("The response of WorkflowEdgeServiceApi->workflow_edge_service_create_workflow_edge:\n")
|
|
112
|
+
pprint(api_response)
|
|
113
|
+
except Exception as e:
|
|
114
|
+
print("Exception when calling WorkflowEdgeServiceApi->workflow_edge_service_create_workflow_edge: %s\n" % e)
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
### Parameters
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
Name | Type | Description | Notes
|
|
123
|
+
------------- | ------------- | ------------- | -------------
|
|
124
|
+
**parent** | **str**| Required. The parent Workflow in the format of `workflow/{workflow_id}`. |
|
|
125
|
+
**edge** | [**V1WorkflowEdge**](V1WorkflowEdge.md)| Required. The WorkflowEdge to create. |
|
|
126
|
+
|
|
127
|
+
### Return type
|
|
128
|
+
|
|
129
|
+
[**V1CreateWorkflowEdgeResponse**](V1CreateWorkflowEdgeResponse.md)
|
|
130
|
+
|
|
131
|
+
### Authorization
|
|
132
|
+
|
|
133
|
+
No authorization required
|
|
134
|
+
|
|
135
|
+
### HTTP request headers
|
|
136
|
+
|
|
137
|
+
- **Content-Type**: application/json
|
|
138
|
+
- **Accept**: application/json
|
|
139
|
+
|
|
140
|
+
### HTTP response details
|
|
141
|
+
|
|
142
|
+
| Status code | Description | Response headers |
|
|
143
|
+
|-------------|-------------|------------------|
|
|
144
|
+
**200** | A successful response. | - |
|
|
145
|
+
**0** | An unexpected error response. | - |
|
|
146
|
+
|
|
147
|
+
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
|
148
|
+
|
|
149
|
+
# **workflow_edge_service_delete_workflow_edge**
|
|
150
|
+
> V1DeleteWorkflowEdgeResponse workflow_edge_service_delete_workflow_edge(name_7)
|
|
151
|
+
|
|
152
|
+
DeleteWorkflowEdge deletes a WorkflowEdge. If the WorkflowEdge does not exist, an error is returned.
|
|
153
|
+
|
|
154
|
+
### Example
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
```python
|
|
158
|
+
import eval_studio_client.api
|
|
159
|
+
from eval_studio_client.api.models.v1_delete_workflow_edge_response import V1DeleteWorkflowEdgeResponse
|
|
160
|
+
from eval_studio_client.api.rest import ApiException
|
|
161
|
+
from pprint import pprint
|
|
162
|
+
|
|
163
|
+
# Defining the host is optional and defaults to http://localhost
|
|
164
|
+
# See configuration.py for a list of all supported configuration parameters.
|
|
165
|
+
configuration = eval_studio_client.api.Configuration(
|
|
166
|
+
host = "http://localhost"
|
|
167
|
+
)
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
# Enter a context with an instance of the API client
|
|
171
|
+
with eval_studio_client.api.ApiClient(configuration) as api_client:
|
|
172
|
+
# Create an instance of the API class
|
|
173
|
+
api_instance = eval_studio_client.api.WorkflowEdgeServiceApi(api_client)
|
|
174
|
+
name_7 = 'name_7_example' # str | Required. The name of the WorkflowEdge to delete.
|
|
175
|
+
|
|
176
|
+
try:
|
|
177
|
+
# DeleteWorkflowEdge deletes a WorkflowEdge. If the WorkflowEdge does not exist an error is returned
|
|
178
|
+
api_response = api_instance.workflow_edge_service_delete_workflow_edge(name_7)
|
|
179
|
+
print("The response of WorkflowEdgeServiceApi->workflow_edge_service_delete_workflow_edge:\n")
|
|
180
|
+
pprint(api_response)
|
|
181
|
+
except Exception as e:
|
|
182
|
+
print("Exception when calling WorkflowEdgeServiceApi->workflow_edge_service_delete_workflow_edge: %s\n" % e)
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
### Parameters
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
Name | Type | Description | Notes
|
|
191
|
+
------------- | ------------- | ------------- | -------------
|
|
192
|
+
**name_7** | **str**| Required. The name of the WorkflowEdge to delete. |
|
|
193
|
+
|
|
194
|
+
### Return type
|
|
195
|
+
|
|
196
|
+
[**V1DeleteWorkflowEdgeResponse**](V1DeleteWorkflowEdgeResponse.md)
|
|
197
|
+
|
|
198
|
+
### Authorization
|
|
199
|
+
|
|
200
|
+
No authorization required
|
|
201
|
+
|
|
202
|
+
### HTTP request headers
|
|
203
|
+
|
|
204
|
+
- **Content-Type**: Not defined
|
|
205
|
+
- **Accept**: application/json
|
|
206
|
+
|
|
207
|
+
### HTTP response details
|
|
208
|
+
|
|
209
|
+
| Status code | Description | Response headers |
|
|
210
|
+
|-------------|-------------|------------------|
|
|
211
|
+
**200** | A successful response. | - |
|
|
212
|
+
**0** | An unexpected error response. | - |
|
|
213
|
+
|
|
214
|
+
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
|
|
215
|
+
|