eval-studio-client 1.0.3a1-py3-none-any.whl → 1.1.0a6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eval_studio_client/api/__init__.py +83 -1
- eval_studio_client/api/api/__init__.py +8 -0
- eval_studio_client/api/api/adversarial_inputs_service_api.py +321 -0
- eval_studio_client/api/api/dashboard_service_api.py +18 -1
- eval_studio_client/api/api/document_service_api.py +1 -1
- eval_studio_client/api/api/evaluation_service_api.py +1 -1
- eval_studio_client/api/api/evaluator_service_api.py +1 -1
- eval_studio_client/api/api/human_calibration_service_api.py +304 -0
- eval_studio_client/api/api/info_service_api.py +1 -1
- eval_studio_client/api/api/leaderboard_report_service_api.py +292 -0
- eval_studio_client/api/api/leaderboard_service_api.py +17 -17
- eval_studio_client/api/api/model_service_api.py +17 -17
- eval_studio_client/api/api/operation_progress_service_api.py +1 -1
- eval_studio_client/api/api/operation_service_api.py +272 -17
- eval_studio_client/api/api/perturbation_service_api.py +1 -1
- eval_studio_client/api/api/perturbator_service_api.py +285 -18
- eval_studio_client/api/api/prompt_generation_service_api.py +1 -1
- eval_studio_client/api/api/prompt_library_service_api.py +669 -0
- eval_studio_client/api/api/test_case_relationship_service_api.py +292 -0
- eval_studio_client/api/api/test_case_service_api.py +17 -17
- eval_studio_client/api/api/test_class_service_api.py +17 -17
- eval_studio_client/api/api/test_lab_service_api.py +1 -1
- eval_studio_client/api/api/test_service_api.py +1272 -102
- eval_studio_client/api/api/who_am_i_service_api.py +1 -1
- eval_studio_client/api/api/workflow_edge_service_api.py +835 -0
- eval_studio_client/api/api/workflow_node_service_api.py +2431 -0
- eval_studio_client/api/api/workflow_service_api.py +2403 -0
- eval_studio_client/api/api_client.py +1 -1
- eval_studio_client/api/configuration.py +1 -1
- eval_studio_client/api/docs/AdversarialInputsServiceApi.md +78 -0
- eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +45 -0
- eval_studio_client/api/docs/DashboardServiceApi.md +4 -2
- eval_studio_client/api/docs/HumanCalibrationServiceApi.md +77 -0
- eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -0
- eval_studio_client/api/docs/LeaderboardServiceApi.md +5 -5
- eval_studio_client/api/docs/ModelServiceApi.md +5 -5
- eval_studio_client/api/docs/OperationServiceApi.md +72 -5
- eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +1 -0
- eval_studio_client/api/docs/PerturbatorServiceApi.md +38 -8
- eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +4 -2
- eval_studio_client/api/docs/PromptLibraryServiceApi.md +155 -0
- eval_studio_client/api/docs/ProtobufNullValue.md +12 -0
- eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +1 -0
- eval_studio_client/api/docs/RequiredTheTestCaseToUpdate.md +3 -0
- eval_studio_client/api/docs/RequiredTheTestToUpdate.md +1 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflow.md +47 -0
- eval_studio_client/api/docs/RequiredTheUpdatedWorkflowNode.md +44 -0
- eval_studio_client/api/docs/TestCaseRelationshipServiceApi.md +75 -0
- eval_studio_client/api/docs/TestCaseServiceApi.md +5 -5
- eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
- eval_studio_client/api/docs/TestServiceApi.md +293 -9
- eval_studio_client/api/docs/TestServiceCloneTestRequest.md +30 -0
- eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +3 -1
- eval_studio_client/api/docs/TestServiceImportTestCasesFromLibraryRequest.md +32 -0
- eval_studio_client/api/docs/TestServiceListTestCaseLibraryItemsRequest.md +35 -0
- eval_studio_client/api/docs/TestServicePerturbTestInPlaceRequest.md +30 -0
- eval_studio_client/api/docs/TestServicePerturbTestRequest.md +1 -0
- eval_studio_client/api/docs/V1AbortOperationResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsRequest.md +29 -0
- eval_studio_client/api/docs/V1BatchDeleteWorkflowsResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetWorkflowEdgesResponse.md +29 -0
- eval_studio_client/api/docs/V1BatchGetWorkflowNodesResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneTestResponse.md +29 -0
- eval_studio_client/api/docs/V1CloneWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Context.md +37 -0
- eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1CreateWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1CreateWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Dashboard.md +1 -0
- eval_studio_client/api/docs/V1DashboardType.md +12 -0
- eval_studio_client/api/docs/V1DeleteWorkflowEdgeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1DeleteWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1DependencyList.md +30 -0
- eval_studio_client/api/docs/V1EstimateThresholdRequest.md +33 -0
- eval_studio_client/api/docs/V1Evaluator.md +2 -0
- eval_studio_client/api/docs/V1GetGuardrailsConfigurationResponse.md +29 -0
- eval_studio_client/api/docs/V1GetLeaderboardReportResponse.md +29 -0
- eval_studio_client/api/docs/V1GetWorkflowNodePrerequisitesResponse.md +30 -0
- eval_studio_client/api/docs/V1GetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1GetWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1ImportEvaluationRequest.md +1 -0
- eval_studio_client/api/docs/V1ImportTestCasesFromLibraryResponse.md +29 -0
- eval_studio_client/api/docs/V1ImportTestCasesRequest.md +33 -0
- eval_studio_client/api/docs/V1Info.md +3 -0
- eval_studio_client/api/docs/V1InitWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1LabeledTestCase.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReport.md +32 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputData.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +31 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluator.md +42 -0
- eval_studio_client/api/docs/V1LeaderboardReportEvaluatorParameter.md +38 -0
- eval_studio_client/api/docs/V1LeaderboardReportExplanation.md +34 -0
- eval_studio_client/api/docs/V1LeaderboardReportMetricsMetaEntry.md +41 -0
- eval_studio_client/api/docs/V1LeaderboardReportModel.md +37 -0
- eval_studio_client/api/docs/V1LeaderboardReportResult.md +45 -0
- eval_studio_client/api/docs/V1LeaderboardReportResultRelationship.md +32 -0
- eval_studio_client/api/docs/V1ListPromptLibraryItemsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCaseLibraryItemsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListTestCaseRelationshipsResponse.md +29 -0
- eval_studio_client/api/docs/V1ListWorkflowDependenciesResponse.md +30 -0
- eval_studio_client/api/docs/V1ListWorkflowsResponse.md +29 -0
- eval_studio_client/api/docs/V1MetricScore.md +31 -0
- eval_studio_client/api/docs/V1MetricScores.md +29 -0
- eval_studio_client/api/docs/V1PerturbTestInPlaceResponse.md +29 -0
- eval_studio_client/api/docs/V1ProcessWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1PromptLibraryItem.md +42 -0
- eval_studio_client/api/docs/V1RepeatedContext.md +29 -0
- eval_studio_client/api/docs/V1RepeatedString.md +29 -0
- eval_studio_client/api/docs/V1ResetWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1Test.md +1 -0
- eval_studio_client/api/docs/V1TestCase.md +3 -0
- eval_studio_client/api/docs/V1TestSuiteEvaluates.md +11 -0
- eval_studio_client/api/docs/V1TestType.md +12 -0
- eval_studio_client/api/docs/V1UpdateWorkflowNodeResponse.md +29 -0
- eval_studio_client/api/docs/V1UpdateWorkflowResponse.md +29 -0
- eval_studio_client/api/docs/V1Workflow.md +49 -0
- eval_studio_client/api/docs/V1WorkflowDependency.md +30 -0
- eval_studio_client/api/docs/V1WorkflowEdge.md +40 -0
- eval_studio_client/api/docs/V1WorkflowEdgeType.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNode.md +46 -0
- eval_studio_client/api/docs/V1WorkflowNodeArtifact.md +41 -0
- eval_studio_client/api/docs/V1WorkflowNodeArtifacts.md +29 -0
- eval_studio_client/api/docs/V1WorkflowNodeAttributes.md +30 -0
- eval_studio_client/api/docs/V1WorkflowNodeStatus.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNodeType.md +12 -0
- eval_studio_client/api/docs/V1WorkflowNodeView.md +12 -0
- eval_studio_client/api/docs/V1WorkflowType.md +12 -0
- eval_studio_client/api/docs/WorkflowEdgeServiceApi.md +215 -0
- eval_studio_client/api/docs/WorkflowNodeServiceApi.md +632 -0
- eval_studio_client/api/docs/WorkflowServiceApi.md +623 -0
- eval_studio_client/api/docs/WorkflowServiceCloneWorkflowRequest.md +33 -0
- eval_studio_client/api/exceptions.py +1 -1
- eval_studio_client/api/models/__init__.py +75 -1
- eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +143 -0
- eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +9 -3
- eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +18 -6
- eval_studio_client/api/models/protobuf_any.py +1 -1
- eval_studio_client/api/models/protobuf_null_value.py +36 -0
- eval_studio_client/api/models/required_the_dashboard_to_update.py +6 -3
- eval_studio_client/api/models/required_the_document_to_update.py +1 -1
- eval_studio_client/api/models/required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/models/required_the_model_to_update.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
- eval_studio_client/api/models/required_the_test_case_to_update.py +14 -3
- eval_studio_client/api/models/required_the_test_to_update.py +6 -3
- eval_studio_client/api/models/required_the_updated_workflow.py +160 -0
- eval_studio_client/api/models/required_the_updated_workflow_node.py +152 -0
- eval_studio_client/api/models/rpc_status.py +1 -1
- eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/models/test_service_clone_test_request.py +89 -0
- eval_studio_client/api/models/test_service_generate_test_cases_request.py +16 -4
- eval_studio_client/api/models/test_service_import_test_cases_from_library_request.py +93 -0
- eval_studio_client/api/models/test_service_list_test_case_library_items_request.py +99 -0
- eval_studio_client/api/models/test_service_perturb_test_in_place_request.py +97 -0
- eval_studio_client/api/models/test_service_perturb_test_request.py +5 -3
- eval_studio_client/api/models/v1_abort_operation_response.py +91 -0
- eval_studio_client/api/models/v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_delete_workflows_request.py +87 -0
- eval_studio_client/api/models/v1_batch_delete_workflows_response.py +95 -0
- eval_studio_client/api/models/v1_batch_get_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_tests_response.py +1 -1
- eval_studio_client/api/models/v1_batch_get_workflow_edges_response.py +95 -0
- eval_studio_client/api/models/v1_batch_get_workflow_nodes_response.py +95 -0
- eval_studio_client/api/models/v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/models/v1_batch_import_tests_response.py +1 -1
- eval_studio_client/api/models/v1_check_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_clone_test_response.py +91 -0
- eval_studio_client/api/models/v1_clone_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_collection_info.py +1 -1
- eval_studio_client/api/models/v1_context.py +103 -0
- eval_studio_client/api/models/v1_create_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_document_response.py +1 -1
- eval_studio_client/api/models/v1_create_evaluation_request.py +8 -3
- eval_studio_client/api/models/v1_create_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/models/v1_create_model_response.py +1 -1
- eval_studio_client/api/models/v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_create_test_response.py +1 -1
- eval_studio_client/api/models/v1_create_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_create_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_dashboard.py +6 -3
- eval_studio_client/api/models/v1_dashboard_status.py +1 -1
- eval_studio_client/api/models/v1_dashboard_type.py +38 -0
- eval_studio_client/api/models/v1_delete_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_document_response.py +1 -1
- eval_studio_client/api/models/v1_delete_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_delete_model_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_delete_test_response.py +1 -1
- eval_studio_client/api/models/v1_delete_workflow_edge_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_delete_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_dependency_list.py +97 -0
- eval_studio_client/api/models/v1_document.py +1 -1
- eval_studio_client/api/models/v1_estimate_threshold_request.py +103 -0
- eval_studio_client/api/models/v1_evaluation_test.py +1 -1
- eval_studio_client/api/models/v1_evaluator.py +12 -4
- eval_studio_client/api/models/v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/models/v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/models/v1_evaluator_view.py +1 -1
- eval_studio_client/api/models/v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/models/v1_find_all_test_cases_by_id_response.py +1 -1
- eval_studio_client/api/models/v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/models/v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_get_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_document_response.py +1 -1
- eval_studio_client/api/models/v1_get_evaluator_response.py +1 -1
- eval_studio_client/api/models/v1_get_guardrails_configuration_response.py +87 -0
- eval_studio_client/api/models/v1_get_info_response.py +1 -1
- eval_studio_client/api/models/v1_get_leaderboard_report_response.py +91 -0
- eval_studio_client/api/models/v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_get_model_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/models/v1_get_operation_response.py +1 -1
- eval_studio_client/api/models/v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_class_response.py +1 -1
- eval_studio_client/api/models/v1_get_test_response.py +1 -1
- eval_studio_client/api/models/v1_get_workflow_node_prerequisites_response.py +89 -0
- eval_studio_client/api/models/v1_get_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_get_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_import_evaluation_request.py +8 -3
- eval_studio_client/api/models/v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/models/v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_import_test_cases_from_library_response.py +91 -0
- eval_studio_client/api/models/v1_import_test_cases_request.py +95 -0
- eval_studio_client/api/models/v1_info.py +10 -4
- eval_studio_client/api/models/v1_init_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_insight.py +1 -1
- eval_studio_client/api/models/v1_labeled_test_case.py +91 -0
- eval_studio_client/api/models/v1_leaderboard.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_report.py +115 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_data.py +93 -0
- eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +101 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator.py +155 -0
- eval_studio_client/api/models/v1_leaderboard_report_evaluator_parameter.py +109 -0
- eval_studio_client/api/models/v1_leaderboard_report_explanation.py +103 -0
- eval_studio_client/api/models/v1_leaderboard_report_metrics_meta_entry.py +129 -0
- eval_studio_client/api/models/v1_leaderboard_report_model.py +113 -0
- eval_studio_client/api/models/v1_leaderboard_report_result.py +175 -0
- eval_studio_client/api/models/v1_leaderboard_report_result_relationship.py +97 -0
- eval_studio_client/api/models/v1_leaderboard_status.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_type.py +1 -1
- eval_studio_client/api/models/v1_leaderboard_view.py +1 -1
- eval_studio_client/api/models/v1_list_base_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_documents_response.py +1 -1
- eval_studio_client/api/models/v1_list_evaluators_response.py +1 -1
- eval_studio_client/api/models/v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_dashboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/models/v1_list_most_recent_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_operations_response.py +1 -1
- eval_studio_client/api/models/v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/models/v1_list_prompt_library_items_response.py +95 -0
- eval_studio_client/api/models/v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_case_library_items_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_case_relationships_response.py +95 -0
- eval_studio_client/api/models/v1_list_test_cases_response.py +1 -1
- eval_studio_client/api/models/v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/models/v1_list_tests_response.py +1 -1
- eval_studio_client/api/models/v1_list_workflow_dependencies_response.py +105 -0
- eval_studio_client/api/models/v1_list_workflows_response.py +95 -0
- eval_studio_client/api/models/v1_metric_score.py +89 -0
- eval_studio_client/api/models/v1_metric_scores.py +95 -0
- eval_studio_client/api/models/v1_model.py +1 -1
- eval_studio_client/api/models/v1_model_type.py +1 -1
- eval_studio_client/api/models/v1_operation.py +1 -1
- eval_studio_client/api/models/v1_operation_progress.py +1 -1
- eval_studio_client/api/models/v1_perturb_test_in_place_response.py +91 -0
- eval_studio_client/api/models/v1_perturb_test_response.py +1 -1
- eval_studio_client/api/models/v1_perturbator.py +1 -1
- eval_studio_client/api/models/v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/models/v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/models/v1_problem_and_action.py +1 -1
- eval_studio_client/api/models/v1_process_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_prompt_library_item.py +129 -0
- eval_studio_client/api/models/v1_repeated_context.py +95 -0
- eval_studio_client/api/models/v1_repeated_string.py +87 -0
- eval_studio_client/api/models/v1_reset_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_test.py +6 -3
- eval_studio_client/api/models/v1_test_case.py +14 -3
- eval_studio_client/api/models/v1_test_case_relationship.py +1 -1
- eval_studio_client/api/models/v1_test_cases_generator.py +1 -1
- eval_studio_client/api/models/v1_test_class.py +1 -1
- eval_studio_client/api/models/v1_test_class_type.py +1 -1
- eval_studio_client/api/models/v1_test_lab.py +1 -1
- eval_studio_client/api/models/v1_test_suite_evaluates.py +39 -0
- eval_studio_client/api/models/v1_test_type.py +38 -0
- eval_studio_client/api/models/v1_update_dashboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_document_response.py +1 -1
- eval_studio_client/api/models/v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/models/v1_update_model_response.py +1 -1
- eval_studio_client/api/models/v1_update_operation_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_case_response.py +1 -1
- eval_studio_client/api/models/v1_update_test_response.py +1 -1
- eval_studio_client/api/models/v1_update_workflow_node_response.py +91 -0
- eval_studio_client/api/models/v1_update_workflow_response.py +91 -0
- eval_studio_client/api/models/v1_who_am_i_response.py +1 -1
- eval_studio_client/api/models/v1_workflow.py +164 -0
- eval_studio_client/api/models/v1_workflow_dependency.py +89 -0
- eval_studio_client/api/models/v1_workflow_edge.py +123 -0
- eval_studio_client/api/models/v1_workflow_edge_type.py +38 -0
- eval_studio_client/api/models/v1_workflow_node.py +156 -0
- eval_studio_client/api/models/v1_workflow_node_artifact.py +126 -0
- eval_studio_client/api/models/v1_workflow_node_artifacts.py +97 -0
- eval_studio_client/api/models/v1_workflow_node_attributes.py +87 -0
- eval_studio_client/api/models/v1_workflow_node_status.py +40 -0
- eval_studio_client/api/models/v1_workflow_node_type.py +44 -0
- eval_studio_client/api/models/v1_workflow_node_view.py +38 -0
- eval_studio_client/api/models/v1_workflow_type.py +37 -0
- eval_studio_client/api/models/workflow_service_clone_workflow_request.py +95 -0
- eval_studio_client/api/rest.py +1 -1
- eval_studio_client/api/test/test_adversarial_inputs_service_api.py +37 -0
- eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +128 -0
- eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
- eval_studio_client/api/test/test_document_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
- eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
- eval_studio_client/api/test/test_human_calibration_service_api.py +38 -0
- eval_studio_client/api/test/test_info_service_api.py +1 -1
- eval_studio_client/api/test/test_leaderboard_report_service_api.py +37 -0
- eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
- eval_studio_client/api/test/test_model_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
- eval_studio_client/api/test/test_operation_service_api.py +7 -1
- eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
- eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +25 -3
- eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_api.py +1 -1
- eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +21 -5
- eval_studio_client/api/test/test_prompt_library_service_api.py +43 -0
- eval_studio_client/api/test/test_protobuf_any.py +1 -1
- eval_studio_client/api/test/test_protobuf_null_value.py +33 -0
- eval_studio_client/api/test/test_required_the_dashboard_to_update.py +3 -2
- eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
- eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
- eval_studio_client/api/test/test_required_the_test_case_to_update.py +9 -2
- eval_studio_client/api/test/test_required_the_test_to_update.py +3 -2
- eval_studio_client/api/test/test_required_the_updated_workflow.py +92 -0
- eval_studio_client/api/test/test_required_the_updated_workflow_node.py +81 -0
- eval_studio_client/api/test/test_rpc_status.py +1 -1
- eval_studio_client/api/test/test_test_case_relationship_service_api.py +37 -0
- eval_studio_client/api/test/test_test_case_service_api.py +1 -1
- eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
- eval_studio_client/api/test/test_test_class_service_api.py +1 -1
- eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
- eval_studio_client/api/test/test_test_service_api.py +25 -1
- eval_studio_client/api/test/test_test_service_clone_test_request.py +52 -0
- eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +17 -2
- eval_studio_client/api/test/test_test_service_import_test_cases_from_library_request.py +56 -0
- eval_studio_client/api/test/test_test_service_list_test_case_library_items_request.py +63 -0
- eval_studio_client/api/test/test_test_service_perturb_test_in_place_request.py +59 -0
- eval_studio_client/api/test/test_test_service_perturb_test_request.py +5 -2
- eval_studio_client/api/test/test_v1_abort_operation_response.py +71 -0
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +3 -2
- eval_studio_client/api/test/test_v1_batch_delete_documents_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +4 -2
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_test_cases_response.py +9 -2
- eval_studio_client/api/test/test_v1_batch_delete_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_batch_delete_workflows_request.py +53 -0
- eval_studio_client/api/test/test_v1_batch_delete_workflows_response.py +95 -0
- eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +3 -2
- eval_studio_client/api/test/test_v1_batch_get_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_get_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_batch_get_workflow_edges_response.py +64 -0
- eval_studio_client/api/test/test_v1_batch_get_workflow_nodes_response.py +84 -0
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_request.py +1 -1
- eval_studio_client/api/test/test_v1_batch_import_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_check_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_clone_test_response.py +68 -0
- eval_studio_client/api/test/test_v1_clone_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_collection_info.py +1 -1
- eval_studio_client/api/test/test_v1_context.py +59 -0
- eval_studio_client/api/test/test_v1_create_dashboard_response.py +3 -2
- eval_studio_client/api/test/test_v1_create_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_evaluation_request.py +25 -3
- eval_studio_client/api/test/test_v1_create_evaluator_response.py +4 -2
- eval_studio_client/api/test/test_v1_create_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_perturbation_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_create_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_create_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_create_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_create_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_create_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_dashboard.py +3 -2
- eval_studio_client/api/test/test_v1_dashboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_dashboard_type.py +33 -0
- eval_studio_client/api/test/test_v1_delete_dashboard_response.py +3 -2
- eval_studio_client/api/test/test_v1_delete_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_evaluator_response.py +4 -2
- eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_delete_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_delete_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_delete_workflow_edge_response.py +62 -0
- eval_studio_client/api/test/test_v1_delete_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_delete_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_dependency_list.py +56 -0
- eval_studio_client/api/test/test_v1_document.py +1 -1
- eval_studio_client/api/test/test_v1_estimate_threshold_request.py +60 -0
- eval_studio_client/api/test/test_v1_evaluation_test.py +9 -2
- eval_studio_client/api/test/test_v1_evaluator.py +4 -2
- eval_studio_client/api/test/test_v1_evaluator_param_type.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_parameter.py +1 -1
- eval_studio_client/api/test/test_v1_evaluator_view.py +1 -1
- eval_studio_client/api/test/test_v1_finalize_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_find_all_test_cases_by_id_response.py +9 -2
- eval_studio_client/api/test/test_v1_find_test_lab_response.py +1 -1
- eval_studio_client/api/test/test_v1_generate_test_cases_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_dashboard_response.py +3 -2
- eval_studio_client/api/test/test_v1_get_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_evaluator_response.py +4 -2
- eval_studio_client/api/test/test_v1_get_guardrails_configuration_response.py +51 -0
- eval_studio_client/api/test/test_v1_get_info_response.py +7 -2
- eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +173 -0
- eval_studio_client/api/test/test_v1_get_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_progress_by_parent_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_perturbator_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_get_test_class_response.py +1 -1
- eval_studio_client/api/test/test_v1_get_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_get_workflow_node_prerequisites_response.py +56 -0
- eval_studio_client/api/test/test_v1_get_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_get_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_import_evaluation_request.py +17 -2
- eval_studio_client/api/test/test_v1_import_leaderboard_request.py +1 -1
- eval_studio_client/api/test/test_v1_import_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +71 -0
- eval_studio_client/api/test/test_v1_import_test_cases_request.py +57 -0
- eval_studio_client/api/test/test_v1_info.py +7 -2
- eval_studio_client/api/test/test_v1_init_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_insight.py +1 -1
- eval_studio_client/api/test/test_v1_labeled_test_case.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_report.py +172 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_data.py +52 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +56 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator.py +114 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_evaluator_parameter.py +63 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_explanation.py +58 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_metrics_meta_entry.py +66 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_model.py +60 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result.py +92 -0
- eval_studio_client/api/test/test_v1_leaderboard_report_result_relationship.py +53 -0
- eval_studio_client/api/test/test_v1_leaderboard_status.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_type.py +1 -1
- eval_studio_client/api/test/test_v1_leaderboard_view.py +1 -1
- eval_studio_client/api/test/test_v1_list_base_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_dashboards_response.py +3 -2
- eval_studio_client/api/test/test_v1_list_documents_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_evaluators_response.py +4 -2
- eval_studio_client/api/test/test_v1_list_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_llm_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_model_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +3 -2
- eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_models_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_list_operations_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_perturbators_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_prompt_library_items_response.py +71 -0
- eval_studio_client/api/test/test_v1_list_rag_collections_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_test_case_library_items_response.py +71 -0
- eval_studio_client/api/test/test_v1_list_test_case_relationships_response.py +56 -0
- eval_studio_client/api/test/test_v1_list_test_cases_response.py +9 -2
- eval_studio_client/api/test/test_v1_list_test_classes_response.py +1 -1
- eval_studio_client/api/test/test_v1_list_tests_response.py +3 -2
- eval_studio_client/api/test/test_v1_list_workflow_dependencies_response.py +93 -0
- eval_studio_client/api/test/test_v1_list_workflows_response.py +95 -0
- eval_studio_client/api/test/test_v1_metric_score.py +52 -0
- eval_studio_client/api/test/test_v1_metric_scores.py +55 -0
- eval_studio_client/api/test/test_v1_model.py +1 -1
- eval_studio_client/api/test/test_v1_model_type.py +1 -1
- eval_studio_client/api/test/test_v1_operation.py +1 -1
- eval_studio_client/api/test/test_v1_operation_progress.py +1 -1
- eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +68 -0
- eval_studio_client/api/test/test_v1_perturb_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_perturbator.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_configuration.py +1 -1
- eval_studio_client/api/test/test_v1_perturbator_intensity.py +1 -1
- eval_studio_client/api/test/test_v1_problem_and_action.py +1 -1
- eval_studio_client/api/test/test_v1_process_workflow_node_response.py +71 -0
- eval_studio_client/api/test/test_v1_prompt_library_item.py +68 -0
- eval_studio_client/api/test/test_v1_repeated_context.py +62 -0
- eval_studio_client/api/test/test_v1_repeated_string.py +53 -0
- eval_studio_client/api/test/test_v1_reset_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_test.py +3 -2
- eval_studio_client/api/test/test_v1_test_case.py +9 -2
- eval_studio_client/api/test/test_v1_test_case_relationship.py +1 -1
- eval_studio_client/api/test/test_v1_test_cases_generator.py +1 -1
- eval_studio_client/api/test/test_v1_test_class.py +1 -1
- eval_studio_client/api/test/test_v1_test_class_type.py +1 -1
- eval_studio_client/api/test/test_v1_test_lab.py +1 -1
- eval_studio_client/api/test/test_v1_test_suite_evaluates.py +33 -0
- eval_studio_client/api/test/test_v1_test_type.py +33 -0
- eval_studio_client/api/test/test_v1_update_dashboard_response.py +3 -2
- eval_studio_client/api/test/test_v1_update_document_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_leaderboard_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_model_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_operation_response.py +1 -1
- eval_studio_client/api/test/test_v1_update_test_case_response.py +9 -2
- eval_studio_client/api/test/test_v1_update_test_response.py +3 -2
- eval_studio_client/api/test/test_v1_update_workflow_node_response.py +82 -0
- eval_studio_client/api/test/test_v1_update_workflow_response.py +93 -0
- eval_studio_client/api/test/test_v1_who_am_i_response.py +1 -1
- eval_studio_client/api/test/test_v1_workflow.py +93 -0
- eval_studio_client/api/test/test_v1_workflow_dependency.py +52 -0
- eval_studio_client/api/test/test_v1_workflow_edge.py +61 -0
- eval_studio_client/api/test/test_v1_workflow_edge_type.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node.py +82 -0
- eval_studio_client/api/test/test_v1_workflow_node_artifact.py +62 -0
- eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +65 -0
- eval_studio_client/api/test/test_v1_workflow_node_attributes.py +51 -0
- eval_studio_client/api/test/test_v1_workflow_node_status.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node_type.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_node_view.py +33 -0
- eval_studio_client/api/test/test_v1_workflow_type.py +33 -0
- eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
- eval_studio_client/api/test/test_workflow_edge_service_api.py +52 -0
- eval_studio_client/api/test/test_workflow_node_service_api.py +94 -0
- eval_studio_client/api/test/test_workflow_service_api.py +93 -0
- eval_studio_client/api/test/test_workflow_service_clone_workflow_request.py +55 -0
- eval_studio_client/client.py +7 -0
- eval_studio_client/dashboards.py +29 -0
- eval_studio_client/gen/openapiv2/eval_studio.swagger.json +5318 -1884
- eval_studio_client/leaderboards.py +123 -0
- eval_studio_client/models.py +3 -42
- eval_studio_client/test_labs.py +49 -21
- eval_studio_client/tests.py +290 -8
- {eval_studio_client-1.0.3a1.dist-info → eval_studio_client-1.1.0a6.dist-info}/METADATA +1 -2
- eval_studio_client-1.1.0a6.dist-info/RECORD +732 -0
- eval_studio_client-1.0.3a1.dist-info/RECORD +0 -486
- {eval_studio_client-1.0.3a1.dist-info → eval_studio_client-1.1.0a6.dist-info}/WHEEL +0 -0
eval_studio_client/api/__init__.py

@@ -2,7 +2,7 @@
 
 # flake8: noqa
 """
-ai/h2o/eval_studio/v1/
+ai/h2o/eval_studio/v1/insight.proto
 
 No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -14,9 +14,11 @@
 
 
 # import models into model package
+from eval_studio_client.api.models.adversarial_inputs_service_test_adversarial_inputs_robustness_request import AdversarialInputsServiceTestAdversarialInputsRobustnessRequest
 from eval_studio_client.api.models.perturbation_service_create_perturbation_request import PerturbationServiceCreatePerturbationRequest
 from eval_studio_client.api.models.prompt_generation_service_auto_generate_prompts_request import PromptGenerationServiceAutoGeneratePromptsRequest
 from eval_studio_client.api.models.protobuf_any import ProtobufAny
+from eval_studio_client.api.models.protobuf_null_value import ProtobufNullValue
 from eval_studio_client.api.models.required_the_dashboard_to_update import RequiredTheDashboardToUpdate
 from eval_studio_client.api.models.required_the_document_to_update import RequiredTheDocumentToUpdate
 from eval_studio_client.api.models.required_the_leaderboard_to_update import RequiredTheLeaderboardToUpdate
@@ -25,10 +27,17 @@ from eval_studio_client.api.models.required_the_operation_to_finalize import Req
 from eval_studio_client.api.models.required_the_operation_to_update import RequiredTheOperationToUpdate
 from eval_studio_client.api.models.required_the_test_case_to_update import RequiredTheTestCaseToUpdate
 from eval_studio_client.api.models.required_the_test_to_update import RequiredTheTestToUpdate
+from eval_studio_client.api.models.required_the_updated_workflow import RequiredTheUpdatedWorkflow
+from eval_studio_client.api.models.required_the_updated_workflow_node import RequiredTheUpdatedWorkflowNode
 from eval_studio_client.api.models.rpc_status import RpcStatus
 from eval_studio_client.api.models.test_case_service_batch_delete_test_cases_request import TestCaseServiceBatchDeleteTestCasesRequest
+from eval_studio_client.api.models.test_service_clone_test_request import TestServiceCloneTestRequest
 from eval_studio_client.api.models.test_service_generate_test_cases_request import TestServiceGenerateTestCasesRequest
+from eval_studio_client.api.models.test_service_import_test_cases_from_library_request import TestServiceImportTestCasesFromLibraryRequest
+from eval_studio_client.api.models.test_service_list_test_case_library_items_request import TestServiceListTestCaseLibraryItemsRequest
+from eval_studio_client.api.models.test_service_perturb_test_in_place_request import TestServicePerturbTestInPlaceRequest
 from eval_studio_client.api.models.test_service_perturb_test_request import TestServicePerturbTestRequest
+from eval_studio_client.api.models.v1_abort_operation_response import V1AbortOperationResponse
 from eval_studio_client.api.models.v1_batch_create_leaderboards_request import V1BatchCreateLeaderboardsRequest
 from eval_studio_client.api.models.v1_batch_create_leaderboards_response import V1BatchCreateLeaderboardsResponse
 from eval_studio_client.api.models.v1_batch_delete_dashboards_request import V1BatchDeleteDashboardsRequest
@@ -44,18 +53,25 @@ from eval_studio_client.api.models.v1_batch_delete_models_response import V1Batc
 from eval_studio_client.api.models.v1_batch_delete_test_cases_response import V1BatchDeleteTestCasesResponse
 from eval_studio_client.api.models.v1_batch_delete_tests_request import V1BatchDeleteTestsRequest
 from eval_studio_client.api.models.v1_batch_delete_tests_response import V1BatchDeleteTestsResponse
+from eval_studio_client.api.models.v1_batch_delete_workflows_request import V1BatchDeleteWorkflowsRequest
+from eval_studio_client.api.models.v1_batch_delete_workflows_response import V1BatchDeleteWorkflowsResponse
 from eval_studio_client.api.models.v1_batch_get_dashboards_response import V1BatchGetDashboardsResponse
 from eval_studio_client.api.models.v1_batch_get_documents_response import V1BatchGetDocumentsResponse
 from eval_studio_client.api.models.v1_batch_get_leaderboards_response import V1BatchGetLeaderboardsResponse
 from eval_studio_client.api.models.v1_batch_get_models_response import V1BatchGetModelsResponse
 from eval_studio_client.api.models.v1_batch_get_operations_response import V1BatchGetOperationsResponse
 from eval_studio_client.api.models.v1_batch_get_tests_response import V1BatchGetTestsResponse
+from eval_studio_client.api.models.v1_batch_get_workflow_edges_response import V1BatchGetWorkflowEdgesResponse
+from eval_studio_client.api.models.v1_batch_get_workflow_nodes_response import V1BatchGetWorkflowNodesResponse
 from eval_studio_client.api.models.v1_batch_import_leaderboard_request import V1BatchImportLeaderboardRequest
 from eval_studio_client.api.models.v1_batch_import_leaderboard_response import V1BatchImportLeaderboardResponse
 from eval_studio_client.api.models.v1_batch_import_tests_request import V1BatchImportTestsRequest
 from eval_studio_client.api.models.v1_batch_import_tests_response import V1BatchImportTestsResponse
 from eval_studio_client.api.models.v1_check_base_models_response import V1CheckBaseModelsResponse
+from eval_studio_client.api.models.v1_clone_test_response import V1CloneTestResponse
+from eval_studio_client.api.models.v1_clone_workflow_response import V1CloneWorkflowResponse
 from eval_studio_client.api.models.v1_collection_info import V1CollectionInfo
+from eval_studio_client.api.models.v1_context import V1Context
 from eval_studio_client.api.models.v1_create_dashboard_response import V1CreateDashboardResponse
 from eval_studio_client.api.models.v1_create_document_response import V1CreateDocumentResponse
 from eval_studio_client.api.models.v1_create_evaluation_request import V1CreateEvaluationRequest
@@ -68,8 +84,12 @@ from eval_studio_client.api.models.v1_create_perturbation_response import V1Crea
 from eval_studio_client.api.models.v1_create_test_case_response import V1CreateTestCaseResponse
 from eval_studio_client.api.models.v1_create_test_lab_response import V1CreateTestLabResponse
 from eval_studio_client.api.models.v1_create_test_response import V1CreateTestResponse
+from eval_studio_client.api.models.v1_create_workflow_edge_response import V1CreateWorkflowEdgeResponse
+from eval_studio_client.api.models.v1_create_workflow_node_response import V1CreateWorkflowNodeResponse
+from eval_studio_client.api.models.v1_create_workflow_response import V1CreateWorkflowResponse
 from eval_studio_client.api.models.v1_dashboard import V1Dashboard
 from eval_studio_client.api.models.v1_dashboard_status import V1DashboardStatus
+from eval_studio_client.api.models.v1_dashboard_type import V1DashboardType
 from eval_studio_client.api.models.v1_delete_dashboard_response import V1DeleteDashboardResponse
 from eval_studio_client.api.models.v1_delete_document_response import V1DeleteDocumentResponse
 from eval_studio_client.api.models.v1_delete_evaluator_response import V1DeleteEvaluatorResponse
@@ -77,7 +97,12 @@ from eval_studio_client.api.models.v1_delete_leaderboard_response import V1Delet
 from eval_studio_client.api.models.v1_delete_model_response import V1DeleteModelResponse
 from eval_studio_client.api.models.v1_delete_test_case_response import V1DeleteTestCaseResponse
 from eval_studio_client.api.models.v1_delete_test_response import V1DeleteTestResponse
+from eval_studio_client.api.models.v1_delete_workflow_edge_response import V1DeleteWorkflowEdgeResponse
+from eval_studio_client.api.models.v1_delete_workflow_node_response import V1DeleteWorkflowNodeResponse
+from eval_studio_client.api.models.v1_delete_workflow_response import V1DeleteWorkflowResponse
+from eval_studio_client.api.models.v1_dependency_list import V1DependencyList
 from eval_studio_client.api.models.v1_document import V1Document
+from eval_studio_client.api.models.v1_estimate_threshold_request import V1EstimateThresholdRequest
 from eval_studio_client.api.models.v1_evaluation_test import V1EvaluationTest
 from eval_studio_client.api.models.v1_evaluator import V1Evaluator
 from eval_studio_client.api.models.v1_evaluator_param_type import V1EvaluatorParamType
@@ -90,7 +115,9 @@ from eval_studio_client.api.models.v1_generate_test_cases_response import V1Gene
 from eval_studio_client.api.models.v1_get_dashboard_response import V1GetDashboardResponse
 from eval_studio_client.api.models.v1_get_document_response import V1GetDocumentResponse
 from eval_studio_client.api.models.v1_get_evaluator_response import V1GetEvaluatorResponse
+from eval_studio_client.api.models.v1_get_guardrails_configuration_response import V1GetGuardrailsConfigurationResponse
 from eval_studio_client.api.models.v1_get_info_response import V1GetInfoResponse
+from eval_studio_client.api.models.v1_get_leaderboard_report_response import V1GetLeaderboardReportResponse
 from eval_studio_client.api.models.v1_get_leaderboard_response import V1GetLeaderboardResponse
 from eval_studio_client.api.models.v1_get_model_response import V1GetModelResponse
 from eval_studio_client.api.models.v1_get_operation_progress_by_parent_response import V1GetOperationProgressByParentResponse
@@ -99,12 +126,29 @@ from eval_studio_client.api.models.v1_get_perturbator_response import V1GetPertu
 from eval_studio_client.api.models.v1_get_test_case_response import V1GetTestCaseResponse
 from eval_studio_client.api.models.v1_get_test_class_response import V1GetTestClassResponse
 from eval_studio_client.api.models.v1_get_test_response import V1GetTestResponse
+from eval_studio_client.api.models.v1_get_workflow_node_prerequisites_response import V1GetWorkflowNodePrerequisitesResponse
+from eval_studio_client.api.models.v1_get_workflow_node_response import V1GetWorkflowNodeResponse
+from eval_studio_client.api.models.v1_get_workflow_response import V1GetWorkflowResponse
 from eval_studio_client.api.models.v1_import_evaluation_request import V1ImportEvaluationRequest
 from eval_studio_client.api.models.v1_import_leaderboard_request import V1ImportLeaderboardRequest
 from eval_studio_client.api.models.v1_import_leaderboard_response import V1ImportLeaderboardResponse
+from eval_studio_client.api.models.v1_import_test_cases_from_library_response import V1ImportTestCasesFromLibraryResponse
+from eval_studio_client.api.models.v1_import_test_cases_request import V1ImportTestCasesRequest
 from eval_studio_client.api.models.v1_info import V1Info
+from eval_studio_client.api.models.v1_init_workflow_node_response import V1InitWorkflowNodeResponse
 from eval_studio_client.api.models.v1_insight import V1Insight
+from eval_studio_client.api.models.v1_labeled_test_case import V1LabeledTestCase
 from eval_studio_client.api.models.v1_leaderboard import V1Leaderboard
+from eval_studio_client.api.models.v1_leaderboard_report import V1LeaderboardReport
+from eval_studio_client.api.models.v1_leaderboard_report_actual_output_data import V1LeaderboardReportActualOutputData
+from eval_studio_client.api.models.v1_leaderboard_report_actual_output_meta import V1LeaderboardReportActualOutputMeta
+from eval_studio_client.api.models.v1_leaderboard_report_evaluator import V1LeaderboardReportEvaluator
+from eval_studio_client.api.models.v1_leaderboard_report_evaluator_parameter import V1LeaderboardReportEvaluatorParameter
+from eval_studio_client.api.models.v1_leaderboard_report_explanation import V1LeaderboardReportExplanation
+from eval_studio_client.api.models.v1_leaderboard_report_metrics_meta_entry import V1LeaderboardReportMetricsMetaEntry
+from eval_studio_client.api.models.v1_leaderboard_report_model import V1LeaderboardReportModel
+from eval_studio_client.api.models.v1_leaderboard_report_result import V1LeaderboardReportResult
+from eval_studio_client.api.models.v1_leaderboard_report_result_relationship import V1LeaderboardReportResultRelationship
 from eval_studio_client.api.models.v1_leaderboard_status import V1LeaderboardStatus
 from eval_studio_client.api.models.v1_leaderboard_type import V1LeaderboardType
 from eval_studio_client.api.models.v1_leaderboard_view import V1LeaderboardView
@@ -122,19 +166,32 @@ from eval_studio_client.api.models.v1_list_most_recent_models_response import V1
 from eval_studio_client.api.models.v1_list_most_recent_tests_response import V1ListMostRecentTestsResponse
 from eval_studio_client.api.models.v1_list_operations_response import V1ListOperationsResponse
 from eval_studio_client.api.models.v1_list_perturbators_response import V1ListPerturbatorsResponse
+from eval_studio_client.api.models.v1_list_prompt_library_items_response import V1ListPromptLibraryItemsResponse
 from eval_studio_client.api.models.v1_list_rag_collections_response import V1ListRAGCollectionsResponse
+from eval_studio_client.api.models.v1_list_test_case_library_items_response import V1ListTestCaseLibraryItemsResponse
+from eval_studio_client.api.models.v1_list_test_case_relationships_response import V1ListTestCaseRelationshipsResponse
 from eval_studio_client.api.models.v1_list_test_cases_response import V1ListTestCasesResponse
 from eval_studio_client.api.models.v1_list_test_classes_response import V1ListTestClassesResponse
 from eval_studio_client.api.models.v1_list_tests_response import V1ListTestsResponse
+from eval_studio_client.api.models.v1_list_workflow_dependencies_response import V1ListWorkflowDependenciesResponse
+from eval_studio_client.api.models.v1_list_workflows_response import V1ListWorkflowsResponse
+from eval_studio_client.api.models.v1_metric_score import V1MetricScore
+from eval_studio_client.api.models.v1_metric_scores import V1MetricScores
 from eval_studio_client.api.models.v1_model import V1Model
 from eval_studio_client.api.models.v1_model_type import V1ModelType
 from eval_studio_client.api.models.v1_operation import V1Operation
 from eval_studio_client.api.models.v1_operation_progress import V1OperationProgress
+from eval_studio_client.api.models.v1_perturb_test_in_place_response import V1PerturbTestInPlaceResponse
 from eval_studio_client.api.models.v1_perturb_test_response import V1PerturbTestResponse
 from eval_studio_client.api.models.v1_perturbator import V1Perturbator
 from eval_studio_client.api.models.v1_perturbator_configuration import V1PerturbatorConfiguration
 from eval_studio_client.api.models.v1_perturbator_intensity import V1PerturbatorIntensity
 from eval_studio_client.api.models.v1_problem_and_action import V1ProblemAndAction
+from eval_studio_client.api.models.v1_process_workflow_node_response import V1ProcessWorkflowNodeResponse
+from eval_studio_client.api.models.v1_prompt_library_item import V1PromptLibraryItem
+from eval_studio_client.api.models.v1_repeated_context import V1RepeatedContext
+from eval_studio_client.api.models.v1_repeated_string import V1RepeatedString
+from eval_studio_client.api.models.v1_reset_workflow_node_response import V1ResetWorkflowNodeResponse
 from eval_studio_client.api.models.v1_test import V1Test
 from eval_studio_client.api.models.v1_test_case import V1TestCase
 from eval_studio_client.api.models.v1_test_case_relationship import V1TestCaseRelationship
@@ -142,6 +199,8 @@ from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGen
 from eval_studio_client.api.models.v1_test_class import V1TestClass
 from eval_studio_client.api.models.v1_test_class_type import V1TestClassType
 from eval_studio_client.api.models.v1_test_lab import V1TestLab
+from eval_studio_client.api.models.v1_test_suite_evaluates import V1TestSuiteEvaluates
+from eval_studio_client.api.models.v1_test_type import V1TestType
 from eval_studio_client.api.models.v1_update_dashboard_response import V1UpdateDashboardResponse
 from eval_studio_client.api.models.v1_update_document_response import V1UpdateDocumentResponse
 from eval_studio_client.api.models.v1_update_leaderboard_response import V1UpdateLeaderboardResponse
@@ -149,4 +208,19 @@ from eval_studio_client.api.models.v1_update_model_response import V1UpdateModel
 from eval_studio_client.api.models.v1_update_operation_response import V1UpdateOperationResponse
 from eval_studio_client.api.models.v1_update_test_case_response import V1UpdateTestCaseResponse
 from eval_studio_client.api.models.v1_update_test_response import V1UpdateTestResponse
+from eval_studio_client.api.models.v1_update_workflow_node_response import V1UpdateWorkflowNodeResponse
+from eval_studio_client.api.models.v1_update_workflow_response import V1UpdateWorkflowResponse
 from eval_studio_client.api.models.v1_who_am_i_response import V1WhoAmIResponse
+from eval_studio_client.api.models.v1_workflow import V1Workflow
+from eval_studio_client.api.models.v1_workflow_dependency import V1WorkflowDependency
+from eval_studio_client.api.models.v1_workflow_edge import V1WorkflowEdge
+from eval_studio_client.api.models.v1_workflow_edge_type import V1WorkflowEdgeType
+from eval_studio_client.api.models.v1_workflow_node import V1WorkflowNode
+from eval_studio_client.api.models.v1_workflow_node_artifact import V1WorkflowNodeArtifact
|
|
220
|
+
from eval_studio_client.api.models.v1_workflow_node_artifacts import V1WorkflowNodeArtifacts
|
|
221
|
+
from eval_studio_client.api.models.v1_workflow_node_attributes import V1WorkflowNodeAttributes
|
|
222
|
+
from eval_studio_client.api.models.v1_workflow_node_status import V1WorkflowNodeStatus
|
|
223
|
+
from eval_studio_client.api.models.v1_workflow_node_type import V1WorkflowNodeType
|
|
224
|
+
from eval_studio_client.api.models.v1_workflow_node_view import V1WorkflowNodeView
|
|
225
|
+
from eval_studio_client.api.models.v1_workflow_type import V1WorkflowType
|
|
226
|
+
from eval_studio_client.api.models.workflow_service_clone_workflow_request import WorkflowServiceCloneWorkflowRequest
|
|
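The hunks above only extend the generated model re-export list with the new leaderboard-report, prompt-library, metric-score, and workflow classes. A minimal sketch of what the new exports look like from client code, assuming the new classes follow the same generated-model interface (`from_json`, `from_dict`, `to_dict`) shown for the other models later in this diff; the empty payload is a placeholder, not an example from the package:

```python
# Import paths are taken verbatim from the hunks above.
from eval_studio_client.api.models.v1_workflow import V1Workflow
from eval_studio_client.api.models.v1_leaderboard_report import V1LeaderboardReport
from eval_studio_client.api.models.v1_metric_scores import V1MetricScores

# Assumption: like the other generated models in this diff, every field is optional,
# so an empty JSON object parses into an instance with all fields left at None.
workflow = V1Workflow.from_json("{}")
print(workflow.to_dict())
```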
```diff
@@ -0,0 +1,143 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from eval_studio_client.api.models.v1_metric_scores import V1MetricScores
+from eval_studio_client.api.models.v1_model import V1Model
+from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
+from typing import Optional, Set
+from typing_extensions import Self
+
+class AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(BaseModel):
+    """
+    AdversarialInputsServiceTestAdversarialInputsRobustnessRequest
+    """ # noqa: E501
+    operation: Optional[StrictStr] = Field(default=None, description="Required. The Operation processing adversarial inputs robustness testing.")
+    generator_input_types: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. The list of adversarial input types to generate.", alias="generatorInputTypes")
+    generator_document_urls: Optional[List[StrictStr]] = Field(default=None, description="Required. The document URLs which were used to generate the baseline TestCases.", alias="generatorDocumentUrls")
+    generator_model: Optional[V1Model] = Field(default=None, alias="generatorModel")
+    generator_base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to use for generation of adversarial the prompts.", alias="generatorBaseLlmModel")
+    generator_count: Optional[StrictInt] = Field(default=None, description="Required. The number of adversarial TestCases to generate.", alias="generatorCount")
+    generator_topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.", alias="generatorTopics")
+    generator_chunks: Optional[List[StrictStr]] = Field(default=None, description="Optional. The list of chunks to use for generation. If set, the Documents assigned to the Test and h2ogpte_collection_id are ignored.", alias="generatorChunks")
+    generator_h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. ID of the h2oGPTe collection to use. If provided, documents referenced by Test and any specified chunks are ignored. This field is required if Test does not reference any documents and no chunks are provided. If this field is left empty, a temporary collection will be created.", alias="generatorH2ogpteCollectionId")
+    evaluator_identifiers: Optional[List[StrictStr]] = Field(default=None, description="Required. Evaluator identifiers to use for the model evaluation using the adversarial inputs.", alias="evaluatorIdentifiers")
+    evaluators_parameters: Optional[Dict[str, StrictStr]] = Field(default=None, description="Optional. Additional evaluators configuration, for all the evaluators used in the evaluation. Key is the evaluator identifier, and the value is a JSON string containing the configuration dictionary.", alias="evaluatorsParameters")
+    model: Optional[V1Model] = None
+    base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to be evaluated using the adversarial inputs.", alias="baseLlmModel")
+    model_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Parameters overrides for the Model host in JSON format.", alias="modelParameters")
+    default_h2ogpte_model: Optional[V1Model] = Field(default=None, alias="defaultH2ogpteModel")
+    baseline_eval: Optional[StrictStr] = Field(default=None, description="Required. Baseline evaluation name.", alias="baselineEval")
+    baseline_metrics: Optional[Dict[str, V1MetricScores]] = Field(default=None, description="Required. Map of baseline metrics from the evaluator to the metric scores for the evaluator.", alias="baselineMetrics")
+    __properties: ClassVar[List[str]] = ["operation", "generatorInputTypes", "generatorDocumentUrls", "generatorModel", "generatorBaseLlmModel", "generatorCount", "generatorTopics", "generatorChunks", "generatorH2ogpteCollectionId", "evaluatorIdentifiers", "evaluatorsParameters", "model", "baseLlmModel", "modelParameters", "defaultH2ogpteModel", "baselineEval", "baselineMetrics"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of AdversarialInputsServiceTestAdversarialInputsRobustnessRequest from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of generator_model
+        if self.generator_model:
+            _dict['generatorModel'] = self.generator_model.to_dict()
+        # override the default output from pydantic by calling `to_dict()` of model
+        if self.model:
+            _dict['model'] = self.model.to_dict()
+        # override the default output from pydantic by calling `to_dict()` of default_h2ogpte_model
+        if self.default_h2ogpte_model:
+            _dict['defaultH2ogpteModel'] = self.default_h2ogpte_model.to_dict()
+        # override the default output from pydantic by calling `to_dict()` of each value in baseline_metrics (dict)
+        _field_dict = {}
+        if self.baseline_metrics:
+            for _key in self.baseline_metrics:
+                if self.baseline_metrics[_key]:
+                    _field_dict[_key] = self.baseline_metrics[_key].to_dict()
+            _dict['baselineMetrics'] = _field_dict
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of AdversarialInputsServiceTestAdversarialInputsRobustnessRequest from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "operation": obj.get("operation"),
+            "generatorInputTypes": obj.get("generatorInputTypes"),
+            "generatorDocumentUrls": obj.get("generatorDocumentUrls"),
+            "generatorModel": V1Model.from_dict(obj["generatorModel"]) if obj.get("generatorModel") is not None else None,
+            "generatorBaseLlmModel": obj.get("generatorBaseLlmModel"),
+            "generatorCount": obj.get("generatorCount"),
+            "generatorTopics": obj.get("generatorTopics"),
+            "generatorChunks": obj.get("generatorChunks"),
+            "generatorH2ogpteCollectionId": obj.get("generatorH2ogpteCollectionId"),
+            "evaluatorIdentifiers": obj.get("evaluatorIdentifiers"),
+            "evaluatorsParameters": obj.get("evaluatorsParameters"),
+            "model": V1Model.from_dict(obj["model"]) if obj.get("model") is not None else None,
+            "baseLlmModel": obj.get("baseLlmModel"),
+            "modelParameters": obj.get("modelParameters"),
+            "defaultH2ogpteModel": V1Model.from_dict(obj["defaultH2ogpteModel"]) if obj.get("defaultH2ogpteModel") is not None else None,
+            "baselineEval": obj.get("baselineEval"),
+            "baselineMetrics": dict(
+                (_k, V1MetricScores.from_dict(_v))
+                for _k, _v in obj["baselineMetrics"].items()
+            )
+            if obj.get("baselineMetrics") is not None
+            else None
+        })
+        return _obj
+
+
```
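The hunk above adds the full request model for the new AdversarialInputsService. A usage sketch follows; the module path is inferred from the package's snake_case naming convention (it is not shown in this hunk), only fields defined above are set, and all values are placeholders rather than examples from the package:

```python
# Module path below is an assumption based on the package's naming convention.
from eval_studio_client.api.models.adversarial_inputs_service_test_adversarial_inputs_robustness_request import (
    AdversarialInputsServiceTestAdversarialInputsRobustnessRequest,
)

# Placeholder values; every field is Optional in the generated model above.
req = AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(
    operation="operations/123",
    generator_document_urls=["https://example.com/handbook.pdf"],
    generator_base_llm_model="gpt-4o",
    generator_count=5,
    evaluator_identifiers=["answer-correctness"],
    base_llm_model="gpt-4o",
    baseline_eval="evals/baseline-1",
)

# to_dict() emits the camelCase wire aliases defined above and drops unset fields.
payload = req.to_dict()
assert "generatorDocumentUrls" in payload and "generatorTopics" not in payload
```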
```diff
@@ -1,7 +1,7 @@
 # coding: utf-8
 
 """
-    ai/h2o/eval_studio/v1/
+    ai/h2o/eval_studio/v1/insight.proto
 
     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -19,6 +19,7 @@ import json
 
 from pydantic import BaseModel, ConfigDict, Field
 from typing import Any, ClassVar, Dict, List, Optional
+from eval_studio_client.api.models.v1_model import V1Model
 from eval_studio_client.api.models.v1_perturbator_configuration import V1PerturbatorConfiguration
 from eval_studio_client.api.models.v1_test_case import V1TestCase
 from eval_studio_client.api.models.v1_test_case_relationship import V1TestCaseRelationship
@@ -32,7 +33,8 @@ class PerturbationServiceCreatePerturbationRequest(BaseModel):
     perturbator_configurations: Optional[List[V1PerturbatorConfiguration]] = Field(default=None, description="Required. PerturbatorConfiguration to apply to the parent Test.", alias="perturbatorConfigurations")
     test_cases: Optional[List[V1TestCase]] = Field(default=None, description="Required. List of test cases to perturbate. These are the test cases from the parent test. TODO: breaks https://google.aip.dev/144", alias="testCases")
     test_case_relationships: Optional[List[V1TestCaseRelationship]] = Field(default=None, description="Optional. List of relationships between test cases.", alias="testCaseRelationships")
-
+    default_h2ogpte_model: Optional[V1Model] = Field(default=None, alias="defaultH2ogpteModel")
+    __properties: ClassVar[List[str]] = ["perturbatorConfigurations", "testCases", "testCaseRelationships", "defaultH2ogpteModel"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -94,6 +96,9 @@ class PerturbationServiceCreatePerturbationRequest(BaseModel):
                 if _item:
                     _items.append(_item.to_dict())
             _dict['testCaseRelationships'] = _items
+        # override the default output from pydantic by calling `to_dict()` of default_h2ogpte_model
+        if self.default_h2ogpte_model:
+            _dict['defaultH2ogpteModel'] = self.default_h2ogpte_model.to_dict()
         return _dict
 
     @classmethod
@@ -108,7 +113,8 @@ class PerturbationServiceCreatePerturbationRequest(BaseModel):
         _obj = cls.model_validate({
            "perturbatorConfigurations": [V1PerturbatorConfiguration.from_dict(_item) for _item in obj["perturbatorConfigurations"]] if obj.get("perturbatorConfigurations") is not None else None,
            "testCases": [V1TestCase.from_dict(_item) for _item in obj["testCases"]] if obj.get("testCases") is not None else None,
-            "testCaseRelationships": [V1TestCaseRelationship.from_dict(_item) for _item in obj["testCaseRelationships"]] if obj.get("testCaseRelationships") is not None else None
+            "testCaseRelationships": [V1TestCaseRelationship.from_dict(_item) for _item in obj["testCaseRelationships"]] if obj.get("testCaseRelationships") is not None else None,
+            "defaultH2ogpteModel": V1Model.from_dict(obj["defaultH2ogpteModel"]) if obj.get("defaultH2ogpteModel") is not None else None
         })
         return _obj
 
```
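The change above threads an optional `defaultH2ogpteModel` through CreatePerturbation requests. A rough sketch, assuming the request module path follows the package convention and that `V1Model` (whose own fields are not part of this hunk) can be instantiated empty like the other generated models:

```python
# The V1Model import path is shown in the hunk above; the request module path is inferred.
from eval_studio_client.api.models.v1_model import V1Model
from eval_studio_client.api.models.perturbation_service_create_perturbation_request import (
    PerturbationServiceCreatePerturbationRequest,
)

req = PerturbationServiceCreatePerturbationRequest(
    default_h2ogpte_model=V1Model(),  # placeholder; real V1Model fields are not shown in this diff
)
# to_dict() re-serializes the nested model under its camelCase alias (see hunk above).
print("defaultH2ogpteModel" in req.to_dict())
```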
```diff
@@ -1,7 +1,7 @@
 # coding: utf-8
 
 """
-    ai/h2o/eval_studio/v1/
+    ai/h2o/eval_studio/v1/insight.proto
 
     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -20,6 +20,8 @@ import json
 from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
 from typing import Any, ClassVar, Dict, List, Optional
 from eval_studio_client.api.models.v1_model import V1Model
+from eval_studio_client.api.models.v1_repeated_context import V1RepeatedContext
+from eval_studio_client.api.models.v1_repeated_string import V1RepeatedString
 from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
 from typing import Optional, Set
 from typing_extensions import Self
@@ -32,10 +34,12 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
     model: Optional[V1Model] = None
     count: Optional[StrictInt] = Field(default=None, description="Required. The number of TestCases to generate.")
     base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to use for generating the prompts.", alias="baseLlmModel")
-    document_urls: Optional[
-
+    document_urls: Optional[V1RepeatedString] = Field(default=None, alias="documentUrls")
+    chunks: Optional[V1RepeatedContext] = None
+    generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. Type of questions to generate TestCases for. If not specified, all types of questions are selected.")
     h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. The ID of the h2oGPTe collection to use. If empty, new temporary collection will be created.", alias="h2ogpteCollectionId")
-
+    topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.")
+    __properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "chunks", "generators", "h2ogpteCollectionId", "topics"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -79,6 +83,12 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
         # override the default output from pydantic by calling `to_dict()` of model
         if self.model:
             _dict['model'] = self.model.to_dict()
+        # override the default output from pydantic by calling `to_dict()` of document_urls
+        if self.document_urls:
+            _dict['documentUrls'] = self.document_urls.to_dict()
+        # override the default output from pydantic by calling `to_dict()` of chunks
+        if self.chunks:
+            _dict['chunks'] = self.chunks.to_dict()
         return _dict
 
     @classmethod
@@ -95,9 +105,11 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
             "model": V1Model.from_dict(obj["model"]) if obj.get("model") is not None else None,
             "count": obj.get("count"),
             "baseLlmModel": obj.get("baseLlmModel"),
-            "documentUrls": obj.get("documentUrls"),
+            "documentUrls": V1RepeatedString.from_dict(obj["documentUrls"]) if obj.get("documentUrls") is not None else None,
+            "chunks": V1RepeatedContext.from_dict(obj["chunks"]) if obj.get("chunks") is not None else None,
            "generators": obj.get("generators"),
-            "h2ogpteCollectionId": obj.get("h2ogpteCollectionId")
+            "h2ogpteCollectionId": obj.get("h2ogpteCollectionId"),
+            "topics": obj.get("topics")
         })
         return _obj
 
```
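AutoGeneratePrompts requests now carry `topics` and `generators` directly and wrap `documentUrls`/`chunks` in the new repeated-value types. A sketch using only fields whose definitions appear above; the module path is inferred from the package convention, the wrapper types are left unset because their own fields are not part of this diff, and the values are placeholders:

```python
# Module path inferred from the package's naming convention; not shown in this hunk.
from eval_studio_client.api.models.prompt_generation_service_auto_generate_prompts_request import (
    PromptGenerationServiceAutoGeneratePromptsRequest,
)

req = PromptGenerationServiceAutoGeneratePromptsRequest(
    count=10,
    base_llm_model="gpt-4o",                     # placeholder model name
    h2ogpte_collection_id="",                    # empty -> a temporary collection is created
    topics=["refund policy", "shipping times"],  # new in this version; placeholder topics
)
# The wire payload uses the camelCase aliases (baseLlmModel, h2ogpteCollectionId, ...).
print(req.to_dict())
```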
```diff
@@ -0,0 +1,36 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import json
+from enum import Enum
+from typing_extensions import Self
+
+
+class ProtobufNullValue(str, Enum):
+    """
+    `NullValue` is a singleton enumeration to represent the null value for the `Value` type union. The JSON representation for `NullValue` is JSON `null`. - NULL_VALUE: Null value.
+    """
+
+    """
+    allowed enum values
+    """
+    NULL_VALUE = 'NULL_VALUE'
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Self:
+        """Create an instance of ProtobufNullValue from a JSON string"""
+        return cls(json.loads(json_str))
+
+
```
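The new `ProtobufNullValue` enum mirrors protobuf's well-known `NullValue`. A minimal sketch; the module path is inferred from the package's naming convention:

```python
# Module path is an assumption; the enum itself is defined in the hunk above.
from eval_studio_client.api.models.protobuf_null_value import ProtobufNullValue

value = ProtobufNullValue.NULL_VALUE
assert value == "NULL_VALUE"  # str-valued enum member
# from_json() parses a JSON string literal back into the singleton member.
assert ProtobufNullValue.from_json('"NULL_VALUE"') is ProtobufNullValue.NULL_VALUE
```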
```diff
@@ -1,7 +1,7 @@
 # coding: utf-8
 
 """
-    ai/h2o/eval_studio/v1/
+    ai/h2o/eval_studio/v1/insight.proto
 
     No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -21,6 +21,7 @@ from datetime import datetime
 from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
 from typing import Any, ClassVar, Dict, List, Optional
 from eval_studio_client.api.models.v1_dashboard_status import V1DashboardStatus
+from eval_studio_client.api.models.v1_dashboard_type import V1DashboardType
 from typing import Optional, Set
 from typing_extensions import Self
 
@@ -40,7 +41,8 @@ class RequiredTheDashboardToUpdate(BaseModel):
     leaderboards: Optional[List[StrictStr]] = Field(default=None, description="Immutable. Resource names of the Leaderboards used in this Dashboard.")
     create_operation: Optional[StrictStr] = Field(default=None, description="Output only. Operation resource name that created this Dashboard.", alias="createOperation")
     demo: Optional[StrictBool] = Field(default=None, description="Output only. Whether the Dashboard is a demo resource or not. Demo resources are read only.")
-
+    type: Optional[V1DashboardType] = None
+    __properties: ClassVar[List[str]] = ["createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "status", "leaderboards", "createOperation", "demo", "type"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -120,7 +122,8 @@ class RequiredTheDashboardToUpdate(BaseModel):
             "status": obj.get("status"),
             "leaderboards": obj.get("leaderboards"),
             "createOperation": obj.get("createOperation"),
-            "demo": obj.get("demo")
+            "demo": obj.get("demo"),
+            "type": obj.get("type")
         })
         return _obj
 
```
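Dashboard updates gain an optional `type` field. A brief sketch, assuming the module path from the package convention; since the allowed `V1DashboardType` values are not shown in this hunk, the field is simply left at its default:

```python
# Module path inferred from the package layout; only keys visible above are used.
from eval_studio_client.api.models.required_the_dashboard_to_update import (
    RequiredTheDashboardToUpdate,
)

dashboard = RequiredTheDashboardToUpdate.from_dict({"demo": False})
assert dashboard.type is None  # the new field defaults to None when the key is absent
```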