eval-studio-client 1.2.4a2__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (306)
  1. eval_studio_client/api/__init__.py +65 -0
  2. eval_studio_client/api/api/__init__.py +3 -0
  3. eval_studio_client/api/api/dashboard_report_service_api.py +292 -0
  4. eval_studio_client/api/api/dashboard_service_api.py +16 -16
  5. eval_studio_client/api/api/dashboard_test_case_annotation_service_api.py +611 -0
  6. eval_studio_client/api/api/document_service_api.py +16 -16
  7. eval_studio_client/api/api/evaluation_service_api.py +12 -12
  8. eval_studio_client/api/api/evaluator_service_api.py +16 -16
  9. eval_studio_client/api/api/leaderboard_report_service_api.py +304 -17
  10. eval_studio_client/api/api/leaderboard_service_api.py +554 -16
  11. eval_studio_client/api/api/leaderboard_test_case_annotation_service_api.py +611 -0
  12. eval_studio_client/api/api/model_service_api.py +16 -16
  13. eval_studio_client/api/api/operation_service_api.py +821 -17
  14. eval_studio_client/api/api/perturbator_service_api.py +22 -22
  15. eval_studio_client/api/api/test_case_service_api.py +300 -16
  16. eval_studio_client/api/api/test_class_service_api.py +16 -16
  17. eval_studio_client/api/api/test_service_api.py +285 -16
  18. eval_studio_client/api/api/workflow_node_service_api.py +16 -16
  19. eval_studio_client/api/api/workflow_service_api.py +16 -16
  20. eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +2 -1
  21. eval_studio_client/api/docs/DashboardReportServiceApi.md +75 -0
  22. eval_studio_client/api/docs/DashboardServiceApi.md +5 -5
  23. eval_studio_client/api/docs/DashboardTestCaseAnnotationServiceApi.md +149 -0
  24. eval_studio_client/api/docs/DocumentServiceApi.md +5 -5
  25. eval_studio_client/api/docs/EvaluationServiceApi.md +4 -4
  26. eval_studio_client/api/docs/EvaluatorServiceApi.md +5 -5
  27. eval_studio_client/api/docs/LeaderboardReportServiceApi.md +75 -5
  28. eval_studio_client/api/docs/LeaderboardServiceApi.md +141 -5
  29. eval_studio_client/api/docs/LeaderboardTestCaseAnnotationServiceApi.md +149 -0
  30. eval_studio_client/api/docs/ModelServiceApi.md +5 -5
  31. eval_studio_client/api/docs/OperationServiceApi.md +215 -8
  32. eval_studio_client/api/docs/PerturbatorServiceApi.md +7 -7
  33. eval_studio_client/api/docs/RequiredTheDashboardTestCaseAnnotationToUpdate.md +35 -0
  34. eval_studio_client/api/docs/RequiredTheLeaderboardTestCaseAnnotationToUpdate.md +35 -0
  35. eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +1 -0
  36. eval_studio_client/api/docs/RequiredTheOperationToFinalize.md +1 -0
  37. eval_studio_client/api/docs/RequiredTheOperationToUpdate.md +1 -0
  38. eval_studio_client/api/docs/TestCaseServiceApi.md +75 -5
  39. eval_studio_client/api/docs/TestCaseServiceAppendTestCasesRequest.md +30 -0
  40. eval_studio_client/api/docs/TestClassServiceApi.md +5 -5
  41. eval_studio_client/api/docs/TestServiceApi.md +73 -5
  42. eval_studio_client/api/docs/V1ActualOutputMeta.md +30 -0
  43. eval_studio_client/api/docs/V1ActualOutputMetaDiff.md +36 -0
  44. eval_studio_client/api/docs/V1AgentChatActivityDiagram.md +31 -0
  45. eval_studio_client/api/docs/V1AgentChatActivityDiagramEdge.md +32 -0
  46. eval_studio_client/api/docs/V1AgentChatActivityDiagramNode.md +32 -0
  47. eval_studio_client/api/docs/V1AgentChatActivityDiagramRow.md +30 -0
  48. eval_studio_client/api/docs/V1AgentChatScriptUsage.md +33 -0
  49. eval_studio_client/api/docs/V1AgentChatScriptsBarChart.md +30 -0
  50. eval_studio_client/api/docs/V1AgentChatToolUsage.md +33 -0
  51. eval_studio_client/api/docs/V1AgentChatToolsBarChart.md +30 -0
  52. eval_studio_client/api/docs/V1AllMetricScores.md +29 -0
  53. eval_studio_client/api/docs/V1AppendTestCasesResponse.md +29 -0
  54. eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheRequest.md +31 -0
  55. eval_studio_client/api/docs/V1BatchCreateLeaderboardsWithoutCacheResponse.md +29 -0
  56. eval_studio_client/api/docs/V1BatchMarkOperationSeenByCreatorResponse.md +29 -0
  57. eval_studio_client/api/docs/V1CmpLeaderboardReportsRequest.md +33 -0
  58. eval_studio_client/api/docs/V1CmpLeaderboardReportsResponse.md +29 -0
  59. eval_studio_client/api/docs/V1ComparisonItem.md +36 -0
  60. eval_studio_client/api/docs/V1ComparisonMetricScore.md +30 -0
  61. eval_studio_client/api/docs/V1ComparisonResult.md +31 -0
  62. eval_studio_client/api/docs/V1ComparisonSummary.md +31 -0
  63. eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
  64. eval_studio_client/api/docs/V1CreateTestFromTestCasesRequest.md +32 -0
  65. eval_studio_client/api/docs/V1CreateTestFromTestCasesResponse.md +29 -0
  66. eval_studio_client/api/docs/V1DashboardReport.md +31 -0
  67. eval_studio_client/api/docs/V1DashboardReportResult.md +39 -0
  68. eval_studio_client/api/docs/V1DashboardTestCaseAnnotation.md +36 -0
  69. eval_studio_client/api/docs/V1DataFragment.md +31 -0
  70. eval_studio_client/api/docs/V1DeepCompareLeaderboardsRequest.md +33 -0
  71. eval_studio_client/api/docs/V1DeepCompareLeaderboardsResponse.md +29 -0
  72. eval_studio_client/api/docs/V1DiffItem.md +36 -0
  73. eval_studio_client/api/docs/V1EvaluationType.md +12 -0
  74. eval_studio_client/api/docs/V1FlippedMetric.md +31 -0
  75. eval_studio_client/api/docs/V1GetDashboardReportResponse.md +29 -0
  76. eval_studio_client/api/docs/V1HumanDecision.md +12 -0
  77. eval_studio_client/api/docs/V1Info.md +1 -0
  78. eval_studio_client/api/docs/V1Leaderboard.md +1 -0
  79. eval_studio_client/api/docs/V1LeaderboardCmpReport.md +30 -0
  80. eval_studio_client/api/docs/V1LeaderboardComparisonItem.md +31 -0
  81. eval_studio_client/api/docs/V1LeaderboardInfo.md +30 -0
  82. eval_studio_client/api/docs/V1LeaderboardReportActualOutputMeta.md +6 -3
  83. eval_studio_client/api/docs/V1LeaderboardReportResult.md +11 -8
  84. eval_studio_client/api/docs/V1LeaderboardReportResultView.md +12 -0
  85. eval_studio_client/api/docs/V1LeaderboardTestCaseAnnotation.md +36 -0
  86. eval_studio_client/api/docs/V1ListDashboardTestCaseAnnotationsResponse.md +29 -0
  87. eval_studio_client/api/docs/V1ListLeaderboardTestCaseAnnotationsResponse.md +29 -0
  88. eval_studio_client/api/docs/V1ListOperationsResponse.md +1 -0
  89. eval_studio_client/api/docs/V1ListUnseenOperationsResponse.md +30 -0
  90. eval_studio_client/api/docs/V1MarkOperationSeenByCreatorResponse.md +29 -0
  91. eval_studio_client/api/docs/V1Metric.md +30 -0
  92. eval_studio_client/api/docs/V1MetricAverage.md +36 -0
  93. eval_studio_client/api/docs/V1MetricMeta.md +40 -0
  94. eval_studio_client/api/docs/V1MetricScore.md +1 -1
  95. eval_studio_client/api/docs/V1MetricScores.md +1 -1
  96. eval_studio_client/api/docs/V1ModelType.md +1 -1
  97. eval_studio_client/api/docs/V1ModelsComparisons.md +32 -0
  98. eval_studio_client/api/docs/V1ModelsComparisonsMetrics.md +33 -0
  99. eval_studio_client/api/docs/V1ModelsOverview.md +34 -0
  100. eval_studio_client/api/docs/V1Operation.md +1 -0
  101. eval_studio_client/api/docs/V1OperationView.md +12 -0
  102. eval_studio_client/api/docs/V1RetrievedContextDiff.md +36 -0
  103. eval_studio_client/api/docs/V1Stats.md +2 -0
  104. eval_studio_client/api/docs/V1TechnicalMetrics.md +30 -0
  105. eval_studio_client/api/docs/V1TechnicalMetricsDetail.md +33 -0
  106. eval_studio_client/api/docs/V1TestCaseLeaderboardItem.md +31 -0
  107. eval_studio_client/api/docs/V1TestCaseRelationshipInfo.md +31 -0
  108. eval_studio_client/api/docs/V1TestCaseResult.md +48 -0
  109. eval_studio_client/api/docs/V1TextSimilarityMetric.md +12 -0
  110. eval_studio_client/api/docs/V1UpdateDashboardTestCaseAnnotationResponse.md +29 -0
  111. eval_studio_client/api/docs/V1UpdateLeaderboardTestCaseAnnotationResponse.md +29 -0
  112. eval_studio_client/api/docs/WorkflowNodeServiceApi.md +5 -5
  113. eval_studio_client/api/docs/WorkflowServiceApi.md +5 -5
  114. eval_studio_client/api/models/__init__.py +62 -0
  115. eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +17 -2
  116. eval_studio_client/api/models/required_the_dashboard_test_case_annotation_to_update.py +108 -0
  117. eval_studio_client/api/models/required_the_leaderboard_test_case_annotation_to_update.py +108 -0
  118. eval_studio_client/api/models/required_the_leaderboard_to_update.py +5 -2
  119. eval_studio_client/api/models/required_the_operation_to_finalize.py +6 -2
  120. eval_studio_client/api/models/required_the_operation_to_update.py +6 -2
  121. eval_studio_client/api/models/test_case_service_append_test_cases_request.py +89 -0
  122. eval_studio_client/api/models/v1_actual_output_meta.py +97 -0
  123. eval_studio_client/api/models/v1_actual_output_meta_diff.py +101 -0
  124. eval_studio_client/api/models/v1_agent_chat_activity_diagram.py +109 -0
  125. eval_studio_client/api/models/v1_agent_chat_activity_diagram_edge.py +97 -0
  126. eval_studio_client/api/models/v1_agent_chat_activity_diagram_node.py +97 -0
  127. eval_studio_client/api/models/v1_agent_chat_activity_diagram_row.py +97 -0
  128. eval_studio_client/api/models/v1_agent_chat_script_usage.py +101 -0
  129. eval_studio_client/api/models/v1_agent_chat_scripts_bar_chart.py +102 -0
  130. eval_studio_client/api/models/v1_agent_chat_tool_usage.py +101 -0
  131. eval_studio_client/api/models/v1_agent_chat_tools_bar_chart.py +102 -0
  132. eval_studio_client/api/models/v1_all_metric_scores.py +87 -0
  133. eval_studio_client/api/models/v1_append_test_cases_response.py +95 -0
  134. eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_request.py +99 -0
  135. eval_studio_client/api/models/v1_batch_create_leaderboards_without_cache_response.py +91 -0
  136. eval_studio_client/api/models/v1_batch_mark_operation_seen_by_creator_response.py +95 -0
  137. eval_studio_client/api/models/v1_cmp_leaderboard_reports_request.py +96 -0
  138. eval_studio_client/api/models/v1_cmp_leaderboard_reports_response.py +91 -0
  139. eval_studio_client/api/models/v1_comparison_item.py +130 -0
  140. eval_studio_client/api/models/v1_comparison_metric_score.py +89 -0
  141. eval_studio_client/api/models/v1_comparison_result.py +120 -0
  142. eval_studio_client/api/models/v1_comparison_summary.py +91 -0
  143. eval_studio_client/api/models/v1_create_evaluation_request.py +5 -2
  144. eval_studio_client/api/models/v1_create_test_from_test_cases_request.py +93 -0
  145. eval_studio_client/api/models/v1_create_test_from_test_cases_response.py +91 -0
  146. eval_studio_client/api/models/v1_dashboard_report.py +109 -0
  147. eval_studio_client/api/models/v1_dashboard_report_result.py +139 -0
  148. eval_studio_client/api/models/v1_dashboard_test_case_annotation.py +112 -0
  149. eval_studio_client/api/models/v1_data_fragment.py +91 -0
  150. eval_studio_client/api/models/v1_deep_compare_leaderboards_request.py +96 -0
  151. eval_studio_client/api/models/v1_deep_compare_leaderboards_response.py +91 -0
  152. eval_studio_client/api/models/v1_diff_item.py +137 -0
  153. eval_studio_client/api/models/v1_evaluation_type.py +39 -0
  154. eval_studio_client/api/models/v1_flipped_metric.py +91 -0
  155. eval_studio_client/api/models/v1_get_dashboard_report_response.py +91 -0
  156. eval_studio_client/api/models/v1_human_decision.py +38 -0
  157. eval_studio_client/api/models/v1_info.py +4 -2
  158. eval_studio_client/api/models/v1_leaderboard.py +5 -2
  159. eval_studio_client/api/models/v1_leaderboard_cmp_report.py +93 -0
  160. eval_studio_client/api/models/v1_leaderboard_comparison_item.py +91 -0
  161. eval_studio_client/api/models/v1_leaderboard_info.py +97 -0
  162. eval_studio_client/api/models/v1_leaderboard_report_actual_output_meta.py +23 -9
  163. eval_studio_client/api/models/v1_leaderboard_report_result.py +21 -10
  164. eval_studio_client/api/models/v1_leaderboard_report_result_view.py +38 -0
  165. eval_studio_client/api/models/v1_leaderboard_test_case_annotation.py +112 -0
  166. eval_studio_client/api/models/v1_list_dashboard_test_case_annotations_response.py +95 -0
  167. eval_studio_client/api/models/v1_list_leaderboard_test_case_annotations_response.py +95 -0
  168. eval_studio_client/api/models/v1_list_operations_response.py +5 -3
  169. eval_studio_client/api/models/v1_list_unseen_operations_response.py +97 -0
  170. eval_studio_client/api/models/v1_mark_operation_seen_by_creator_response.py +91 -0
  171. eval_studio_client/api/models/v1_metric.py +89 -0
  172. eval_studio_client/api/models/v1_metric_average.py +101 -0
  173. eval_studio_client/api/models/v1_metric_meta.py +109 -0
  174. eval_studio_client/api/models/v1_metric_score.py +6 -1
  175. eval_studio_client/api/models/v1_metric_scores.py +1 -1
  176. eval_studio_client/api/models/v1_model_type.py +2 -1
  177. eval_studio_client/api/models/v1_models_comparisons.py +93 -0
  178. eval_studio_client/api/models/v1_models_comparisons_metrics.py +103 -0
  179. eval_studio_client/api/models/v1_models_overview.py +97 -0
  180. eval_studio_client/api/models/v1_operation.py +6 -2
  181. eval_studio_client/api/models/v1_operation_view.py +38 -0
  182. eval_studio_client/api/models/v1_retrieved_context_diff.py +101 -0
  183. eval_studio_client/api/models/v1_stats.py +16 -2
  184. eval_studio_client/api/models/v1_technical_metrics.py +96 -0
  185. eval_studio_client/api/models/v1_technical_metrics_detail.py +95 -0
  186. eval_studio_client/api/models/v1_test_case_leaderboard_item.py +91 -0
  187. eval_studio_client/api/models/v1_test_case_relationship_info.py +91 -0
  188. eval_studio_client/api/models/v1_test_case_result.py +157 -0
  189. eval_studio_client/api/models/v1_text_similarity_metric.py +39 -0
  190. eval_studio_client/api/models/v1_update_dashboard_test_case_annotation_response.py +91 -0
  191. eval_studio_client/api/models/v1_update_leaderboard_test_case_annotation_response.py +91 -0
  192. eval_studio_client/api/models/v1_workflow_node_type.py +1 -0
  193. eval_studio_client/api/models/v1_workflow_type.py +1 -0
  194. eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +6 -0
  195. eval_studio_client/api/test/test_dashboard_report_service_api.py +37 -0
  196. eval_studio_client/api/test/test_dashboard_test_case_annotation_service_api.py +43 -0
  197. eval_studio_client/api/test/test_leaderboard_report_service_api.py +6 -0
  198. eval_studio_client/api/test/test_leaderboard_service_api.py +12 -0
  199. eval_studio_client/api/test/test_leaderboard_test_case_annotation_service_api.py +43 -0
  200. eval_studio_client/api/test/test_operation_service_api.py +18 -0
  201. eval_studio_client/api/test/test_required_the_dashboard_test_case_annotation_to_update.py +57 -0
  202. eval_studio_client/api/test/test_required_the_leaderboard_test_case_annotation_to_update.py +57 -0
  203. eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +2 -1
  204. eval_studio_client/api/test/test_required_the_operation_to_finalize.py +2 -1
  205. eval_studio_client/api/test/test_required_the_operation_to_update.py +2 -1
  206. eval_studio_client/api/test/test_test_case_service_api.py +6 -0
  207. eval_studio_client/api/test/test_test_case_service_append_test_cases_request.py +52 -0
  208. eval_studio_client/api/test/test_test_service_api.py +6 -0
  209. eval_studio_client/api/test/test_v1_abort_operation_response.py +2 -1
  210. eval_studio_client/api/test/test_v1_actual_output_meta.py +61 -0
  211. eval_studio_client/api/test/test_v1_actual_output_meta_diff.py +66 -0
  212. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram.py +65 -0
  213. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_edge.py +53 -0
  214. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_node.py +53 -0
  215. eval_studio_client/api/test/test_v1_agent_chat_activity_diagram_row.py +56 -0
  216. eval_studio_client/api/test/test_v1_agent_chat_script_usage.py +54 -0
  217. eval_studio_client/api/test/test_v1_agent_chat_scripts_bar_chart.py +57 -0
  218. eval_studio_client/api/test/test_v1_agent_chat_tool_usage.py +54 -0
  219. eval_studio_client/api/test/test_v1_agent_chat_tools_bar_chart.py +57 -0
  220. eval_studio_client/api/test/test_v1_all_metric_scores.py +53 -0
  221. eval_studio_client/api/test/test_v1_append_test_cases_response.py +74 -0
  222. eval_studio_client/api/test/test_v1_batch_create_leaderboards_request.py +2 -1
  223. eval_studio_client/api/test/test_v1_batch_create_leaderboards_response.py +2 -1
  224. eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_request.py +120 -0
  225. eval_studio_client/api/test/test_v1_batch_create_leaderboards_without_cache_response.py +72 -0
  226. eval_studio_client/api/test/test_v1_batch_delete_leaderboards_response.py +2 -1
  227. eval_studio_client/api/test/test_v1_batch_get_leaderboards_response.py +2 -1
  228. eval_studio_client/api/test/test_v1_batch_get_operations_response.py +2 -1
  229. eval_studio_client/api/test/test_v1_batch_import_leaderboard_response.py +2 -1
  230. eval_studio_client/api/test/test_v1_batch_mark_operation_seen_by_creator_response.py +74 -0
  231. eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_request.py +55 -0
  232. eval_studio_client/api/test/test_v1_cmp_leaderboard_reports_response.py +255 -0
  233. eval_studio_client/api/test/test_v1_comparison_item.py +233 -0
  234. eval_studio_client/api/test/test_v1_comparison_metric_score.py +52 -0
  235. eval_studio_client/api/test/test_v1_comparison_result.py +258 -0
  236. eval_studio_client/api/test/test_v1_comparison_summary.py +53 -0
  237. eval_studio_client/api/test/test_v1_create_evaluation_request.py +2 -1
  238. eval_studio_client/api/test/test_v1_create_leaderboard_request.py +2 -1
  239. eval_studio_client/api/test/test_v1_create_leaderboard_response.py +2 -1
  240. eval_studio_client/api/test/test_v1_create_leaderboard_without_cache_response.py +2 -1
  241. eval_studio_client/api/test/test_v1_create_test_from_test_cases_request.py +54 -0
  242. eval_studio_client/api/test/test_v1_create_test_from_test_cases_response.py +68 -0
  243. eval_studio_client/api/test/test_v1_dashboard_report.py +142 -0
  244. eval_studio_client/api/test/test_v1_dashboard_report_result.py +72 -0
  245. eval_studio_client/api/test/test_v1_dashboard_test_case_annotation.py +58 -0
  246. eval_studio_client/api/test/test_v1_data_fragment.py +57 -0
  247. eval_studio_client/api/test/test_v1_deep_compare_leaderboards_request.py +55 -0
  248. eval_studio_client/api/test/test_v1_deep_compare_leaderboards_response.py +255 -0
  249. eval_studio_client/api/test/test_v1_delete_leaderboard_response.py +2 -1
  250. eval_studio_client/api/test/test_v1_diff_item.py +226 -0
  251. eval_studio_client/api/test/test_v1_evaluation_type.py +33 -0
  252. eval_studio_client/api/test/test_v1_finalize_operation_response.py +2 -1
  253. eval_studio_client/api/test/test_v1_flipped_metric.py +53 -0
  254. eval_studio_client/api/test/test_v1_generate_test_cases_response.py +2 -1
  255. eval_studio_client/api/test/test_v1_get_dashboard_report_response.py +143 -0
  256. eval_studio_client/api/test/test_v1_get_info_response.py +4 -1
  257. eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +39 -2
  258. eval_studio_client/api/test/test_v1_get_leaderboard_response.py +2 -1
  259. eval_studio_client/api/test/test_v1_get_operation_response.py +2 -1
  260. eval_studio_client/api/test/test_v1_get_stats_response.py +3 -1
  261. eval_studio_client/api/test/test_v1_human_decision.py +33 -0
  262. eval_studio_client/api/test/test_v1_import_leaderboard_response.py +2 -1
  263. eval_studio_client/api/test/test_v1_import_test_cases_from_library_response.py +2 -1
  264. eval_studio_client/api/test/test_v1_info.py +4 -1
  265. eval_studio_client/api/test/test_v1_leaderboard.py +2 -1
  266. eval_studio_client/api/test/test_v1_leaderboard_cmp_report.py +254 -0
  267. eval_studio_client/api/test/test_v1_leaderboard_comparison_item.py +53 -0
  268. eval_studio_client/api/test/test_v1_leaderboard_info.py +57 -0
  269. eval_studio_client/api/test/test_v1_leaderboard_report.py +39 -2
  270. eval_studio_client/api/test/test_v1_leaderboard_report_actual_output_meta.py +33 -1
  271. eval_studio_client/api/test/test_v1_leaderboard_report_result.py +39 -2
  272. eval_studio_client/api/test/test_v1_leaderboard_report_result_view.py +33 -0
  273. eval_studio_client/api/test/test_v1_leaderboard_test_case_annotation.py +58 -0
  274. eval_studio_client/api/test/test_v1_list_dashboard_test_case_annotations_response.py +61 -0
  275. eval_studio_client/api/test/test_v1_list_leaderboard_test_case_annotations_response.py +61 -0
  276. eval_studio_client/api/test/test_v1_list_leaderboards_response.py +2 -1
  277. eval_studio_client/api/test/test_v1_list_most_recent_leaderboards_response.py +2 -1
  278. eval_studio_client/api/test/test_v1_list_operations_response.py +4 -2
  279. eval_studio_client/api/test/test_v1_list_unseen_operations_response.py +75 -0
  280. eval_studio_client/api/test/test_v1_mark_operation_seen_by_creator_response.py +72 -0
  281. eval_studio_client/api/test/test_v1_metric.py +52 -0
  282. eval_studio_client/api/test/test_v1_metric_average.py +58 -0
  283. eval_studio_client/api/test/test_v1_metric_meta.py +66 -0
  284. eval_studio_client/api/test/test_v1_models_comparisons.py +54 -0
  285. eval_studio_client/api/test/test_v1_models_comparisons_metrics.py +65 -0
  286. eval_studio_client/api/test/test_v1_models_overview.py +60 -0
  287. eval_studio_client/api/test/test_v1_operation.py +2 -1
  288. eval_studio_client/api/test/test_v1_operation_view.py +33 -0
  289. eval_studio_client/api/test/test_v1_process_workflow_node_response.py +2 -1
  290. eval_studio_client/api/test/test_v1_retrieved_context_diff.py +66 -0
  291. eval_studio_client/api/test/test_v1_stats.py +3 -1
  292. eval_studio_client/api/test/test_v1_technical_metrics.py +62 -0
  293. eval_studio_client/api/test/test_v1_technical_metrics_detail.py +55 -0
  294. eval_studio_client/api/test/test_v1_test_case_leaderboard_item.py +53 -0
  295. eval_studio_client/api/test/test_v1_test_case_relationship_info.py +53 -0
  296. eval_studio_client/api/test/test_v1_test_case_result.py +106 -0
  297. eval_studio_client/api/test/test_v1_text_similarity_metric.py +33 -0
  298. eval_studio_client/api/test/test_v1_update_dashboard_test_case_annotation_response.py +59 -0
  299. eval_studio_client/api/test/test_v1_update_leaderboard_response.py +2 -1
  300. eval_studio_client/api/test/test_v1_update_leaderboard_test_case_annotation_response.py +59 -0
  301. eval_studio_client/api/test/test_v1_update_operation_response.py +2 -1
  302. eval_studio_client/gen/openapiv2/eval_studio.swagger.json +2340 -210
  303. eval_studio_client/models.py +18 -6
  304. {eval_studio_client-1.2.4a2.dist-info → eval_studio_client-1.3.0.dist-info}/METADATA +2 -2
  305. {eval_studio_client-1.2.4a2.dist-info → eval_studio_client-1.3.0.dist-info}/RECORD +306 -111
  306. {eval_studio_client-1.2.4a2.dist-info → eval_studio_client-1.3.0.dist-info}/WHEEL +0 -0
@@ -4,12 +4,12 @@ All URIs are relative to *http://localhost*
4
4
 
5
5
  Method | HTTP request | Description
6
6
  ------------- | ------------- | -------------
7
- [**perturbator_service_get_perturbator**](PerturbatorServiceApi.md#perturbator_service_get_perturbator) | **GET** /v1/{name_7} |
7
+ [**perturbator_service_get_perturbator**](PerturbatorServiceApi.md#perturbator_service_get_perturbator) | **GET** /v1/{name_8} |
8
8
  [**perturbator_service_list_perturbators**](PerturbatorServiceApi.md#perturbator_service_list_perturbators) | **GET** /v1/perturbators |
9
9
 
10
10
 
11
11
  # **perturbator_service_get_perturbator**
12
- > V1GetPerturbatorResponse perturbator_service_get_perturbator(name_7)
12
+ > V1GetPerturbatorResponse perturbator_service_get_perturbator(name_8)
13
13
 
14
14
 
15
15
 
@@ -33,10 +33,10 @@ configuration = eval_studio_client.api.Configuration(
33
33
  with eval_studio_client.api.ApiClient(configuration) as api_client:
34
34
  # Create an instance of the API class
35
35
  api_instance = eval_studio_client.api.PerturbatorServiceApi(api_client)
36
- name_7 = 'name_7_example' # str | Required. The name of the Perturbator to retrieve.
36
+ name_8 = 'name_8_example' # str | Required. The name of the Perturbator to retrieve.
37
37
 
38
38
  try:
39
- api_response = api_instance.perturbator_service_get_perturbator(name_7)
39
+ api_response = api_instance.perturbator_service_get_perturbator(name_8)
40
40
  print("The response of PerturbatorServiceApi->perturbator_service_get_perturbator:\n")
41
41
  pprint(api_response)
42
42
  except Exception as e:
@@ -50,7 +50,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:
50
50
 
51
51
  Name | Type | Description | Notes
52
52
  ------------- | ------------- | ------------- | -------------
53
- **name_7** | **str**| Required. The name of the Perturbator to retrieve. |
53
+ **name_8** | **str**| Required. The name of the Perturbator to retrieve. |
54
54
 
55
55
  ### Return type
56
56
 
@@ -110,7 +110,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:
110
110
  default_h2ogpte_model_description = 'default_h2ogpte_model_description_example' # str | Optional. Arbitrary description of the Model. (optional)
111
111
  default_h2ogpte_model_url = 'default_h2ogpte_model_url_example' # str | Optional. Immutable. Absolute URL to the Model. (optional)
112
112
  default_h2ogpte_model_api_key = 'default_h2ogpte_model_api_key_example' # str | Optional. API key used to access the Model. Not set for read calls (i.e. get, list) by public clients (front-end). Set only for internal (server-to-worker) communication. (optional)
113
- default_h2ogpte_model_type = 'MODEL_TYPE_UNSPECIFIED' # str | Immutable. Type of this Model. - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock. (optional) (default to 'MODEL_TYPE_UNSPECIFIED')
113
+ default_h2ogpte_model_type = 'MODEL_TYPE_UNSPECIFIED' # str | Immutable. Type of this Model. - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock. - MODEL_TYPE_ANTHROPIC_CLAUDE: Anthropic Claude chat. (optional) (default to 'MODEL_TYPE_UNSPECIFIED')
114
114
  default_h2ogpte_model_parameters = 'default_h2ogpte_model_parameters_example' # str | Optional. Model specific parameters in JSON format. (optional)
115
115
  default_h2ogpte_model_demo = True # bool | Output only. Whether the Model is a demo resource or not. Demo resources are read only. (optional)
116
116
 
@@ -140,7 +140,7 @@ Name | Type | Description | Notes
140
140
  **default_h2ogpte_model_description** | **str**| Optional. Arbitrary description of the Model. | [optional]
141
141
  **default_h2ogpte_model_url** | **str**| Optional. Immutable. Absolute URL to the Model. | [optional]
142
142
  **default_h2ogpte_model_api_key** | **str**| Optional. API key used to access the Model. Not set for read calls (i.e. get, list) by public clients (front-end). Set only for internal (server-to-worker) communication. | [optional]
143
- **default_h2ogpte_model_type** | **str**| Immutable. Type of this Model. - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock. | [optional] [default to 'MODEL_TYPE_UNSPECIFIED']
143
+ **default_h2ogpte_model_type** | **str**| Immutable. Type of this Model. - MODEL_TYPE_UNSPECIFIED: Unspecified type. - MODEL_TYPE_H2OGPTE_RAG: h2oGPTe RAG. - MODEL_TYPE_OPENAI_RAG: OpenAI Assistant RAG. - MODEL_TYPE_H2OGPTE_LLM: h2oGPTe LLM. - MODEL_TYPE_H2OGPT_LLM: h2oGPT LLM. - MODEL_TYPE_OPENAI_CHAT: OpenAI chat. - MODEL_TYPE_AZURE_OPENAI_CHAT: Microsoft Azure hosted OpenAI Chat. - MODEL_TYPE_OPENAI_API_CHAT: OpenAI API chat. - MODEL_TYPE_H2OLLMOPS: H2O LLMOps. - MODEL_TYPE_OLLAMA: Ollama. - MODEL_TYPE_AMAZON_BEDROCK: Amazon Bedrock. - MODEL_TYPE_ANTHROPIC_CLAUDE: Anthropic Claude chat. | [optional] [default to 'MODEL_TYPE_UNSPECIFIED']
144
144
  **default_h2ogpte_model_parameters** | **str**| Optional. Model specific parameters in JSON format. | [optional]
145
145
  **default_h2ogpte_model_demo** | **bool**| Output only. Whether the Model is a demo resource or not. Demo resources are read only. | [optional]
146
146
 
@@ -0,0 +1,35 @@
1
+ # RequiredTheDashboardTestCaseAnnotationToUpdate
2
+
3
+
4
+ ## Properties
5
+
6
+ Name | Type | Description | Notes
7
+ ------------ | ------------- | ------------- | -------------
8
+ **create_time** | **datetime** | Output only. Timestamp when the DashboardTestCaseAnnotation was created. | [optional] [readonly]
9
+ **creator** | **str** | Output only. Name of the user or service that requested creation of the DashboardTestCaseAnnotation. | [optional] [readonly]
10
+ **update_time** | **datetime** | Output only. Optional. Timestamp when the DashboardTestCaseAnnotation was last updated. | [optional] [readonly]
11
+ **updater** | **str** | Output only. Optional. Name of the user or service that requested update of the DashboardTestCaseAnnotation. | [optional] [readonly]
12
+ **parent** | **str** | Parent Dashboard Test Case resource name. e.g.: \"dashboards/<UUID>/testCases/<UUID>\". | [optional]
13
+ **key** | **str** | Immutable. Annotation key. | [optional]
14
+ **value** | **object** | Annotation value. | [optional]
15
+
16
+ ## Example
17
+
18
+ ```python
19
+ from eval_studio_client.api.models.required_the_dashboard_test_case_annotation_to_update import RequiredTheDashboardTestCaseAnnotationToUpdate
20
+
21
+ # TODO update the JSON string below
22
+ json = "{}"
23
+ # create an instance of RequiredTheDashboardTestCaseAnnotationToUpdate from a JSON string
24
+ required_the_dashboard_test_case_annotation_to_update_instance = RequiredTheDashboardTestCaseAnnotationToUpdate.from_json(json)
25
+ # print the JSON string representation of the object
26
+ print(required_the_dashboard_test_case_annotation_to_update_instance.to_json())
27
+
28
+ # convert the object into a dict
29
+ required_the_dashboard_test_case_annotation_to_update_dict = required_the_dashboard_test_case_annotation_to_update_instance.to_dict()
30
+ # create an instance of RequiredTheDashboardTestCaseAnnotationToUpdate from a dict
31
+ required_the_dashboard_test_case_annotation_to_update_from_dict = RequiredTheDashboardTestCaseAnnotationToUpdate.from_dict(required_the_dashboard_test_case_annotation_to_update_dict)
32
+ ```
33
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
34
+
35
+
@@ -0,0 +1,35 @@
1
+ # RequiredTheLeaderboardTestCaseAnnotationToUpdate
2
+
3
+
4
+ ## Properties
5
+
6
+ Name | Type | Description | Notes
7
+ ------------ | ------------- | ------------- | -------------
8
+ **create_time** | **datetime** | Output only. Timestamp when the LeaderboardTestCaseAnnotation was created. | [optional] [readonly]
9
+ **creator** | **str** | Output only. Name of the user or service that requested creation of the LeaderboardTestCaseAnnotation. | [optional] [readonly]
10
+ **update_time** | **datetime** | Output only. Optional. Timestamp when the LeaderboardTestCaseAnnotation was last updated. | [optional] [readonly]
11
+ **updater** | **str** | Output only. Optional. Name of the user or service that requested update of the LeaderboardTestCaseAnnotation. | [optional] [readonly]
12
+ **parent** | **str** | Parent Leaderboard Test Case resource name. e.g.: \"leaderboards/<UUID>/testCases/<UUID>\". | [optional]
13
+ **key** | **str** | Immutable. Annotation key. | [optional]
14
+ **value** | **object** | Annotation value. | [optional]
15
+
16
+ ## Example
17
+
18
+ ```python
19
+ from eval_studio_client.api.models.required_the_leaderboard_test_case_annotation_to_update import RequiredTheLeaderboardTestCaseAnnotationToUpdate
20
+
21
+ # TODO update the JSON string below
22
+ json = "{}"
23
+ # create an instance of RequiredTheLeaderboardTestCaseAnnotationToUpdate from a JSON string
24
+ required_the_leaderboard_test_case_annotation_to_update_instance = RequiredTheLeaderboardTestCaseAnnotationToUpdate.from_json(json)
25
+ # print the JSON string representation of the object
26
+ print(required_the_leaderboard_test_case_annotation_to_update_instance.to_json())
27
+
28
+ # convert the object into a dict
29
+ required_the_leaderboard_test_case_annotation_to_update_dict = required_the_leaderboard_test_case_annotation_to_update_instance.to_dict()
30
+ # create an instance of RequiredTheLeaderboardTestCaseAnnotationToUpdate from a dict
31
+ required_the_leaderboard_test_case_annotation_to_update_from_dict = RequiredTheLeaderboardTestCaseAnnotationToUpdate.from_dict(required_the_leaderboard_test_case_annotation_to_update_dict)
32
+ ```
33
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
34
+
35
+
@@ -31,6 +31,7 @@ Name | Type | Description | Notes
31
31
  **type** | [**V1LeaderboardType**](V1LeaderboardType.md) | | [optional]
32
32
  **demo** | **bool** | Output only. Whether the Leaderboard is a demo resource or not. Demo resources are read only. | [optional] [readonly]
33
33
  **test_lab** | **str** | Optional. Resource name of the TestLab if Leaderboard was created from an imported TestLab. | [optional]
34
+ **evaluation_type** | [**V1EvaluationType**](V1EvaluationType.md) | | [optional]
34
35
 
35
36
  ## Example
36
37
 
@@ -16,6 +16,7 @@ Name | Type | Description | Notes
16
16
  **done** | **bool** | If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. | [optional]
17
17
  **error** | [**RpcStatus**](RpcStatus.md) | | [optional]
18
18
  **response** | [**ProtobufAny**](ProtobufAny.md) | | [optional]
19
+ **seen_by_creator_time** | **datetime** | Output only. Optional. Timestamp when the creator marked the Operation as seen. Once set, this field cannot be changed. Set via MarkOperationSeenByCreator method. | [optional] [readonly]
19
20
 
20
21
  ## Example
21
22
 
@@ -16,6 +16,7 @@ Name | Type | Description | Notes
16
16
  **done** | **bool** | If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. | [optional]
17
17
  **error** | [**RpcStatus**](RpcStatus.md) | | [optional]
18
18
  **response** | [**ProtobufAny**](ProtobufAny.md) | | [optional]
19
+ **seen_by_creator_time** | **datetime** | Output only. Optional. Timestamp when the creator marked the Operation as seen. Once set, this field cannot be changed. Set via MarkOperationSeenByCreator method. | [optional] [readonly]
19
20
 
20
21
  ## Example
21
22
 
@@ -4,15 +4,85 @@ All URIs are relative to *http://localhost*
4
4
 
5
5
  Method | HTTP request | Description
6
6
  ------------- | ------------- | -------------
7
+ [**test_case_service_append_test_cases**](TestCaseServiceApi.md#test_case_service_append_test_cases) | **POST** /v1/{parent}/testCases:append |
7
8
  [**test_case_service_batch_delete_test_cases**](TestCaseServiceApi.md#test_case_service_batch_delete_test_cases) | **POST** /v1/{parent}/testCases:batchDelete |
8
9
  [**test_case_service_create_test_case**](TestCaseServiceApi.md#test_case_service_create_test_case) | **POST** /v1/{parent}/testCases |
9
10
  [**test_case_service_delete_test_case**](TestCaseServiceApi.md#test_case_service_delete_test_case) | **DELETE** /v1/{name_5} |
10
11
  [**test_case_service_find_all_test_cases_by_id**](TestCaseServiceApi.md#test_case_service_find_all_test_cases_by_id) | **GET** /v1/tests/-/testCases:findAllTestCasesByID |
11
- [**test_case_service_get_test_case**](TestCaseServiceApi.md#test_case_service_get_test_case) | **GET** /v1/{name_8} |
12
+ [**test_case_service_get_test_case**](TestCaseServiceApi.md#test_case_service_get_test_case) | **GET** /v1/{name_9} |
12
13
  [**test_case_service_list_test_cases**](TestCaseServiceApi.md#test_case_service_list_test_cases) | **GET** /v1/{parent}/testCases |
13
14
  [**test_case_service_update_test_case**](TestCaseServiceApi.md#test_case_service_update_test_case) | **PATCH** /v1/{testCase.name} |
14
15
 
15
16
 
17
+ # **test_case_service_append_test_cases**
18
+ > V1AppendTestCasesResponse test_case_service_append_test_cases(parent, body)
19
+
20
+
21
+
22
+ ### Example
23
+
24
+
25
+ ```python
26
+ import eval_studio_client.api
27
+ from eval_studio_client.api.models.test_case_service_append_test_cases_request import TestCaseServiceAppendTestCasesRequest
28
+ from eval_studio_client.api.models.v1_append_test_cases_response import V1AppendTestCasesResponse
29
+ from eval_studio_client.api.rest import ApiException
30
+ from pprint import pprint
31
+
32
+ # Defining the host is optional and defaults to http://localhost
33
+ # See configuration.py for a list of all supported configuration parameters.
34
+ configuration = eval_studio_client.api.Configuration(
35
+ host = "http://localhost"
36
+ )
37
+
38
+
39
+ # Enter a context with an instance of the API client
40
+ with eval_studio_client.api.ApiClient(configuration) as api_client:
41
+ # Create an instance of the API class
42
+ api_instance = eval_studio_client.api.TestCaseServiceApi(api_client)
43
+ parent = 'parent_example' # str | Required. The parent Test where the TestCases will be imported. Format: tests/<UUID>
44
+ body = eval_studio_client.api.TestCaseServiceAppendTestCasesRequest() # TestCaseServiceAppendTestCasesRequest |
45
+
46
+ try:
47
+ api_response = api_instance.test_case_service_append_test_cases(parent, body)
48
+ print("The response of TestCaseServiceApi->test_case_service_append_test_cases:\n")
49
+ pprint(api_response)
50
+ except Exception as e:
51
+ print("Exception when calling TestCaseServiceApi->test_case_service_append_test_cases: %s\n" % e)
52
+ ```
53
+
54
+
55
+
56
+ ### Parameters
57
+
58
+
59
+ Name | Type | Description | Notes
60
+ ------------- | ------------- | ------------- | -------------
61
+ **parent** | **str**| Required. The parent Test where the TestCases will be imported. Format: tests/&lt;UUID&gt; |
62
+ **body** | [**TestCaseServiceAppendTestCasesRequest**](TestCaseServiceAppendTestCasesRequest.md)| |
63
+
64
+ ### Return type
65
+
66
+ [**V1AppendTestCasesResponse**](V1AppendTestCasesResponse.md)
67
+
68
+ ### Authorization
69
+
70
+ No authorization required
71
+
72
+ ### HTTP request headers
73
+
74
+ - **Content-Type**: application/json
75
+ - **Accept**: application/json
76
+
77
+ ### HTTP response details
78
+
79
+ | Status code | Description | Response headers |
80
+ |-------------|-------------|------------------|
81
+ **200** | A successful response. | - |
82
+ **0** | An unexpected error response. | - |
83
+
84
+ [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
85
+
16
86
  # **test_case_service_batch_delete_test_cases**
17
87
  > V1BatchDeleteTestCasesResponse test_case_service_batch_delete_test_cases(parent, body)
18
88
 
@@ -284,7 +354,7 @@ No authorization required
284
354
  [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
285
355
 
286
356
  # **test_case_service_get_test_case**
287
- > V1GetTestCaseResponse test_case_service_get_test_case(name_8)
357
+ > V1GetTestCaseResponse test_case_service_get_test_case(name_9)
288
358
 
289
359
 
290
360
 
@@ -308,10 +378,10 @@ configuration = eval_studio_client.api.Configuration(
308
378
  with eval_studio_client.api.ApiClient(configuration) as api_client:
309
379
  # Create an instance of the API class
310
380
  api_instance = eval_studio_client.api.TestCaseServiceApi(api_client)
311
- name_8 = 'name_8_example' # str | The name of the TestCase to retrieve. Format: tests/<UUID>/testCases/<UUID>
381
+ name_9 = 'name_9_example' # str | The name of the TestCase to retrieve. Format: tests/<UUID>/testCases/<UUID>
312
382
 
313
383
  try:
314
- api_response = api_instance.test_case_service_get_test_case(name_8)
384
+ api_response = api_instance.test_case_service_get_test_case(name_9)
315
385
  print("The response of TestCaseServiceApi->test_case_service_get_test_case:\n")
316
386
  pprint(api_response)
317
387
  except Exception as e:
@@ -325,7 +395,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:
325
395
 
326
396
  Name | Type | Description | Notes
327
397
  ------------- | ------------- | ------------- | -------------
328
- **name_8** | **str**| The name of the TestCase to retrieve. Format: tests/&lt;UUID&gt;/testCases/&lt;UUID&gt; |
398
+ **name_9** | **str**| The name of the TestCase to retrieve. Format: tests/&lt;UUID&gt;/testCases/&lt;UUID&gt; |
329
399
 
330
400
  ### Return type
331
401
 
@@ -0,0 +1,30 @@
1
+ # TestCaseServiceAppendTestCasesRequest
2
+
3
+
4
+ ## Properties
5
+
6
+ Name | Type | Description | Notes
7
+ ------------ | ------------- | ------------- | -------------
8
+ **test_cases_json** | **str** | Test Cases in JSON format. | [optional]
9
+ **url** | **str** | URL pointing to the Test Cases in JSON format to import. | [optional]
10
+
11
+ ## Example
12
+
13
+ ```python
14
+ from eval_studio_client.api.models.test_case_service_append_test_cases_request import TestCaseServiceAppendTestCasesRequest
15
+
16
+ # TODO update the JSON string below
17
+ json = "{}"
18
+ # create an instance of TestCaseServiceAppendTestCasesRequest from a JSON string
19
+ test_case_service_append_test_cases_request_instance = TestCaseServiceAppendTestCasesRequest.from_json(json)
20
+ # print the JSON string representation of the object
21
+ print(TestCaseServiceAppendTestCasesRequest.to_json())
22
+
23
+ # convert the object into a dict
24
+ test_case_service_append_test_cases_request_dict = test_case_service_append_test_cases_request_instance.to_dict()
25
+ # create an instance of TestCaseServiceAppendTestCasesRequest from a dict
26
+ test_case_service_append_test_cases_request_from_dict = TestCaseServiceAppendTestCasesRequest.from_dict(test_case_service_append_test_cases_request_dict)
27
+ ```
28
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
29
+
30
+
@@ -4,12 +4,12 @@ All URIs are relative to *http://localhost*
4
4
 
5
5
  Method | HTTP request | Description
6
6
  ------------- | ------------- | -------------
7
- [**test_class_service_get_test_class**](TestClassServiceApi.md#test_class_service_get_test_class) | **GET** /v1/{name_9} |
7
+ [**test_class_service_get_test_class**](TestClassServiceApi.md#test_class_service_get_test_class) | **GET** /v1/{name_10} |
8
8
  [**test_class_service_list_test_classes**](TestClassServiceApi.md#test_class_service_list_test_classes) | **GET** /v1/testClasses |
9
9
 
10
10
 
11
11
  # **test_class_service_get_test_class**
12
- > V1GetTestClassResponse test_class_service_get_test_class(name_9)
12
+ > V1GetTestClassResponse test_class_service_get_test_class(name_10)
13
13
 
14
14
 
15
15
 
@@ -33,10 +33,10 @@ configuration = eval_studio_client.api.Configuration(
33
33
  with eval_studio_client.api.ApiClient(configuration) as api_client:
34
34
  # Create an instance of the API class
35
35
  api_instance = eval_studio_client.api.TestClassServiceApi(api_client)
36
- name_9 = 'name_9_example' # str | The name of the TestClass to retrieve. Format: testClasses/<UUID>
36
+ name_10 = 'name_10_example' # str | The name of the TestClass to retrieve. Format: testClasses/<UUID>
37
37
 
38
38
  try:
39
- api_response = api_instance.test_class_service_get_test_class(name_9)
39
+ api_response = api_instance.test_class_service_get_test_class(name_10)
40
40
  print("The response of TestClassServiceApi->test_class_service_get_test_class:\n")
41
41
  pprint(api_response)
42
42
  except Exception as e:
@@ -50,7 +50,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:
50
50
 
51
51
  Name | Type | Description | Notes
52
52
  ------------- | ------------- | ------------- | -------------
53
- **name_9** | **str**| The name of the TestClass to retrieve. Format: testClasses/&lt;UUID&gt; |
53
+ **name_10** | **str**| The name of the TestClass to retrieve. Format: testClasses/&lt;UUID&gt; |
54
54
 
55
55
  ### Return type
56
56
 
@@ -9,9 +9,10 @@ Method | HTTP request | Description
9
9
  [**test_service_batch_import_tests**](TestServiceApi.md#test_service_batch_import_tests) | **POST** /v1/tests:batchImport |
10
10
  [**test_service_clone_test**](TestServiceApi.md#test_service_clone_test) | **POST** /v1/{name}:clone |
11
11
  [**test_service_create_test**](TestServiceApi.md#test_service_create_test) | **POST** /v1/tests |
12
+ [**test_service_create_test_from_test_cases**](TestServiceApi.md#test_service_create_test_from_test_cases) | **POST** /v1/tests:createTestFromTestCases |
12
13
  [**test_service_delete_test**](TestServiceApi.md#test_service_delete_test) | **DELETE** /v1/{name_6} |
13
14
  [**test_service_generate_test_cases**](TestServiceApi.md#test_service_generate_test_cases) | **POST** /v1/{name}:generateTestCases |
14
- [**test_service_get_test**](TestServiceApi.md#test_service_get_test) | **GET** /v1/{name_10} |
15
+ [**test_service_get_test**](TestServiceApi.md#test_service_get_test) | **GET** /v1/{name_11} |
15
16
  [**test_service_grant_test_access**](TestServiceApi.md#test_service_grant_test_access) | **POST** /v1/{name_1}:grantAccess |
16
17
  [**test_service_import_test_cases_from_library**](TestServiceApi.md#test_service_import_test_cases_from_library) | **POST** /v1/{name}:importTestCasesFromLibrary |
17
18
  [**test_service_list_most_recent_tests**](TestServiceApi.md#test_service_list_most_recent_tests) | **GET** /v1/tests:mostRecent |
@@ -361,6 +362,73 @@ No authorization required
361
362
 
362
363
  [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
363
364
 
365
+ # **test_service_create_test_from_test_cases**
366
+ > V1CreateTestFromTestCasesResponse test_service_create_test_from_test_cases(body)
367
+
368
+
369
+
370
+ ### Example
371
+
372
+
373
+ ```python
374
+ import eval_studio_client.api
375
+ from eval_studio_client.api.models.v1_create_test_from_test_cases_request import V1CreateTestFromTestCasesRequest
376
+ from eval_studio_client.api.models.v1_create_test_from_test_cases_response import V1CreateTestFromTestCasesResponse
377
+ from eval_studio_client.api.rest import ApiException
378
+ from pprint import pprint
379
+
380
+ # Defining the host is optional and defaults to http://localhost
381
+ # See configuration.py for a list of all supported configuration parameters.
382
+ configuration = eval_studio_client.api.Configuration(
383
+ host = "http://localhost"
384
+ )
385
+
386
+
387
+ # Enter a context with an instance of the API client
388
+ with eval_studio_client.api.ApiClient(configuration) as api_client:
389
+ # Create an instance of the API class
390
+ api_instance = eval_studio_client.api.TestServiceApi(api_client)
391
+ body = eval_studio_client.api.V1CreateTestFromTestCasesRequest() # V1CreateTestFromTestCasesRequest |
392
+
393
+ try:
394
+ api_response = api_instance.test_service_create_test_from_test_cases(body)
395
+ print("The response of TestServiceApi->test_service_create_test_from_test_cases:\n")
396
+ pprint(api_response)
397
+ except Exception as e:
398
+ print("Exception when calling TestServiceApi->test_service_create_test_from_test_cases: %s\n" % e)
399
+ ```
400
+
401
+
402
+
403
+ ### Parameters
404
+
405
+
406
+ Name | Type | Description | Notes
407
+ ------------- | ------------- | ------------- | -------------
408
+ **body** | [**V1CreateTestFromTestCasesRequest**](V1CreateTestFromTestCasesRequest.md)| |
409
+
410
+ ### Return type
411
+
412
+ [**V1CreateTestFromTestCasesResponse**](V1CreateTestFromTestCasesResponse.md)
413
+
414
+ ### Authorization
415
+
416
+ No authorization required
417
+
418
+ ### HTTP request headers
419
+
420
+ - **Content-Type**: application/json
421
+ - **Accept**: application/json
422
+
423
+ ### HTTP response details
424
+
425
+ | Status code | Description | Response headers |
426
+ |-------------|-------------|------------------|
427
+ **200** | A successful response. | - |
428
+ **0** | An unexpected error response. | - |
429
+
430
+ [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
431
+
364
432
  # **test_service_delete_test**
365
433
  > V1DeleteTestResponse test_service_delete_test(name_6, force=force)
366
434
 
@@ -499,7 +567,7 @@ No authorization required
499
567
  [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
500
568
 
501
569
  # **test_service_get_test**
502
- > V1GetTestResponse test_service_get_test(name_10)
570
+ > V1GetTestResponse test_service_get_test(name_11)
503
571
 
504
572
 
505
573
 
@@ -523,10 +591,10 @@ configuration = eval_studio_client.api.Configuration(
523
591
  with eval_studio_client.api.ApiClient(configuration) as api_client:
524
592
  # Create an instance of the API class
525
593
  api_instance = eval_studio_client.api.TestServiceApi(api_client)
526
- name_10 = 'name_10_example' # str | Required. The name of the Test to retrieve.
594
+ name_11 = 'name_11_example' # str | Required. The name of the Test to retrieve.
527
595
 
528
596
  try:
529
- api_response = api_instance.test_service_get_test(name_10)
597
+ api_response = api_instance.test_service_get_test(name_11)
530
598
  print("The response of TestServiceApi->test_service_get_test:\n")
531
599
  pprint(api_response)
532
600
  except Exception as e:
@@ -540,7 +608,7 @@ with eval_studio_client.api.ApiClient(configuration) as api_client:
540
608
 
541
609
  Name | Type | Description | Notes
542
610
  ------------- | ------------- | ------------- | -------------
543
- **name_10** | **str**| Required. The name of the Test to retrieve. |
611
+ **name_11** | **str**| Required. The name of the Test to retrieve. |
544
612
 
545
613
  ### Return type
546
614
 
@@ -0,0 +1,30 @@
1
+ # V1ActualOutputMeta
2
+
3
+
4
+ ## Properties
5
+
6
+ Name | Type | Description | Notes
7
+ ------------ | ------------- | ------------- | -------------
8
+ **tokenization** | **str** | Tokenization method. | [optional]
9
+ **data** | [**List[V1DataFragment]**](V1DataFragment.md) | Data fragments. | [optional]
10
+
11
+ ## Example
12
+
13
+ ```python
14
+ from eval_studio_client.api.models.v1_actual_output_meta import V1ActualOutputMeta
15
+
16
+ # TODO update the JSON string below
17
+ json = "{}"
18
+ # create an instance of V1ActualOutputMeta from a JSON string
19
+ v1_actual_output_meta_instance = V1ActualOutputMeta.from_json(json)
20
+ # print the JSON string representation of the object
21
+ print(v1_actual_output_meta_instance.to_json())
22
+
23
+ # convert the object into a dict
24
+ v1_actual_output_meta_dict = v1_actual_output_meta_instance.to_dict()
25
+ # create an instance of V1ActualOutputMeta from a dict
26
+ v1_actual_output_meta_from_dict = V1ActualOutputMeta.from_dict(v1_actual_output_meta_dict)
27
+ ```
28
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
29
+
30
+
@@ -0,0 +1,36 @@
1
+ # V1ActualOutputMetaDiff
2
+
3
+
4
+ ## Properties
5
+
6
+ Name | Type | Description | Notes
7
+ ------------ | ------------- | ------------- | -------------
8
+ **sentences** | **List[str]** | Sentences. | [optional]
9
+ **sentences_count** | **int** | Sentence count. | [optional]
10
+ **common_sentences** | **List[str]** | Common sentences between baseline and current. | [optional]
11
+ **common_count** | **int** | Common sentence count. | [optional]
12
+ **unique_sentences** | **List[str]** | Unique sentences. | [optional]
13
+ **unique_count** | **int** | Unique sentence count. | [optional]
14
+ **identical** | **bool** | Whether outputs are identical. | [optional]
15
+ **sentence_similarity** | **Dict[str, float]** | Sentence similarity scores. | [optional]
16
+
17
+ ## Example
18
+
19
+ ```python
20
+ from eval_studio_client.api.models.v1_actual_output_meta_diff import V1ActualOutputMetaDiff
21
+
22
+ # TODO update the JSON string below
23
+ json = "{}"
24
+ # create an instance of V1ActualOutputMetaDiff from a JSON string
25
+ v1_actual_output_meta_diff_instance = V1ActualOutputMetaDiff.from_json(json)
26
+ # print the JSON string representation of the object
27
+ print(v1_actual_output_meta_diff_instance.to_json())
28
+
29
+ # convert the object into a dict
30
+ v1_actual_output_meta_diff_dict = v1_actual_output_meta_diff_instance.to_dict()
31
+ # create an instance of V1ActualOutputMetaDiff from a dict
32
+ v1_actual_output_meta_diff_from_dict = V1ActualOutputMetaDiff.from_dict(v1_actual_output_meta_diff_dict)
33
+ ```
34
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
35
+
36
+
@@ -0,0 +1,31 @@
1
+ # V1AgentChatActivityDiagram
2
+
3
+ AgentChatActivityDiagram represents the activity diagram for agent chat interactions.
4
+
5
+ ## Properties
6
+
7
+ Name | Type | Description | Notes
8
+ ------------ | ------------- | ------------- | -------------
9
+ **rows** | [**List[V1AgentChatActivityDiagramRow]**](V1AgentChatActivityDiagramRow.md) | Output only. List of rows in the activity diagram. | [optional] [readonly]
10
+ **edges** | [**List[V1AgentChatActivityDiagramEdge]**](V1AgentChatActivityDiagramEdge.md) | Output only. List of edges connecting nodes in the activity diagram. | [optional] [readonly]
11
+
12
+ ## Example
13
+
14
+ ```python
15
+ from eval_studio_client.api.models.v1_agent_chat_activity_diagram import V1AgentChatActivityDiagram
16
+
17
+ # TODO update the JSON string below
18
+ json = "{}"
19
+ # create an instance of V1AgentChatActivityDiagram from a JSON string
20
+ v1_agent_chat_activity_diagram_instance = V1AgentChatActivityDiagram.from_json(json)
21
+ # print the JSON string representation of the object
22
+ print(v1_agent_chat_activity_diagram_instance.to_json())
23
+
24
+ # convert the object into a dict
25
+ v1_agent_chat_activity_diagram_dict = v1_agent_chat_activity_diagram_instance.to_dict()
26
+ # create an instance of V1AgentChatActivityDiagram from a dict
27
+ v1_agent_chat_activity_diagram_from_dict = V1AgentChatActivityDiagram.from_dict(v1_agent_chat_activity_diagram_dict)
28
+ ```
29
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
30
+
31
+
@@ -0,0 +1,32 @@
1
+ # V1AgentChatActivityDiagramEdge
2
+
3
+ AgentChatActivityDiagramEdge represents an edge connecting two nodes.
4
+
5
+ ## Properties
6
+
7
+ Name | Type | Description | Notes
8
+ ------------ | ------------- | ------------- | -------------
9
+ **var_from** | **str** | Output only. Source node ID. | [optional] [readonly]
10
+ **to** | **str** | Output only. Target node ID. | [optional] [readonly]
11
+ **label** | **str** | Output only. Label for the edge. | [optional] [readonly]
12
+
13
+ ## Example
14
+
15
+ ```python
16
+ from eval_studio_client.api.models.v1_agent_chat_activity_diagram_edge import V1AgentChatActivityDiagramEdge
17
+
18
+ # TODO update the JSON string below
19
+ json = "{}"
20
+ # create an instance of V1AgentChatActivityDiagramEdge from a JSON string
21
+ v1_agent_chat_activity_diagram_edge_instance = V1AgentChatActivityDiagramEdge.from_json(json)
22
+ # print the JSON string representation of the object
23
+ print(v1_agent_chat_activity_diagram_edge_instance.to_json())
24
+
25
+ # convert the object into a dict
26
+ v1_agent_chat_activity_diagram_edge_dict = v1_agent_chat_activity_diagram_edge_instance.to_dict()
27
+ # create an instance of V1AgentChatActivityDiagramEdge from a dict
28
+ v1_agent_chat_activity_diagram_edge_from_dict = V1AgentChatActivityDiagramEdge.from_dict(v1_agent_chat_activity_diagram_edge_dict)
29
+ ```
30
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
31
+
32
+
@@ -0,0 +1,32 @@
1
+ # V1AgentChatActivityDiagramNode
2
+
3
+ AgentChatActivityDiagramNode represents a node in the activity diagram.
4
+
5
+ ## Properties
6
+
7
+ Name | Type | Description | Notes
8
+ ------------ | ------------- | ------------- | -------------
9
+ **id** | **str** | Output only. Unique identifier for the node. | [optional] [readonly]
10
+ **role** | **str** | Output only. Role of the node (runtime, user, agent, assistant, etc.). | [optional] [readonly]
11
+ **label** | **str** | Output only. Label for the node. | [optional] [readonly]
12
+
13
+ ## Example
14
+
15
+ ```python
16
+ from eval_studio_client.api.models.v1_agent_chat_activity_diagram_node import V1AgentChatActivityDiagramNode
17
+
18
+ # TODO update the JSON string below
19
+ json = "{}"
20
+ # create an instance of V1AgentChatActivityDiagramNode from a JSON string
21
+ v1_agent_chat_activity_diagram_node_instance = V1AgentChatActivityDiagramNode.from_json(json)
22
+ # print the JSON string representation of the object
23
+ print(v1_agent_chat_activity_diagram_node_instance.to_json())
24
+
25
+ # convert the object into a dict
26
+ v1_agent_chat_activity_diagram_node_dict = v1_agent_chat_activity_diagram_node_instance.to_dict()
27
+ # create an instance of V1AgentChatActivityDiagramNode from a dict
28
+ v1_agent_chat_activity_diagram_node_from_dict = V1AgentChatActivityDiagramNode.from_dict(v1_agent_chat_activity_diagram_node_dict)
29
+ ```
30
+ [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
31
+
32
+