eval-studio-client 0.8.0a2__py3-none-any.whl → 0.8.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (573)
  1. eval_studio_client/__init__.py +2 -1
  2. eval_studio_client/api/__init__.py +125 -120
  3. eval_studio_client/api/api/__init__.py +1 -0
  4. eval_studio_client/api/api/dashboard_service_api.py +71 -71
  5. eval_studio_client/api/api/document_service_api.py +64 -64
  6. eval_studio_client/api/api/evaluation_service_api.py +42 -42
  7. eval_studio_client/api/api/evaluator_service_api.py +50 -50
  8. eval_studio_client/api/api/info_service_api.py +8 -8
  9. eval_studio_client/api/api/leaderboard_service_api.py +126 -126
  10. eval_studio_client/api/api/model_service_api.py +92 -92
  11. eval_studio_client/api/api/operation_progress_service_api.py +8 -8
  12. eval_studio_client/api/api/operation_service_api.py +36 -36
  13. eval_studio_client/api/api/perturbation_service_api.py +8 -8
  14. eval_studio_client/api/api/perturbator_service_api.py +15 -15
  15. eval_studio_client/api/api/prompt_generation_service_api.py +321 -0
  16. eval_studio_client/api/api/test_case_service_api.py +57 -57
  17. eval_studio_client/api/api/test_class_service_api.py +15 -15
  18. eval_studio_client/api/api/test_lab_service_api.py +22 -22
  19. eval_studio_client/api/api/test_service_api.py +376 -92
  20. eval_studio_client/api/api/who_am_i_service_api.py +8 -8
  21. eval_studio_client/api/api_client.py +1 -1
  22. eval_studio_client/api/configuration.py +1 -1
  23. eval_studio_client/api/docs/DashboardServiceApi.md +38 -38
  24. eval_studio_client/api/docs/DocumentServiceApi.md +34 -34
  25. eval_studio_client/api/docs/EvaluationServiceApi.md +22 -22
  26. eval_studio_client/api/docs/EvaluatorServiceApi.md +26 -26
  27. eval_studio_client/api/docs/InfoServiceApi.md +4 -4
  28. eval_studio_client/api/docs/LeaderboardServiceApi.md +66 -66
  29. eval_studio_client/api/docs/ModelServiceApi.md +50 -50
  30. eval_studio_client/api/docs/OperationProgressServiceApi.md +4 -4
  31. eval_studio_client/api/docs/OperationServiceApi.md +20 -20
  32. eval_studio_client/api/docs/PerturbationServiceApi.md +4 -4
  33. eval_studio_client/api/docs/PerturbationServiceCreatePerturbationRequest.md +3 -3
  34. eval_studio_client/api/docs/PerturbatorServiceApi.md +8 -8
  35. eval_studio_client/api/docs/PromptGenerationServiceApi.md +78 -0
  36. eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +35 -0
  37. eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +1 -1
  38. eval_studio_client/api/docs/RequiredTheLeaderboardToUpdate.md +4 -4
  39. eval_studio_client/api/docs/RequiredTheModelToUpdate.md +1 -1
  40. eval_studio_client/api/docs/TestCaseServiceApi.md +31 -31
  41. eval_studio_client/api/docs/TestClassServiceApi.md +8 -8
  42. eval_studio_client/api/docs/TestLabServiceApi.md +11 -11
  43. eval_studio_client/api/docs/TestServiceApi.md +119 -49
  44. eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +33 -0
  45. eval_studio_client/api/docs/TestServicePerturbTestRequest.md +1 -1
  46. eval_studio_client/api/docs/V1BatchCreateLeaderboardsRequest.md +31 -0
  47. eval_studio_client/api/docs/V1BatchCreateLeaderboardsResponse.md +29 -0
  48. eval_studio_client/api/docs/V1BatchDeleteDashboardsRequest.md +29 -0
  49. eval_studio_client/api/docs/V1BatchDeleteDashboardsResponse.md +29 -0
  50. eval_studio_client/api/docs/V1BatchDeleteDocumentsRequest.md +29 -0
  51. eval_studio_client/api/docs/V1BatchDeleteDocumentsResponse.md +29 -0
  52. eval_studio_client/api/docs/V1BatchDeleteEvaluatorsRequest.md +29 -0
  53. eval_studio_client/api/docs/V1BatchDeleteEvaluatorsResponse.md +29 -0
  54. eval_studio_client/api/docs/V1BatchDeleteLeaderboardsRequest.md +30 -0
  55. eval_studio_client/api/docs/V1BatchDeleteLeaderboardsResponse.md +29 -0
  56. eval_studio_client/api/docs/V1BatchDeleteModelsRequest.md +29 -0
  57. eval_studio_client/api/docs/V1BatchDeleteModelsResponse.md +29 -0
  58. eval_studio_client/api/docs/V1BatchDeleteTestCasesResponse.md +29 -0
  59. eval_studio_client/api/docs/{V1alphaBatchDeleteTestsRequest.md → V1BatchDeleteTestsRequest.md} +8 -8
  60. eval_studio_client/api/docs/V1BatchDeleteTestsResponse.md +29 -0
  61. eval_studio_client/api/docs/V1BatchGetDashboardsResponse.md +29 -0
  62. eval_studio_client/api/docs/V1BatchGetDocumentsResponse.md +29 -0
  63. eval_studio_client/api/docs/V1BatchGetLeaderboardsResponse.md +29 -0
  64. eval_studio_client/api/docs/V1BatchGetModelsResponse.md +29 -0
  65. eval_studio_client/api/docs/V1BatchGetOperationsResponse.md +29 -0
  66. eval_studio_client/api/docs/V1BatchGetTestsResponse.md +29 -0
  67. eval_studio_client/api/docs/{V1alphaBatchImportLeaderboardRequest.md → V1BatchImportLeaderboardRequest.md} +9 -9
  68. eval_studio_client/api/docs/V1BatchImportLeaderboardResponse.md +29 -0
  69. eval_studio_client/api/docs/{V1alphaBatchImportTestsRequest.md → V1BatchImportTestsRequest.md} +8 -8
  70. eval_studio_client/api/docs/V1BatchImportTestsResponse.md +29 -0
  71. eval_studio_client/api/docs/V1CheckBaseModelsResponse.md +30 -0
  72. eval_studio_client/api/docs/{V1alphaCollectionInfo.md → V1CollectionInfo.md} +8 -8
  73. eval_studio_client/api/docs/V1CreateDashboardResponse.md +29 -0
  74. eval_studio_client/api/docs/V1CreateDocumentResponse.md +29 -0
  75. eval_studio_client/api/docs/{V1alphaCreateEvaluationRequest.md → V1CreateEvaluationRequest.md} +10 -10
  76. eval_studio_client/api/docs/V1CreateEvaluatorResponse.md +29 -0
  77. eval_studio_client/api/docs/V1CreateLeaderboardRequest.md +29 -0
  78. eval_studio_client/api/docs/V1CreateLeaderboardResponse.md +29 -0
  79. eval_studio_client/api/docs/V1CreateLeaderboardWithoutCacheResponse.md +29 -0
  80. eval_studio_client/api/docs/V1CreateModelResponse.md +29 -0
  81. eval_studio_client/api/docs/V1CreatePerturbationResponse.md +29 -0
  82. eval_studio_client/api/docs/V1CreateTestCaseResponse.md +29 -0
  83. eval_studio_client/api/docs/V1CreateTestLabResponse.md +29 -0
  84. eval_studio_client/api/docs/V1CreateTestResponse.md +29 -0
  85. eval_studio_client/api/docs/{V1alphaDashboard.md → V1Dashboard.md} +9 -9
  86. eval_studio_client/api/docs/{V1alphaDashboardStatus.md → V1DashboardStatus.md} +1 -1
  87. eval_studio_client/api/docs/V1DeleteDashboardResponse.md +29 -0
  88. eval_studio_client/api/docs/V1DeleteDocumentResponse.md +29 -0
  89. eval_studio_client/api/docs/V1DeleteEvaluatorResponse.md +29 -0
  90. eval_studio_client/api/docs/V1DeleteLeaderboardResponse.md +29 -0
  91. eval_studio_client/api/docs/V1DeleteModelResponse.md +29 -0
  92. eval_studio_client/api/docs/V1DeleteTestCaseResponse.md +29 -0
  93. eval_studio_client/api/docs/V1DeleteTestResponse.md +29 -0
  94. eval_studio_client/api/docs/{V1alphaDocument.md → V1Document.md} +8 -8
  95. eval_studio_client/api/docs/V1EvaluationTest.md +32 -0
  96. eval_studio_client/api/docs/{V1alphaEvaluator.md → V1Evaluator.md} +10 -9
  97. eval_studio_client/api/docs/{V1alphaEvaluatorParamType.md → V1EvaluatorParamType.md} +1 -1
  98. eval_studio_client/api/docs/{V1alphaEvaluatorParameter.md → V1EvaluatorParameter.md} +9 -9
  99. eval_studio_client/api/docs/{V1alphaEvaluatorView.md → V1EvaluatorView.md} +1 -1
  100. eval_studio_client/api/docs/V1FinalizeOperationResponse.md +29 -0
  101. eval_studio_client/api/docs/V1FindAllTestCasesByIDResponse.md +29 -0
  102. eval_studio_client/api/docs/V1FindTestLabResponse.md +29 -0
  103. eval_studio_client/api/docs/V1GenerateTestCasesResponse.md +29 -0
  104. eval_studio_client/api/docs/V1GetDashboardResponse.md +29 -0
  105. eval_studio_client/api/docs/V1GetDocumentResponse.md +29 -0
  106. eval_studio_client/api/docs/V1GetEvaluatorResponse.md +29 -0
  107. eval_studio_client/api/docs/V1GetInfoResponse.md +29 -0
  108. eval_studio_client/api/docs/V1GetLeaderboardResponse.md +29 -0
  109. eval_studio_client/api/docs/V1GetModelResponse.md +29 -0
  110. eval_studio_client/api/docs/V1GetOperationProgressByParentResponse.md +29 -0
  111. eval_studio_client/api/docs/V1GetOperationResponse.md +29 -0
  112. eval_studio_client/api/docs/V1GetPerturbatorResponse.md +29 -0
  113. eval_studio_client/api/docs/V1GetTestCaseResponse.md +29 -0
  114. eval_studio_client/api/docs/V1GetTestClassResponse.md +29 -0
  115. eval_studio_client/api/docs/V1GetTestResponse.md +29 -0
  116. eval_studio_client/api/docs/{V1alphaImportEvaluationRequest.md → V1ImportEvaluationRequest.md} +9 -9
  117. eval_studio_client/api/docs/{V1alphaImportLeaderboardRequest.md → V1ImportLeaderboardRequest.md} +9 -9
  118. eval_studio_client/api/docs/V1ImportLeaderboardResponse.md +29 -0
  119. eval_studio_client/api/docs/{V1alphaInfo.md → V1Info.md} +8 -8
  120. eval_studio_client/api/docs/{V1alphaInsight.md → V1Insight.md} +8 -8
  121. eval_studio_client/api/docs/{V1alphaLeaderboard.md → V1Leaderboard.md} +12 -12
  122. eval_studio_client/api/docs/{V1alphaLeaderboardStatus.md → V1LeaderboardStatus.md} +1 -1
  123. eval_studio_client/api/docs/{V1alphaLeaderboardType.md → V1LeaderboardType.md} +1 -1
  124. eval_studio_client/api/docs/{V1alphaLeaderboardView.md → V1LeaderboardView.md} +1 -1
  125. eval_studio_client/api/docs/V1ListBaseModelsResponse.md +29 -0
  126. eval_studio_client/api/docs/V1ListDashboardsResponse.md +29 -0
  127. eval_studio_client/api/docs/V1ListDocumentsResponse.md +29 -0
  128. eval_studio_client/api/docs/V1ListEvaluatorsResponse.md +29 -0
  129. eval_studio_client/api/docs/V1ListLLMModelsResponse.md +29 -0
  130. eval_studio_client/api/docs/V1ListLeaderboardsResponse.md +30 -0
  131. eval_studio_client/api/docs/V1ListModelCollectionsResponse.md +29 -0
  132. eval_studio_client/api/docs/V1ListModelsResponse.md +29 -0
  133. eval_studio_client/api/docs/V1ListMostRecentDashboardsResponse.md +29 -0
  134. eval_studio_client/api/docs/V1ListMostRecentLeaderboardsResponse.md +29 -0
  135. eval_studio_client/api/docs/V1ListMostRecentModelsResponse.md +29 -0
  136. eval_studio_client/api/docs/V1ListMostRecentTestsResponse.md +29 -0
  137. eval_studio_client/api/docs/V1ListOperationsResponse.md +29 -0
  138. eval_studio_client/api/docs/V1ListPerturbatorsResponse.md +29 -0
  139. eval_studio_client/api/docs/V1ListRAGCollectionsResponse.md +29 -0
  140. eval_studio_client/api/docs/V1ListTestCasesResponse.md +29 -0
  141. eval_studio_client/api/docs/V1ListTestClassesResponse.md +29 -0
  142. eval_studio_client/api/docs/V1ListTestsResponse.md +29 -0
  143. eval_studio_client/api/docs/{V1alphaModel.md → V1Model.md} +9 -9
  144. eval_studio_client/api/docs/{V1alphaModelType.md → V1ModelType.md} +1 -1
  145. eval_studio_client/api/docs/{V1alphaOperation.md → V1Operation.md} +8 -8
  146. eval_studio_client/api/docs/{V1alphaOperationProgress.md → V1OperationProgress.md} +8 -8
  147. eval_studio_client/api/docs/V1PerturbTestResponse.md +29 -0
  148. eval_studio_client/api/docs/{V1alphaPerturbator.md → V1Perturbator.md} +8 -8
  149. eval_studio_client/api/docs/V1PerturbatorConfiguration.md +32 -0
  150. eval_studio_client/api/docs/{V1alphaPerturbatorIntensity.md → V1PerturbatorIntensity.md} +1 -1
  151. eval_studio_client/api/docs/{V1alphaProblemAndAction.md → V1ProblemAndAction.md} +8 -8
  152. eval_studio_client/api/docs/{V1alphaTest.md → V1Test.md} +8 -8
  153. eval_studio_client/api/docs/{V1alphaTestCase.md → V1TestCase.md} +8 -8
  154. eval_studio_client/api/docs/{V1alphaTestCaseRelationship.md → V1TestCaseRelationship.md} +8 -8
  155. eval_studio_client/api/docs/V1TestCasesGenerator.md +11 -0
  156. eval_studio_client/api/docs/{V1alphaTestClass.md → V1TestClass.md} +9 -9
  157. eval_studio_client/api/docs/{V1alphaTestClassType.md → V1TestClassType.md} +1 -1
  158. eval_studio_client/api/docs/{V1alphaTestLab.md → V1TestLab.md} +8 -8
  159. eval_studio_client/api/docs/V1UpdateDashboardResponse.md +29 -0
  160. eval_studio_client/api/docs/V1UpdateDocumentResponse.md +29 -0
  161. eval_studio_client/api/docs/V1UpdateLeaderboardResponse.md +29 -0
  162. eval_studio_client/api/docs/V1UpdateModelResponse.md +29 -0
  163. eval_studio_client/api/docs/V1UpdateOperationResponse.md +29 -0
  164. eval_studio_client/api/docs/V1UpdateTestCaseResponse.md +29 -0
  165. eval_studio_client/api/docs/V1UpdateTestResponse.md +29 -0
  166. eval_studio_client/api/docs/{V1alphaWhoAmIResponse.md → V1WhoAmIResponse.md} +8 -8
  167. eval_studio_client/api/docs/WhoAmIServiceApi.md +4 -4
  168. eval_studio_client/api/exceptions.py +1 -1
  169. eval_studio_client/api/models/__init__.py +124 -120
  170. eval_studio_client/api/models/perturbation_service_create_perturbation_request.py +10 -10
  171. eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +104 -0
  172. eval_studio_client/api/models/protobuf_any.py +1 -1
  173. eval_studio_client/api/models/required_the_dashboard_to_update.py +3 -3
  174. eval_studio_client/api/models/required_the_document_to_update.py +1 -1
  175. eval_studio_client/api/models/required_the_leaderboard_to_update.py +11 -11
  176. eval_studio_client/api/models/required_the_model_to_update.py +3 -3
  177. eval_studio_client/api/models/required_the_operation_to_finalize.py +1 -1
  178. eval_studio_client/api/models/required_the_operation_to_update.py +1 -1
  179. eval_studio_client/api/models/required_the_test_case_to_update.py +1 -1
  180. eval_studio_client/api/models/required_the_test_to_update.py +1 -1
  181. eval_studio_client/api/models/rpc_status.py +1 -1
  182. eval_studio_client/api/models/test_case_service_batch_delete_test_cases_request.py +1 -1
  183. eval_studio_client/api/models/test_service_generate_test_cases_request.py +96 -0
  184. eval_studio_client/api/models/test_service_perturb_test_request.py +4 -4
  185. eval_studio_client/api/models/{v1alpha_batch_create_leaderboards_request.py → v1_batch_create_leaderboards_request.py} +8 -8
  186. eval_studio_client/api/models/{v1alpha_update_operation_response.py → v1_batch_create_leaderboards_response.py} +8 -8
  187. eval_studio_client/api/models/{v1alpha_batch_delete_dashboards_request.py → v1_batch_delete_dashboards_request.py} +5 -5
  188. eval_studio_client/api/models/{v1alpha_list_dashboards_response.py → v1_batch_delete_dashboards_response.py} +8 -8
  189. eval_studio_client/api/models/{v1alpha_batch_delete_documents_request.py → v1_batch_delete_documents_request.py} +5 -5
  190. eval_studio_client/api/models/{v1alpha_list_documents_response.py → v1_batch_delete_documents_response.py} +8 -8
  191. eval_studio_client/api/models/{v1alpha_batch_delete_evaluators_request.py → v1_batch_delete_evaluators_request.py} +5 -5
  192. eval_studio_client/api/models/{v1alpha_list_evaluators_response.py → v1_batch_delete_evaluators_response.py} +8 -8
  193. eval_studio_client/api/models/{v1alpha_batch_delete_leaderboards_request.py → v1_batch_delete_leaderboards_request.py} +7 -7
  194. eval_studio_client/api/models/{v1alpha_batch_get_leaderboards_response.py → v1_batch_delete_leaderboards_response.py} +8 -8
  195. eval_studio_client/api/models/{v1alpha_batch_delete_models_request.py → v1_batch_delete_models_request.py} +5 -5
  196. eval_studio_client/api/models/{v1alpha_list_models_response.py → v1_batch_delete_models_response.py} +8 -8
  197. eval_studio_client/api/models/{v1alpha_list_test_cases_response.py → v1_batch_delete_test_cases_response.py} +8 -8
  198. eval_studio_client/api/models/{v1alpha_batch_delete_tests_request.py → v1_batch_delete_tests_request.py} +5 -5
  199. eval_studio_client/api/models/{v1alpha_list_tests_response.py → v1_batch_delete_tests_response.py} +8 -8
  200. eval_studio_client/api/models/{v1alpha_batch_get_dashboards_response.py → v1_batch_get_dashboards_response.py} +8 -8
  201. eval_studio_client/api/models/{v1alpha_batch_get_documents_response.py → v1_batch_get_documents_response.py} +8 -8
  202. eval_studio_client/api/models/{v1alpha_batch_delete_leaderboards_response.py → v1_batch_get_leaderboards_response.py} +8 -8
  203. eval_studio_client/api/models/{v1alpha_batch_get_models_response.py → v1_batch_get_models_response.py} +8 -8
  204. eval_studio_client/api/models/{v1alpha_list_operations_response.py → v1_batch_get_operations_response.py} +8 -8
  205. eval_studio_client/api/models/{v1alpha_batch_get_tests_response.py → v1_batch_get_tests_response.py} +8 -8
  206. eval_studio_client/api/models/{v1alpha_batch_import_leaderboard_request.py → v1_batch_import_leaderboard_request.py} +7 -7
  207. eval_studio_client/api/models/{v1alpha_import_leaderboard_response.py → v1_batch_import_leaderboard_response.py} +8 -8
  208. eval_studio_client/api/models/{v1alpha_batch_import_tests_request.py → v1_batch_import_tests_request.py} +5 -5
  209. eval_studio_client/api/models/{v1alpha_batch_delete_tests_response.py → v1_batch_import_tests_response.py} +8 -8
  210. eval_studio_client/api/models/{v1alpha_check_base_models_response.py → v1_check_base_models_response.py} +5 -5
  211. eval_studio_client/api/models/{v1alpha_collection_info.py → v1_collection_info.py} +4 -4
  212. eval_studio_client/api/models/{v1alpha_get_dashboard_response.py → v1_create_dashboard_response.py} +8 -8
  213. eval_studio_client/api/models/{v1alpha_get_document_response.py → v1_create_document_response.py} +8 -8
  214. eval_studio_client/api/models/{v1alpha_create_evaluation_request.py → v1_create_evaluation_request.py} +11 -11
  215. eval_studio_client/api/models/{v1alpha_get_evaluator_response.py → v1_create_evaluator_response.py} +8 -8
  216. eval_studio_client/api/models/{v1alpha_get_leaderboard_response.py → v1_create_leaderboard_request.py} +8 -8
  217. eval_studio_client/api/models/{v1alpha_get_operation_response.py → v1_create_leaderboard_response.py} +8 -8
  218. eval_studio_client/api/models/v1_create_leaderboard_without_cache_response.py +91 -0
  219. eval_studio_client/api/models/{v1alpha_get_model_response.py → v1_create_model_response.py} +8 -8
  220. eval_studio_client/api/models/{v1alpha_create_perturbation_response.py → v1_create_perturbation_response.py} +5 -5
  221. eval_studio_client/api/models/{v1alpha_get_test_case_response.py → v1_create_test_case_response.py} +8 -8
  222. eval_studio_client/api/models/{v1alpha_find_test_lab_response.py → v1_create_test_lab_response.py} +8 -8
  223. eval_studio_client/api/models/{v1alpha_get_test_response.py → v1_create_test_response.py} +8 -8
  224. eval_studio_client/api/models/{v1alpha_dashboard.py → v1_dashboard.py} +7 -7
  225. eval_studio_client/api/models/{v1alpha_dashboard_status.py → v1_dashboard_status.py} +3 -3
  226. eval_studio_client/api/models/{v1alpha_update_dashboard_response.py → v1_delete_dashboard_response.py} +8 -8
  227. eval_studio_client/api/models/{v1alpha_update_document_response.py → v1_delete_document_response.py} +8 -8
  228. eval_studio_client/api/models/{v1alpha_create_evaluator_response.py → v1_delete_evaluator_response.py} +8 -8
  229. eval_studio_client/api/models/{v1alpha_create_leaderboard_request.py → v1_delete_leaderboard_response.py} +8 -8
  230. eval_studio_client/api/models/{v1alpha_create_model_response.py → v1_delete_model_response.py} +8 -8
  231. eval_studio_client/api/models/{v1alpha_delete_test_case_response.py → v1_delete_test_case_response.py} +8 -8
  232. eval_studio_client/api/models/{v1alpha_create_test_response.py → v1_delete_test_response.py} +8 -8
  233. eval_studio_client/api/models/{v1alpha_document.py → v1_document.py} +5 -5
  234. eval_studio_client/api/models/{v1alpha_evaluation_test.py → v1_evaluation_test.py} +10 -10
  235. eval_studio_client/api/models/{v1alpha_evaluator.py → v1_evaluator.py} +14 -10
  236. eval_studio_client/api/models/{v1alpha_evaluator_param_type.py → v1_evaluator_param_type.py} +3 -3
  237. eval_studio_client/api/models/{v1alpha_evaluator_parameter.py → v1_evaluator_parameter.py} +7 -7
  238. eval_studio_client/api/models/{v1alpha_evaluator_view.py → v1_evaluator_view.py} +3 -3
  239. eval_studio_client/api/models/{v1alpha_create_leaderboard_response.py → v1_finalize_operation_response.py} +8 -8
  240. eval_studio_client/api/models/{v1alpha_find_all_test_cases_by_id_response.py → v1_find_all_test_cases_by_id_response.py} +8 -8
  241. eval_studio_client/api/models/{v1alpha_create_test_lab_response.py → v1_find_test_lab_response.py} +8 -8
  242. eval_studio_client/api/models/{v1alpha_finalize_operation_response.py → v1_generate_test_cases_response.py} +8 -8
  243. eval_studio_client/api/models/{v1alpha_create_dashboard_response.py → v1_get_dashboard_response.py} +8 -8
  244. eval_studio_client/api/models/{v1alpha_delete_document_response.py → v1_get_document_response.py} +8 -8
  245. eval_studio_client/api/models/{v1alpha_delete_evaluator_response.py → v1_get_evaluator_response.py} +8 -8
  246. eval_studio_client/api/models/{v1alpha_get_info_response.py → v1_get_info_response.py} +8 -8
  247. eval_studio_client/api/models/{v1alpha_update_leaderboard_response.py → v1_get_leaderboard_response.py} +8 -8
  248. eval_studio_client/api/models/{v1alpha_update_model_response.py → v1_get_model_response.py} +8 -8
  249. eval_studio_client/api/models/{v1alpha_get_operation_progress_by_parent_response.py → v1_get_operation_progress_by_parent_response.py} +8 -8
  250. eval_studio_client/api/models/v1_get_operation_response.py +91 -0
  251. eval_studio_client/api/models/{v1alpha_get_perturbator_response.py → v1_get_perturbator_response.py} +8 -8
  252. eval_studio_client/api/models/{v1alpha_create_test_case_response.py → v1_get_test_case_response.py} +8 -8
  253. eval_studio_client/api/models/{v1alpha_get_test_class_response.py → v1_get_test_class_response.py} +8 -8
  254. eval_studio_client/api/models/{v1alpha_update_test_response.py → v1_get_test_response.py} +8 -8
  255. eval_studio_client/api/models/{v1alpha_import_evaluation_request.py → v1_import_evaluation_request.py} +8 -8
  256. eval_studio_client/api/models/{v1alpha_import_leaderboard_request.py → v1_import_leaderboard_request.py} +7 -7
  257. eval_studio_client/api/models/v1_import_leaderboard_response.py +91 -0
  258. eval_studio_client/api/models/{v1alpha_info.py → v1_info.py} +5 -5
  259. eval_studio_client/api/models/{v1alpha_insight.py → v1_insight.py} +4 -4
  260. eval_studio_client/api/models/{v1alpha_leaderboard.py → v1_leaderboard.py} +15 -15
  261. eval_studio_client/api/models/{v1alpha_leaderboard_status.py → v1_leaderboard_status.py} +3 -3
  262. eval_studio_client/api/models/{v1alpha_leaderboard_type.py → v1_leaderboard_type.py} +3 -3
  263. eval_studio_client/api/models/{v1alpha_leaderboard_view.py → v1_leaderboard_view.py} +3 -3
  264. eval_studio_client/api/models/{v1alpha_list_base_models_response.py → v1_list_base_models_response.py} +5 -5
  265. eval_studio_client/api/models/{v1alpha_batch_delete_dashboards_response.py → v1_list_dashboards_response.py} +8 -8
  266. eval_studio_client/api/models/{v1alpha_batch_delete_documents_response.py → v1_list_documents_response.py} +8 -8
  267. eval_studio_client/api/models/{v1alpha_batch_delete_evaluators_response.py → v1_list_evaluators_response.py} +8 -8
  268. eval_studio_client/api/models/{v1alpha_list_leaderboards_response.py → v1_list_leaderboards_response.py} +8 -8
  269. eval_studio_client/api/models/{v1alpha_list_llm_models_response.py → v1_list_llm_models_response.py} +5 -5
  270. eval_studio_client/api/models/{v1alpha_list_model_collections_response.py → v1_list_model_collections_response.py} +8 -8
  271. eval_studio_client/api/models/{v1alpha_batch_delete_models_response.py → v1_list_models_response.py} +8 -8
  272. eval_studio_client/api/models/{v1alpha_list_most_recent_dashboards_response.py → v1_list_most_recent_dashboards_response.py} +8 -8
  273. eval_studio_client/api/models/{v1alpha_list_most_recent_leaderboards_response.py → v1_list_most_recent_leaderboards_response.py} +8 -8
  274. eval_studio_client/api/models/{v1alpha_list_most_recent_models_response.py → v1_list_most_recent_models_response.py} +8 -8
  275. eval_studio_client/api/models/{v1alpha_batch_import_tests_response.py → v1_list_most_recent_tests_response.py} +8 -8
  276. eval_studio_client/api/models/{v1alpha_batch_get_operations_response.py → v1_list_operations_response.py} +8 -8
  277. eval_studio_client/api/models/{v1alpha_list_perturbators_response.py → v1_list_perturbators_response.py} +8 -8
  278. eval_studio_client/api/models/{v1alpha_list_rag_collections_response.py → v1_list_rag_collections_response.py} +8 -8
  279. eval_studio_client/api/models/{v1alpha_batch_delete_test_cases_response.py → v1_list_test_cases_response.py} +8 -8
  280. eval_studio_client/api/models/{v1alpha_list_test_classes_response.py → v1_list_test_classes_response.py} +8 -8
  281. eval_studio_client/api/models/v1_list_tests_response.py +95 -0
  282. eval_studio_client/api/models/{v1alpha_model.py → v1_model.py} +7 -7
  283. eval_studio_client/api/models/{v1alpha_model_type.py → v1_model_type.py} +3 -3
  284. eval_studio_client/api/models/{v1alpha_operation.py → v1_operation.py} +4 -4
  285. eval_studio_client/api/models/{v1alpha_operation_progress.py → v1_operation_progress.py} +5 -5
  286. eval_studio_client/api/models/{v1alpha_delete_test_response.py → v1_perturb_test_response.py} +8 -8
  287. eval_studio_client/api/models/{v1alpha_perturbator.py → v1_perturbator.py} +5 -5
  288. eval_studio_client/api/models/{v1alpha_perturbator_configuration.py → v1_perturbator_configuration.py} +6 -6
  289. eval_studio_client/api/models/{v1alpha_perturbator_intensity.py → v1_perturbator_intensity.py} +4 -4
  290. eval_studio_client/api/models/{v1alpha_problem_and_action.py → v1_problem_and_action.py} +5 -5
  291. eval_studio_client/api/models/{v1alpha_test.py → v1_test.py} +5 -5
  292. eval_studio_client/api/models/{v1alpha_test_case.py → v1_test_case.py} +5 -5
  293. eval_studio_client/api/models/{v1alpha_test_case_relationship.py → v1_test_case_relationship.py} +5 -5
  294. eval_studio_client/api/models/v1_test_cases_generator.py +50 -0
  295. eval_studio_client/api/models/{v1alpha_test_class.py → v1_test_class.py} +7 -7
  296. eval_studio_client/api/models/{v1alpha_test_class_type.py → v1_test_class_type.py} +3 -3
  297. eval_studio_client/api/models/{v1alpha_test_lab.py → v1_test_lab.py} +5 -5
  298. eval_studio_client/api/models/{v1alpha_delete_dashboard_response.py → v1_update_dashboard_response.py} +8 -8
  299. eval_studio_client/api/models/{v1alpha_create_document_response.py → v1_update_document_response.py} +8 -8
  300. eval_studio_client/api/models/{v1alpha_delete_leaderboard_response.py → v1_update_leaderboard_response.py} +8 -8
  301. eval_studio_client/api/models/{v1alpha_delete_model_response.py → v1_update_model_response.py} +8 -8
  302. eval_studio_client/api/models/v1_update_operation_response.py +91 -0
  303. eval_studio_client/api/models/{v1alpha_update_test_case_response.py → v1_update_test_case_response.py} +8 -8
  304. eval_studio_client/api/models/v1_update_test_response.py +91 -0
  305. eval_studio_client/api/models/{v1alpha_who_am_i_response.py → v1_who_am_i_response.py} +5 -5
  306. eval_studio_client/api/rest.py +1 -1
  307. eval_studio_client/api/test/test_dashboard_service_api.py +1 -1
  308. eval_studio_client/api/test/test_document_service_api.py +1 -1
  309. eval_studio_client/api/test/test_evaluation_service_api.py +1 -1
  310. eval_studio_client/api/test/test_evaluator_service_api.py +1 -1
  311. eval_studio_client/api/test/test_info_service_api.py +1 -1
  312. eval_studio_client/api/test/test_leaderboard_service_api.py +1 -1
  313. eval_studio_client/api/test/test_model_service_api.py +1 -1
  314. eval_studio_client/api/test/test_operation_progress_service_api.py +1 -1
  315. eval_studio_client/api/test/test_operation_service_api.py +1 -1
  316. eval_studio_client/api/test/test_perturbation_service_api.py +1 -1
  317. eval_studio_client/api/test/test_perturbation_service_create_perturbation_request.py +4 -4
  318. eval_studio_client/api/test/test_perturbator_service_api.py +1 -1
  319. eval_studio_client/api/test/test_prompt_generation_service_api.py +37 -0
  320. eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +75 -0
  321. eval_studio_client/api/test/test_protobuf_any.py +1 -1
  322. eval_studio_client/api/test/test_required_the_dashboard_to_update.py +1 -1
  323. eval_studio_client/api/test/test_required_the_document_to_update.py +1 -1
  324. eval_studio_client/api/test/test_required_the_leaderboard_to_update.py +3 -3
  325. eval_studio_client/api/test/test_required_the_model_to_update.py +1 -1
  326. eval_studio_client/api/test/test_required_the_operation_to_finalize.py +1 -1
  327. eval_studio_client/api/test/test_required_the_operation_to_update.py +1 -1
  328. eval_studio_client/api/test/test_required_the_test_case_to_update.py +1 -1
  329. eval_studio_client/api/test/test_required_the_test_to_update.py +1 -1
  330. eval_studio_client/api/test/test_rpc_status.py +1 -1
  331. eval_studio_client/api/test/test_test_case_service_api.py +1 -1
  332. eval_studio_client/api/test/test_test_case_service_batch_delete_test_cases_request.py +1 -1
  333. eval_studio_client/api/test/test_test_class_service_api.py +1 -1
  334. eval_studio_client/api/test/test_test_lab_service_api.py +1 -1
  335. eval_studio_client/api/test/test_test_service_api.py +7 -1
  336. eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +57 -0
  337. eval_studio_client/api/test/test_test_service_perturb_test_request.py +2 -2
  338. eval_studio_client/api/test/{test_v1alpha_batch_create_leaderboards_request.py → test_v1_batch_create_leaderboards_request.py} +16 -16
  339. eval_studio_client/api/test/{test_v1alpha_create_leaderboard_response.py → test_v1_batch_create_leaderboards_response.py} +13 -13
  340. eval_studio_client/api/test/{test_v1alpha_batch_delete_models_request.py → test_v1_batch_delete_dashboards_request.py} +12 -12
  341. eval_studio_client/api/test/{test_v1alpha_batch_get_dashboards_response.py → test_v1_batch_delete_dashboards_response.py} +13 -13
  342. eval_studio_client/api/test/{test_v1alpha_batch_delete_documents_request.py → test_v1_batch_delete_documents_request.py} +12 -12
  343. eval_studio_client/api/test/{test_v1alpha_batch_get_documents_response.py → test_v1_batch_delete_documents_response.py} +13 -13
  344. eval_studio_client/api/test/test_v1_batch_delete_evaluators_request.py +53 -0
  345. eval_studio_client/api/test/{test_v1alpha_batch_delete_evaluators_response.py → test_v1_batch_delete_evaluators_response.py} +15 -14
  346. eval_studio_client/api/test/{test_v1alpha_batch_delete_leaderboards_request.py → test_v1_batch_delete_leaderboards_request.py} +12 -12
  347. eval_studio_client/api/test/{test_v1alpha_batch_get_leaderboards_response.py → test_v1_batch_delete_leaderboards_response.py} +15 -15
  348. eval_studio_client/api/test/test_v1_batch_delete_models_request.py +53 -0
  349. eval_studio_client/api/test/{test_v1alpha_batch_get_models_response.py → test_v1_batch_delete_models_response.py} +13 -13
  350. eval_studio_client/api/test/{test_v1alpha_batch_delete_test_cases_response.py → test_v1_batch_delete_test_cases_response.py} +13 -13
  351. eval_studio_client/api/test/{test_v1alpha_batch_delete_tests_request.py → test_v1_batch_delete_tests_request.py} +12 -12
  352. eval_studio_client/api/test/{test_v1alpha_batch_get_tests_response.py → test_v1_batch_delete_tests_response.py} +13 -13
  353. eval_studio_client/api/test/{test_v1alpha_list_dashboards_response.py → test_v1_batch_get_dashboards_response.py} +13 -13
  354. eval_studio_client/api/test/{test_v1alpha_list_documents_response.py → test_v1_batch_get_documents_response.py} +13 -13
  355. eval_studio_client/api/test/{test_v1alpha_batch_delete_leaderboards_response.py → test_v1_batch_get_leaderboards_response.py} +15 -15
  356. eval_studio_client/api/test/{test_v1alpha_list_models_response.py → test_v1_batch_get_models_response.py} +13 -13
  357. eval_studio_client/api/test/{test_v1alpha_list_operations_response.py → test_v1_batch_get_operations_response.py} +13 -13
  358. eval_studio_client/api/test/{test_v1alpha_list_tests_response.py → test_v1_batch_get_tests_response.py} +13 -13
  359. eval_studio_client/api/test/{test_v1alpha_batch_import_leaderboard_request.py → test_v1_batch_import_leaderboard_request.py} +12 -12
  360. eval_studio_client/api/test/{test_v1alpha_import_leaderboard_response.py → test_v1_batch_import_leaderboard_response.py} +13 -13
  361. eval_studio_client/api/test/{test_v1alpha_batch_import_tests_request.py → test_v1_batch_import_tests_request.py} +12 -12
  362. eval_studio_client/api/test/{test_v1alpha_batch_import_tests_response.py → test_v1_batch_import_tests_response.py} +13 -13
  363. eval_studio_client/api/test/{test_v1alpha_check_base_models_response.py → test_v1_check_base_models_response.py} +12 -12
  364. eval_studio_client/api/test/{test_v1alpha_collection_info.py → test_v1_collection_info.py} +12 -12
  365. eval_studio_client/api/test/{test_v1alpha_get_dashboard_response.py → test_v1_create_dashboard_response.py} +13 -13
  366. eval_studio_client/api/test/{test_v1alpha_get_document_response.py → test_v1_create_document_response.py} +13 -13
  367. eval_studio_client/api/test/{test_v1alpha_create_evaluation_request.py → test_v1_create_evaluation_request.py} +16 -16
  368. eval_studio_client/api/test/{test_v1alpha_get_evaluator_response.py → test_v1_create_evaluator_response.py} +15 -14
  369. eval_studio_client/api/test/{test_v1alpha_get_leaderboard_response.py → test_v1_create_leaderboard_request.py} +15 -15
  370. eval_studio_client/api/test/{test_v1alpha_update_operation_response.py → test_v1_create_leaderboard_response.py} +13 -13
  371. eval_studio_client/api/test/{test_v1alpha_batch_import_leaderboard_response.py → test_v1_create_leaderboard_without_cache_response.py} +13 -13
  372. eval_studio_client/api/test/{test_v1alpha_get_model_response.py → test_v1_create_model_response.py} +13 -13
  373. eval_studio_client/api/test/{test_v1alpha_create_perturbation_response.py → test_v1_create_perturbation_response.py} +12 -12
  374. eval_studio_client/api/test/{test_v1alpha_get_test_case_response.py → test_v1_create_test_case_response.py} +13 -13
  375. eval_studio_client/api/test/{test_v1alpha_find_test_lab_response.py → test_v1_create_test_lab_response.py} +13 -13
  376. eval_studio_client/api/test/{test_v1alpha_get_test_response.py → test_v1_create_test_response.py} +13 -13
  377. eval_studio_client/api/test/{test_v1alpha_dashboard.py → test_v1_dashboard.py} +12 -12
  378. eval_studio_client/api/test/{test_v1alpha_evaluator_view.py → test_v1_dashboard_status.py} +7 -7
  379. eval_studio_client/api/test/{test_v1alpha_update_dashboard_response.py → test_v1_delete_dashboard_response.py} +13 -13
  380. eval_studio_client/api/test/{test_v1alpha_update_document_response.py → test_v1_delete_document_response.py} +13 -13
  381. eval_studio_client/api/test/{test_v1alpha_delete_evaluator_response.py → test_v1_delete_evaluator_response.py} +15 -14
  382. eval_studio_client/api/test/{test_v1alpha_create_leaderboard_request.py → test_v1_delete_leaderboard_response.py} +15 -15
  383. eval_studio_client/api/test/{test_v1alpha_delete_model_response.py → test_v1_delete_model_response.py} +13 -13
  384. eval_studio_client/api/test/{test_v1alpha_create_test_case_response.py → test_v1_delete_test_case_response.py} +13 -13
  385. eval_studio_client/api/test/{test_v1alpha_create_test_response.py → test_v1_delete_test_response.py} +13 -13
  386. eval_studio_client/api/test/{test_v1alpha_document.py → test_v1_document.py} +12 -12
  387. eval_studio_client/api/test/{test_v1alpha_evaluation_test.py → test_v1_evaluation_test.py} +14 -14
  388. eval_studio_client/api/test/{test_v1alpha_evaluator.py → test_v1_evaluator.py} +14 -13
  389. eval_studio_client/api/test/{test_v1alpha_test_class_type.py → test_v1_evaluator_param_type.py} +7 -7
  390. eval_studio_client/api/test/{test_v1alpha_evaluator_parameter.py → test_v1_evaluator_parameter.py} +12 -12
  391. eval_studio_client/api/test/{test_v1alpha_model_type.py → test_v1_evaluator_view.py} +7 -7
  392. eval_studio_client/api/test/{test_v1alpha_get_operation_response.py → test_v1_finalize_operation_response.py} +13 -13
  393. eval_studio_client/api/test/{test_v1alpha_find_all_test_cases_by_id_response.py → test_v1_find_all_test_cases_by_id_response.py} +13 -13
  394. eval_studio_client/api/test/{test_v1alpha_create_test_lab_response.py → test_v1_find_test_lab_response.py} +13 -13
  395. eval_studio_client/api/test/{test_v1alpha_finalize_operation_response.py → test_v1_generate_test_cases_response.py} +13 -13
  396. eval_studio_client/api/test/{test_v1alpha_create_dashboard_response.py → test_v1_get_dashboard_response.py} +13 -13
  397. eval_studio_client/api/test/{test_v1alpha_delete_document_response.py → test_v1_get_document_response.py} +13 -13
  398. eval_studio_client/api/test/{test_v1alpha_create_evaluator_response.py → test_v1_get_evaluator_response.py} +15 -14
  399. eval_studio_client/api/test/{test_v1alpha_get_info_response.py → test_v1_get_info_response.py} +13 -13
  400. eval_studio_client/api/test/{test_v1alpha_delete_leaderboard_response.py → test_v1_get_leaderboard_response.py} +15 -15
  401. eval_studio_client/api/test/{test_v1alpha_create_model_response.py → test_v1_get_model_response.py} +13 -13
  402. eval_studio_client/api/test/{test_v1alpha_get_operation_progress_by_parent_response.py → test_v1_get_operation_progress_by_parent_response.py} +13 -13
  403. eval_studio_client/api/test/test_v1_get_operation_response.py +71 -0
  404. eval_studio_client/api/test/{test_v1alpha_get_perturbator_response.py → test_v1_get_perturbator_response.py} +13 -13
  405. eval_studio_client/api/test/{test_v1alpha_delete_test_case_response.py → test_v1_get_test_case_response.py} +13 -13
  406. eval_studio_client/api/test/{test_v1alpha_get_test_class_response.py → test_v1_get_test_class_response.py} +13 -13
  407. eval_studio_client/api/test/{test_v1alpha_delete_test_response.py → test_v1_get_test_response.py} +13 -13
  408. eval_studio_client/api/test/{test_v1alpha_import_evaluation_request.py → test_v1_import_evaluation_request.py} +13 -13
  409. eval_studio_client/api/test/{test_v1alpha_import_leaderboard_request.py → test_v1_import_leaderboard_request.py} +12 -12
  410. eval_studio_client/api/test/test_v1_import_leaderboard_response.py +71 -0
  411. eval_studio_client/api/test/{test_v1alpha_info.py → test_v1_info.py} +12 -12
  412. eval_studio_client/api/test/{test_v1alpha_insight.py → test_v1_insight.py} +12 -12
  413. eval_studio_client/api/test/{test_v1alpha_leaderboard.py → test_v1_leaderboard.py} +14 -14
  414. eval_studio_client/api/test/{test_v1alpha_dashboard_status.py → test_v1_leaderboard_status.py} +7 -7
  415. eval_studio_client/api/test/test_v1_leaderboard_type.py +33 -0
  416. eval_studio_client/api/test/test_v1_leaderboard_view.py +33 -0
  417. eval_studio_client/api/test/{test_v1alpha_list_base_models_response.py → test_v1_list_base_models_response.py} +12 -12
  418. eval_studio_client/api/test/{test_v1alpha_batch_delete_dashboards_response.py → test_v1_list_dashboards_response.py} +13 -13
  419. eval_studio_client/api/test/{test_v1alpha_batch_delete_documents_response.py → test_v1_list_documents_response.py} +13 -13
  420. eval_studio_client/api/test/{test_v1alpha_list_evaluators_response.py → test_v1_list_evaluators_response.py} +15 -14
  421. eval_studio_client/api/test/{test_v1alpha_list_leaderboards_response.py → test_v1_list_leaderboards_response.py} +15 -15
  422. eval_studio_client/api/test/{test_v1alpha_list_llm_models_response.py → test_v1_list_llm_models_response.py} +12 -12
  423. eval_studio_client/api/test/{test_v1alpha_list_rag_collections_response.py → test_v1_list_model_collections_response.py} +13 -13
  424. eval_studio_client/api/test/{test_v1alpha_batch_delete_models_response.py → test_v1_list_models_response.py} +13 -13
  425. eval_studio_client/api/test/{test_v1alpha_list_most_recent_dashboards_response.py → test_v1_list_most_recent_dashboards_response.py} +13 -13
  426. eval_studio_client/api/test/{test_v1alpha_list_most_recent_leaderboards_response.py → test_v1_list_most_recent_leaderboards_response.py} +15 -15
  427. eval_studio_client/api/test/{test_v1alpha_list_most_recent_models_response.py → test_v1_list_most_recent_models_response.py} +13 -13
  428. eval_studio_client/api/test/{test_v1alpha_batch_delete_tests_response.py → test_v1_list_most_recent_tests_response.py} +13 -13
  429. eval_studio_client/api/test/{test_v1alpha_batch_get_operations_response.py → test_v1_list_operations_response.py} +13 -13
  430. eval_studio_client/api/test/{test_v1alpha_list_perturbators_response.py → test_v1_list_perturbators_response.py} +13 -13
  431. eval_studio_client/api/test/{test_v1alpha_list_model_collections_response.py → test_v1_list_rag_collections_response.py} +13 -13
  432. eval_studio_client/api/test/{test_v1alpha_list_test_cases_response.py → test_v1_list_test_cases_response.py} +13 -13
  433. eval_studio_client/api/test/{test_v1alpha_list_test_classes_response.py → test_v1_list_test_classes_response.py} +13 -13
  434. eval_studio_client/api/test/test_v1_list_tests_response.py +69 -0
  435. eval_studio_client/api/test/{test_v1alpha_model.py → test_v1_model.py} +12 -12
  436. eval_studio_client/api/test/{test_v1alpha_leaderboard_view.py → test_v1_model_type.py} +7 -7
  437. eval_studio_client/api/test/{test_v1alpha_operation.py → test_v1_operation.py} +12 -12
  438. eval_studio_client/api/test/{test_v1alpha_operation_progress.py → test_v1_operation_progress.py} +12 -12
  439. eval_studio_client/api/test/{test_v1alpha_update_test_response.py → test_v1_perturb_test_response.py} +13 -13
  440. eval_studio_client/api/test/{test_v1alpha_perturbator.py → test_v1_perturbator.py} +12 -12
  441. eval_studio_client/api/test/{test_v1alpha_perturbator_configuration.py → test_v1_perturbator_configuration.py} +12 -12
  442. eval_studio_client/api/test/{test_v1alpha_leaderboard_type.py → test_v1_perturbator_intensity.py} +7 -7
  443. eval_studio_client/api/test/{test_v1alpha_problem_and_action.py → test_v1_problem_and_action.py} +12 -12
  444. eval_studio_client/api/test/{test_v1alpha_test.py → test_v1_test.py} +12 -12
  445. eval_studio_client/api/test/{test_v1alpha_test_case.py → test_v1_test_case.py} +12 -12
  446. eval_studio_client/api/test/{test_v1alpha_test_case_relationship.py → test_v1_test_case_relationship.py} +12 -12
  447. eval_studio_client/api/test/test_v1_test_cases_generator.py +33 -0
  448. eval_studio_client/api/test/{test_v1alpha_test_class.py → test_v1_test_class.py} +12 -12
  449. eval_studio_client/api/test/test_v1_test_class_type.py +33 -0
  450. eval_studio_client/api/test/{test_v1alpha_test_lab.py → test_v1_test_lab.py} +12 -12
  451. eval_studio_client/api/test/{test_v1alpha_delete_dashboard_response.py → test_v1_update_dashboard_response.py} +13 -13
  452. eval_studio_client/api/test/{test_v1alpha_create_document_response.py → test_v1_update_document_response.py} +13 -13
  453. eval_studio_client/api/test/{test_v1alpha_update_leaderboard_response.py → test_v1_update_leaderboard_response.py} +15 -15
  454. eval_studio_client/api/test/{test_v1alpha_update_model_response.py → test_v1_update_model_response.py} +13 -13
  455. eval_studio_client/api/test/test_v1_update_operation_response.py +71 -0
  456. eval_studio_client/api/test/{test_v1alpha_update_test_case_response.py → test_v1_update_test_case_response.py} +13 -13
  457. eval_studio_client/api/test/test_v1_update_test_response.py +67 -0
  458. eval_studio_client/api/test/{test_v1alpha_who_am_i_response.py → test_v1_who_am_i_response.py} +12 -12
  459. eval_studio_client/api/test/test_who_am_i_service_api.py +1 -1
  460. eval_studio_client/dashboards.py +23 -1
  461. eval_studio_client/documents.py +3 -3
  462. eval_studio_client/evaluators.py +1 -1
  463. eval_studio_client/gen/openapiv2/eval_studio.swagger.json +568 -387
  464. eval_studio_client/insights.py +1 -1
  465. eval_studio_client/leaderboards.py +11 -13
  466. eval_studio_client/models.py +61 -29
  467. eval_studio_client/perturbators.py +5 -7
  468. eval_studio_client/problems.py +1 -1
  469. eval_studio_client/test_labs.py +2 -2
  470. eval_studio_client/tests.py +225 -8
  471. {eval_studio_client-0.8.0a2.dist-info → eval_studio_client-0.8.2.dist-info}/METADATA +2 -2
  472. eval_studio_client-0.8.2.dist-info/RECORD +485 -0
  473. {eval_studio_client-0.8.0a2.dist-info → eval_studio_client-0.8.2.dist-info}/WHEEL +1 -1
  474. eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsRequest.md +0 -31
  475. eval_studio_client/api/docs/V1alphaBatchCreateLeaderboardsResponse.md +0 -29
  476. eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsRequest.md +0 -29
  477. eval_studio_client/api/docs/V1alphaBatchDeleteDashboardsResponse.md +0 -29
  478. eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsRequest.md +0 -29
  479. eval_studio_client/api/docs/V1alphaBatchDeleteDocumentsResponse.md +0 -29
  480. eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsRequest.md +0 -29
  481. eval_studio_client/api/docs/V1alphaBatchDeleteEvaluatorsResponse.md +0 -29
  482. eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsRequest.md +0 -30
  483. eval_studio_client/api/docs/V1alphaBatchDeleteLeaderboardsResponse.md +0 -29
  484. eval_studio_client/api/docs/V1alphaBatchDeleteModelsRequest.md +0 -29
  485. eval_studio_client/api/docs/V1alphaBatchDeleteModelsResponse.md +0 -29
  486. eval_studio_client/api/docs/V1alphaBatchDeleteTestCasesResponse.md +0 -29
  487. eval_studio_client/api/docs/V1alphaBatchDeleteTestsResponse.md +0 -29
  488. eval_studio_client/api/docs/V1alphaBatchGetDashboardsResponse.md +0 -29
  489. eval_studio_client/api/docs/V1alphaBatchGetDocumentsResponse.md +0 -29
  490. eval_studio_client/api/docs/V1alphaBatchGetLeaderboardsResponse.md +0 -29
  491. eval_studio_client/api/docs/V1alphaBatchGetModelsResponse.md +0 -29
  492. eval_studio_client/api/docs/V1alphaBatchGetOperationsResponse.md +0 -29
  493. eval_studio_client/api/docs/V1alphaBatchGetTestsResponse.md +0 -29
  494. eval_studio_client/api/docs/V1alphaBatchImportLeaderboardResponse.md +0 -29
  495. eval_studio_client/api/docs/V1alphaBatchImportTestsResponse.md +0 -29
  496. eval_studio_client/api/docs/V1alphaCheckBaseModelsResponse.md +0 -30
  497. eval_studio_client/api/docs/V1alphaCreateDashboardResponse.md +0 -29
  498. eval_studio_client/api/docs/V1alphaCreateDocumentResponse.md +0 -29
  499. eval_studio_client/api/docs/V1alphaCreateEvaluatorResponse.md +0 -29
  500. eval_studio_client/api/docs/V1alphaCreateLeaderboardRequest.md +0 -29
  501. eval_studio_client/api/docs/V1alphaCreateLeaderboardResponse.md +0 -29
  502. eval_studio_client/api/docs/V1alphaCreateLeaderboardWithoutCacheResponse.md +0 -29
  503. eval_studio_client/api/docs/V1alphaCreateModelResponse.md +0 -29
  504. eval_studio_client/api/docs/V1alphaCreatePerturbationResponse.md +0 -29
  505. eval_studio_client/api/docs/V1alphaCreateTestCaseResponse.md +0 -29
  506. eval_studio_client/api/docs/V1alphaCreateTestLabResponse.md +0 -29
  507. eval_studio_client/api/docs/V1alphaCreateTestResponse.md +0 -29
  508. eval_studio_client/api/docs/V1alphaDeleteDashboardResponse.md +0 -29
  509. eval_studio_client/api/docs/V1alphaDeleteDocumentResponse.md +0 -29
  510. eval_studio_client/api/docs/V1alphaDeleteEvaluatorResponse.md +0 -29
  511. eval_studio_client/api/docs/V1alphaDeleteLeaderboardResponse.md +0 -29
  512. eval_studio_client/api/docs/V1alphaDeleteModelResponse.md +0 -29
  513. eval_studio_client/api/docs/V1alphaDeleteTestCaseResponse.md +0 -29
  514. eval_studio_client/api/docs/V1alphaDeleteTestResponse.md +0 -29
  515. eval_studio_client/api/docs/V1alphaEvaluationTest.md +0 -32
  516. eval_studio_client/api/docs/V1alphaFinalizeOperationResponse.md +0 -29
  517. eval_studio_client/api/docs/V1alphaFindAllTestCasesByIDResponse.md +0 -29
  518. eval_studio_client/api/docs/V1alphaFindTestLabResponse.md +0 -29
  519. eval_studio_client/api/docs/V1alphaGetDashboardResponse.md +0 -29
  520. eval_studio_client/api/docs/V1alphaGetDocumentResponse.md +0 -29
  521. eval_studio_client/api/docs/V1alphaGetEvaluatorResponse.md +0 -29
  522. eval_studio_client/api/docs/V1alphaGetInfoResponse.md +0 -29
  523. eval_studio_client/api/docs/V1alphaGetLeaderboardResponse.md +0 -29
  524. eval_studio_client/api/docs/V1alphaGetModelResponse.md +0 -29
  525. eval_studio_client/api/docs/V1alphaGetOperationProgressByParentResponse.md +0 -29
  526. eval_studio_client/api/docs/V1alphaGetOperationResponse.md +0 -29
  527. eval_studio_client/api/docs/V1alphaGetPerturbatorResponse.md +0 -29
  528. eval_studio_client/api/docs/V1alphaGetTestCaseResponse.md +0 -29
  529. eval_studio_client/api/docs/V1alphaGetTestClassResponse.md +0 -29
  530. eval_studio_client/api/docs/V1alphaGetTestResponse.md +0 -29
  531. eval_studio_client/api/docs/V1alphaImportLeaderboardResponse.md +0 -29
  532. eval_studio_client/api/docs/V1alphaListBaseModelsResponse.md +0 -29
  533. eval_studio_client/api/docs/V1alphaListDashboardsResponse.md +0 -29
  534. eval_studio_client/api/docs/V1alphaListDocumentsResponse.md +0 -29
  535. eval_studio_client/api/docs/V1alphaListEvaluatorsResponse.md +0 -29
  536. eval_studio_client/api/docs/V1alphaListLLMModelsResponse.md +0 -29
  537. eval_studio_client/api/docs/V1alphaListLeaderboardsResponse.md +0 -30
  538. eval_studio_client/api/docs/V1alphaListModelCollectionsResponse.md +0 -29
  539. eval_studio_client/api/docs/V1alphaListModelsResponse.md +0 -29
  540. eval_studio_client/api/docs/V1alphaListMostRecentDashboardsResponse.md +0 -29
  541. eval_studio_client/api/docs/V1alphaListMostRecentLeaderboardsResponse.md +0 -29
  542. eval_studio_client/api/docs/V1alphaListMostRecentModelsResponse.md +0 -29
  543. eval_studio_client/api/docs/V1alphaListMostRecentTestsResponse.md +0 -29
  544. eval_studio_client/api/docs/V1alphaListOperationsResponse.md +0 -29
  545. eval_studio_client/api/docs/V1alphaListPerturbatorsResponse.md +0 -29
  546. eval_studio_client/api/docs/V1alphaListRAGCollectionsResponse.md +0 -29
  547. eval_studio_client/api/docs/V1alphaListTestCasesResponse.md +0 -29
  548. eval_studio_client/api/docs/V1alphaListTestClassesResponse.md +0 -29
  549. eval_studio_client/api/docs/V1alphaListTestsResponse.md +0 -29
  550. eval_studio_client/api/docs/V1alphaPerturbTestResponse.md +0 -29
  551. eval_studio_client/api/docs/V1alphaPerturbatorConfiguration.md +0 -32
  552. eval_studio_client/api/docs/V1alphaUpdateDashboardResponse.md +0 -29
  553. eval_studio_client/api/docs/V1alphaUpdateDocumentResponse.md +0 -29
  554. eval_studio_client/api/docs/V1alphaUpdateLeaderboardResponse.md +0 -29
  555. eval_studio_client/api/docs/V1alphaUpdateModelResponse.md +0 -29
  556. eval_studio_client/api/docs/V1alphaUpdateOperationResponse.md +0 -29
  557. eval_studio_client/api/docs/V1alphaUpdateTestCaseResponse.md +0 -29
  558. eval_studio_client/api/docs/V1alphaUpdateTestResponse.md +0 -29
  559. eval_studio_client/api/models/v1alpha_batch_create_leaderboards_response.py +0 -91
  560. eval_studio_client/api/models/v1alpha_batch_import_leaderboard_response.py +0 -91
  561. eval_studio_client/api/models/v1alpha_create_leaderboard_without_cache_response.py +0 -91
  562. eval_studio_client/api/models/v1alpha_list_most_recent_tests_response.py +0 -95
  563. eval_studio_client/api/models/v1alpha_perturb_test_response.py +0 -91
  564. eval_studio_client/api/test/test_v1alpha_batch_create_leaderboards_response.py +0 -71
  565. eval_studio_client/api/test/test_v1alpha_batch_delete_dashboards_request.py +0 -53
  566. eval_studio_client/api/test/test_v1alpha_batch_delete_evaluators_request.py +0 -53
  567. eval_studio_client/api/test/test_v1alpha_create_leaderboard_without_cache_response.py +0 -71
  568. eval_studio_client/api/test/test_v1alpha_evaluator_param_type.py +0 -33
  569. eval_studio_client/api/test/test_v1alpha_leaderboard_status.py +0 -33
  570. eval_studio_client/api/test/test_v1alpha_list_most_recent_tests_response.py +0 -69
  571. eval_studio_client/api/test/test_v1alpha_perturb_test_response.py +0 -67
  572. eval_studio_client/api/test/test_v1alpha_perturbator_intensity.py +0 -33
  573. eval_studio_client-0.8.0a2.dist-info/RECORD +0 -470
@@ -0,0 +1,104 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ ai/h2o/eval_studio/v1/collection.proto
5
+
6
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
7
+
8
+ The version of the OpenAPI document: version not set
9
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
10
+
11
+ Do not edit the class manually.
12
+ """ # noqa: E501
13
+
14
+
15
+ from __future__ import annotations
16
+ import pprint
17
+ import re # noqa: F401
18
+ import json
19
+
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
21
+ from typing import Any, ClassVar, Dict, List, Optional
22
+ from eval_studio_client.api.models.v1_model import V1Model
23
+ from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
+ """
+ PromptGenerationServiceAutoGeneratePromptsRequest
+ """ # noqa: E501
+ operation: Optional[StrictStr] = Field(default=None, description="Required. The Operation processing this prompt generation process.")
+ model: Optional[V1Model] = None
+ count: Optional[StrictInt] = Field(default=None, description="Required. The number of TestCases to generate.")
+ base_llm_model: Optional[StrictStr] = Field(default=None, description="Required. Base LLM model to use for generating the prompts.", alias="baseLlmModel")
+ document_urls: Optional[List[StrictStr]] = Field(default=None, description="Optional. The list of document URLs. The document URL might be a managed document URL or a public URL.", alias="documentUrls")
+ generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. Topics to generate TestCases for. If not specified, all topics are selected.")
+ h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. The ID of the h2oGPTe collection to use. If empty, new temporary collection will be created.", alias="h2ogpteCollectionId")
+ __properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "generators", "h2ogpteCollectionId"]
+
+ model_config = ConfigDict(
+ populate_by_name=True,
+ validate_assignment=True,
+ protected_namespaces=(),
+ )
+
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.model_dump(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Optional[Self]:
+ """Create an instance of PromptGenerationServiceAutoGeneratePromptsRequest from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Return the dictionary representation of the model using alias.
+
+ This has the following differences from calling pydantic's
+ `self.model_dump(by_alias=True)`:
+
+ * `None` is only added to the output dict for nullable fields that
+ were set at model initialization. Other fields with value `None`
+ are ignored.
+ """
+ excluded_fields: Set[str] = set([
+ ])
+
+ _dict = self.model_dump(
+ by_alias=True,
+ exclude=excluded_fields,
+ exclude_none=True,
+ )
+ # override the default output from pydantic by calling `to_dict()` of model
+ if self.model:
+ _dict['model'] = self.model.to_dict()
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+ """Create an instance of PromptGenerationServiceAutoGeneratePromptsRequest from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return cls.model_validate(obj)
+
+ _obj = cls.model_validate({
+ "operation": obj.get("operation"),
+ "model": V1Model.from_dict(obj["model"]) if obj.get("model") is not None else None,
+ "count": obj.get("count"),
+ "baseLlmModel": obj.get("baseLlmModel"),
+ "documentUrls": obj.get("documentUrls"),
+ "generators": obj.get("generators"),
+ "h2ogpteCollectionId": obj.get("h2ogpteCollectionId")
+ })
+ return _obj
+
+
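The helpers above (from_dict, to_dict, to_json) follow the usual OpenAPI Generator pattern, so the new request can be built from a plain wire-format dict. A minimal sketch; the module path and all values are illustrative assumptions, not taken from this diff:

# Minimal sketch: module path and values are illustrative assumptions, not taken from this diff.
from eval_studio_client.api.models.prompt_generation_service_auto_generate_prompts_request import (
    PromptGenerationServiceAutoGeneratePromptsRequest,
)

# from_dict() accepts the wire (alias) keys listed in __properties above.
request = PromptGenerationServiceAutoGeneratePromptsRequest.from_dict({
    "count": 10,                                       # Required. Number of TestCases to generate.
    "baseLlmModel": "example-llm",                     # Required. Base LLM model (illustrative name).
    "documentUrls": ["https://example.com/doc.pdf"],   # Optional. Managed or public document URLs.
})

print(request.to_json())  # serializes back out using the camelCase aliases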
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -20,7 +20,7 @@ import json
  from datetime import datetime
  from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
  from typing import Any, ClassVar, Dict, List, Optional
- from eval_studio_client.api.models.v1alpha_dashboard_status import V1alphaDashboardStatus
+ from eval_studio_client.api.models.v1_dashboard_status import V1DashboardStatus
  from typing import Optional, Set
  from typing_extensions import Self
 
@@ -36,7 +36,7 @@ class RequiredTheDashboardToUpdate(BaseModel):
  deleter: Optional[StrictStr] = Field(default=None, description="Output only. Optional. Name of the user or service that requested deletion of the Dashboard.")
  display_name: Optional[StrictStr] = Field(default=None, description="Human readable name of the Dashboard.", alias="displayName")
  description: Optional[StrictStr] = Field(default=None, description="Optional. Arbitrary description of the Dashboard.")
- status: Optional[V1alphaDashboardStatus] = None
+ status: Optional[V1DashboardStatus] = None
  leaderboards: Optional[List[StrictStr]] = Field(default=None, description="Immutable. Resource names of the Leaderboards used in this Dashboard.")
  create_operation: Optional[StrictStr] = Field(default=None, description="Output only. Operation resource name that created this Dashboard.", alias="createOperation")
  demo: Optional[StrictBool] = Field(default=None, description="Output only. Whether the Dashboard is a demo resource or not. Demo resources are read only.")
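The hunks above rename the generated status type (and its module) from the v1alpha namespace to v1, and the same v1alpha_* to v1_* pattern repeats across the remaining model files below. A hedged sketch of the import change downstream code would need; the exact symbols touched depend on your own imports:

# Before, against 0.8.0a2 (v1alpha namespace):
#   from eval_studio_client.api.models.v1alpha_dashboard_status import V1alphaDashboardStatus
# After, against 0.8.2 (v1 namespace), as imported by the generated model above:
from eval_studio_client.api.models.v1_dashboard_status import V1DashboardStatus

# Any annotations or isinstance checks follow the same rename,
# e.g. V1alphaDashboardStatus -> V1DashboardStatus.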
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -20,10 +20,10 @@ import json
  from datetime import datetime
  from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
  from typing import Any, ClassVar, Dict, List, Optional
- from eval_studio_client.api.models.v1alpha_insight import V1alphaInsight
- from eval_studio_client.api.models.v1alpha_leaderboard_status import V1alphaLeaderboardStatus
- from eval_studio_client.api.models.v1alpha_leaderboard_type import V1alphaLeaderboardType
- from eval_studio_client.api.models.v1alpha_problem_and_action import V1alphaProblemAndAction
+ from eval_studio_client.api.models.v1_insight import V1Insight
+ from eval_studio_client.api.models.v1_leaderboard_status import V1LeaderboardStatus
+ from eval_studio_client.api.models.v1_leaderboard_type import V1LeaderboardType
+ from eval_studio_client.api.models.v1_problem_and_action import V1ProblemAndAction
  from typing import Optional, Set
  from typing_extensions import Self
 
@@ -39,7 +39,7 @@ class RequiredTheLeaderboardToUpdate(BaseModel):
  deleter: Optional[StrictStr] = Field(default=None, description="Output only. Optional. Name of the user or service that requested deletion of the Leaderboard.")
  display_name: Optional[StrictStr] = Field(default=None, description="Human readable name of the Leaderboard.", alias="displayName")
  description: Optional[StrictStr] = Field(default=None, description="Optional. Arbitrary description of the Leaderboard.")
- status: Optional[V1alphaLeaderboardStatus] = None
+ status: Optional[V1LeaderboardStatus] = None
  evaluator: Optional[StrictStr] = Field(default=None, description="Immutable. Resource name of the Evaluator used in this Leaderboard.")
  tests: Optional[List[StrictStr]] = Field(default=None, description="Immutable. Resource names of the Tests used in this Leaderboard.")
  model: Optional[StrictStr] = Field(default=None, description="Immutable. Resource name of the Model used in this Leaderboard.")
@@ -48,12 +48,12 @@ class RequiredTheLeaderboardToUpdate(BaseModel):
  leaderboard_table: Optional[StrictStr] = Field(default=None, description="Output only. Leaderboard table in JSON format.", alias="leaderboardTable")
  leaderboard_summary: Optional[StrictStr] = Field(default=None, description="Output only. Leaderboard summary in Markdown format.", alias="leaderboardSummary")
  llm_models: Optional[List[StrictStr]] = Field(default=None, description="Immutable. System names of the LLM models used in this Leaderboard.", alias="llmModels")
- leaderboard_problems: Optional[List[V1alphaProblemAndAction]] = Field(default=None, description="Output only. Leaderboard problems and actions.", alias="leaderboardProblems")
+ leaderboard_problems: Optional[List[V1ProblemAndAction]] = Field(default=None, description="Output only. Leaderboard problems and actions.", alias="leaderboardProblems")
  evaluator_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Evaluator parameters setup.", alias="evaluatorParameters")
- insights: Optional[List[V1alphaInsight]] = Field(default=None, description="Output only. Insights from the Leaderboard.")
+ insights: Optional[List[V1Insight]] = Field(default=None, description="Output only. Insights from the Leaderboard.")
  model_parameters: Optional[StrictStr] = Field(default=None, description="Optional. Prameters overrides in JSON format.", alias="modelParameters")
  h2ogpte_collection: Optional[StrictStr] = Field(default=None, description="The existing collection name in H2OGPTe.", alias="h2ogpteCollection")
- type: Optional[V1alphaLeaderboardType] = None
+ type: Optional[V1LeaderboardType] = None
  demo: Optional[StrictBool] = Field(default=None, description="Output only. Whether the Leaderboard is a demo resource or not. Demo resources are read only.")
  test_lab: Optional[StrictStr] = Field(default=None, description="Optional. Resource name of the TestLab if Leaderboard was created from a imported TestLab.", alias="testLab")
  __properties: ClassVar[List[str]] = ["createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "status", "evaluator", "tests", "model", "createOperation", "leaderboardReport", "leaderboardTable", "leaderboardSummary", "llmModels", "leaderboardProblems", "evaluatorParameters", "insights", "modelParameters", "h2ogpteCollection", "type", "demo", "testLab"]
@@ -164,9 +164,9 @@ class RequiredTheLeaderboardToUpdate(BaseModel):
  "leaderboardTable": obj.get("leaderboardTable"),
  "leaderboardSummary": obj.get("leaderboardSummary"),
  "llmModels": obj.get("llmModels"),
- "leaderboardProblems": [V1alphaProblemAndAction.from_dict(_item) for _item in obj["leaderboardProblems"]] if obj.get("leaderboardProblems") is not None else None,
+ "leaderboardProblems": [V1ProblemAndAction.from_dict(_item) for _item in obj["leaderboardProblems"]] if obj.get("leaderboardProblems") is not None else None,
  "evaluatorParameters": obj.get("evaluatorParameters"),
- "insights": [V1alphaInsight.from_dict(_item) for _item in obj["insights"]] if obj.get("insights") is not None else None,
+ "insights": [V1Insight.from_dict(_item) for _item in obj["insights"]] if obj.get("insights") is not None else None,
  "modelParameters": obj.get("modelParameters"),
  "h2ogpteCollection": obj.get("h2ogpteCollection"),
  "type": obj.get("type"),
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -20,7 +20,7 @@ import json
  from datetime import datetime
  from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
  from typing import Any, ClassVar, Dict, List, Optional
- from eval_studio_client.api.models.v1alpha_model_type import V1alphaModelType
+ from eval_studio_client.api.models.v1_model_type import V1ModelType
  from typing import Optional, Set
  from typing_extensions import Self
 
@@ -38,7 +38,7 @@ class RequiredTheModelToUpdate(BaseModel):
  description: Optional[StrictStr] = Field(default=None, description="Optional. Arbitrary description of the Model.")
  url: Optional[StrictStr] = Field(default=None, description="Optional. Immutable. Absolute URL to the Model.")
  api_key: Optional[StrictStr] = Field(default=None, description="Optional. API key used to access the Model. Not set for read calls (i.e. get, list) by public clients (front-end). Set only for internal (server-to-worker) communication.", alias="apiKey")
- type: Optional[V1alphaModelType] = None
+ type: Optional[V1ModelType] = None
  parameters: Optional[StrictStr] = Field(default=None, description="Optional. Model specific parameters in JSON format.")
  demo: Optional[StrictBool] = Field(default=None, description="Output only. Whether the Model is a demo resource or not. Demo resources are read only.")
  __properties: ClassVar[List[str]] = ["createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "url", "apiKey", "type", "parameters", "demo"]
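These generated models set populate_by_name=True (visible in the new request models above), so update payloads can be built with either the snake_case field names or the camelCase wire aliases; serialization always emits the aliases. A hedged sketch, with the module path assumed:

# Sketch; module path assumed from the generator's snake_case file naming, values illustrative.
from eval_studio_client.api.models.required_the_model_to_update import RequiredTheModelToUpdate

# Same payload built two ways: python field names or camelCase wire aliases.
a = RequiredTheModelToUpdate(api_key="sk-example", url="https://rag.example.com")
b = RequiredTheModelToUpdate.from_dict({"apiKey": "sk-example", "url": "https://rag.example.com"})

assert a.to_dict() == b.to_dict()   # both serialize with the 'url' and 'apiKey' aliases
print(a.to_json())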
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -0,0 +1,96 @@
+ # coding: utf-8
+
+ """
+ ai/h2o/eval_studio/v1/collection.proto
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: version not set
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
+ from typing import Any, ClassVar, Dict, List, Optional
+ from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class TestServiceGenerateTestCasesRequest(BaseModel):
+ """
+ TestServiceGenerateTestCasesRequest
+ """ # noqa: E501
+ count: Optional[StrictInt] = Field(default=None, description="Required. The number of TestCases to generate.")
+ model: Optional[StrictStr] = Field(default=None, description="Optional. The Model to use for generating TestCases. If not specified, the default RAG h2oGPTe will be used. Error is returned, if no default model is specified and this field is not set.")
+ base_llm_model: Optional[StrictStr] = Field(default=None, description="Optional. The base LLM model to use for generating the prompts. Selected automatically if not specified.", alias="baseLlmModel")
+ generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. Generators to use for generation. If not specified, all generators are selected.")
+ h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. The ID of the h2oGPTe collection to use. If empty, new temporary collection will be created.", alias="h2ogpteCollectionId")
+ __properties: ClassVar[List[str]] = ["count", "model", "baseLlmModel", "generators", "h2ogpteCollectionId"]
+
+ model_config = ConfigDict(
+ populate_by_name=True,
+ validate_assignment=True,
+ protected_namespaces=(),
+ )
+
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.model_dump(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Optional[Self]:
+ """Create an instance of TestServiceGenerateTestCasesRequest from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Return the dictionary representation of the model using alias.
+
+ This has the following differences from calling pydantic's
+ `self.model_dump(by_alias=True)`:
+
+ * `None` is only added to the output dict for nullable fields that
+ were set at model initialization. Other fields with value `None`
+ are ignored.
+ """
+ excluded_fields: Set[str] = set([
+ ])
+
+ _dict = self.model_dump(
+ by_alias=True,
+ exclude=excluded_fields,
+ exclude_none=True,
+ )
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+ """Create an instance of TestServiceGenerateTestCasesRequest from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return cls.model_validate(obj)
+
+ _obj = cls.model_validate({
+ "count": obj.get("count"),
+ "model": obj.get("model"),
+ "baseLlmModel": obj.get("baseLlmModel"),
+ "generators": obj.get("generators"),
+ "h2ogpteCollectionId": obj.get("h2ogpteCollectionId")
+ })
+ return _obj
+
+
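Unlike PromptGenerationServiceAutoGeneratePromptsRequest above, which embeds a V1Model object, this request refers to the Model by resource name as a plain string. A minimal round-trip sketch; the module path and values are illustrative:

# Sketch; module path assumed, values illustrative.
from eval_studio_client.api.models.test_service_generate_test_cases_request import (
    TestServiceGenerateTestCasesRequest,
)

payload = {
    "count": 5,             # Required. Number of TestCases to generate.
    "model": "models/123",  # Optional. Model resource name (illustrative).
}

request = TestServiceGenerateTestCasesRequest.from_dict(payload)
assert request.count == 5

# to_json() round-trips through to_dict(), emitting camelCase aliases and dropping unset fields.
print(request.to_json())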
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -19,7 +19,7 @@ import json
 
  from pydantic import BaseModel, ConfigDict, Field, StrictStr
  from typing import Any, ClassVar, Dict, List, Optional
- from eval_studio_client.api.models.v1alpha_perturbator_configuration import V1alphaPerturbatorConfiguration
+ from eval_studio_client.api.models.v1_perturbator_configuration import V1PerturbatorConfiguration
  from typing import Optional, Set
  from typing_extensions import Self
 
@@ -27,7 +27,7 @@ class TestServicePerturbTestRequest(BaseModel):
  """
  TestServicePerturbTestRequest
  """ # noqa: E501
- perturbator_configurations: Optional[List[V1alphaPerturbatorConfiguration]] = Field(default=None, description="Required. PerturbatorConfigurations to apply to the Test.", alias="perturbatorConfigurations")
+ perturbator_configurations: Optional[List[V1PerturbatorConfiguration]] = Field(default=None, description="Required. PerturbatorConfigurations to apply to the Test.", alias="perturbatorConfigurations")
  new_test_display_name: Optional[StrictStr] = Field(default=None, description="Required. Name of the newly created test.", alias="newTestDisplayName")
  new_test_description: Optional[StrictStr] = Field(default=None, description="Optional. Description of the newly created Test.", alias="newTestDescription")
  __properties: ClassVar[List[str]] = ["perturbatorConfigurations", "newTestDisplayName", "newTestDescription"]
@@ -90,7 +90,7 @@ class TestServicePerturbTestRequest(BaseModel):
  return cls.model_validate(obj)
 
  _obj = cls.model_validate({
- "perturbatorConfigurations": [V1alphaPerturbatorConfiguration.from_dict(_item) for _item in obj["perturbatorConfigurations"]] if obj.get("perturbatorConfigurations") is not None else None,
+ "perturbatorConfigurations": [V1PerturbatorConfiguration.from_dict(_item) for _item in obj["perturbatorConfigurations"]] if obj.get("perturbatorConfigurations") is not None else None,
  "newTestDisplayName": obj.get("newTestDisplayName"),
  "newTestDescription": obj.get("newTestDescription")
  })
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -19,15 +19,15 @@ import json
 
  from pydantic import BaseModel, ConfigDict, Field, StrictStr
  from typing import Any, ClassVar, Dict, List, Optional
- from eval_studio_client.api.models.v1alpha_create_leaderboard_request import V1alphaCreateLeaderboardRequest
+ from eval_studio_client.api.models.v1_create_leaderboard_request import V1CreateLeaderboardRequest
  from typing import Optional, Set
  from typing_extensions import Self
 
- class V1alphaBatchCreateLeaderboardsRequest(BaseModel):
+ class V1BatchCreateLeaderboardsRequest(BaseModel):
  """
- V1alphaBatchCreateLeaderboardsRequest
+ V1BatchCreateLeaderboardsRequest
  """ # noqa: E501
- requests: Optional[List[V1alphaCreateLeaderboardRequest]] = Field(default=None, description="Required. Contains list of requests for leaderboards to be created.")
+ requests: Optional[List[V1CreateLeaderboardRequest]] = Field(default=None, description="Required. Contains list of requests for leaderboards to be created.")
  dashboard_display_name: Optional[StrictStr] = Field(default=None, description="Optional. Display name for the dashboard that will group the leaderboards.", alias="dashboardDisplayName")
  dashboard_description: Optional[StrictStr] = Field(default=None, description="Optional. Description for the dashboard that will group the leaderboards.", alias="dashboardDescription")
  __properties: ClassVar[List[str]] = ["requests", "dashboardDisplayName", "dashboardDescription"]
@@ -50,7 +50,7 @@ class V1alphaBatchCreateLeaderboardsRequest(BaseModel):
 
  @classmethod
  def from_json(cls, json_str: str) -> Optional[Self]:
- """Create an instance of V1alphaBatchCreateLeaderboardsRequest from a JSON string"""
+ """Create an instance of V1BatchCreateLeaderboardsRequest from a JSON string"""
  return cls.from_dict(json.loads(json_str))
 
  def to_dict(self) -> Dict[str, Any]:
@@ -82,7 +82,7 @@ class V1alphaBatchCreateLeaderboardsRequest(BaseModel):
 
  @classmethod
  def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
- """Create an instance of V1alphaBatchCreateLeaderboardsRequest from a dict"""
+ """Create an instance of V1BatchCreateLeaderboardsRequest from a dict"""
  if obj is None:
  return None
 
@@ -90,7 +90,7 @@ class V1alphaBatchCreateLeaderboardsRequest(BaseModel):
  return cls.model_validate(obj)
 
  _obj = cls.model_validate({
- "requests": [V1alphaCreateLeaderboardRequest.from_dict(_item) for _item in obj["requests"]] if obj.get("requests") is not None else None,
+ "requests": [V1CreateLeaderboardRequest.from_dict(_item) for _item in obj["requests"]] if obj.get("requests") is not None else None,
  "dashboardDisplayName": obj.get("dashboardDisplayName"),
  "dashboardDescription": obj.get("dashboardDescription")
  })
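The batch request wraps a list of per-leaderboard create requests plus optional dashboard metadata that groups them. A hedged sketch; the module path is assumed, and because the shape of a V1CreateLeaderboardRequest entry is not shown in this section the list is left empty:

# Sketch; module path assumed, values illustrative.
from eval_studio_client.api.models.v1_batch_create_leaderboards_request import (
    V1BatchCreateLeaderboardsRequest,
)

batch = V1BatchCreateLeaderboardsRequest.from_dict({
    "requests": [],                                # one V1CreateLeaderboardRequest dict per leaderboard
    "dashboardDisplayName": "Release comparison",  # optional dashboard that groups the leaderboards
    "dashboardDescription": "Illustrative description",
})
print(batch.to_json())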
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -19,15 +19,15 @@ import json
 
  from pydantic import BaseModel, ConfigDict
  from typing import Any, ClassVar, Dict, List, Optional
- from eval_studio_client.api.models.v1alpha_operation import V1alphaOperation
+ from eval_studio_client.api.models.v1_operation import V1Operation
  from typing import Optional, Set
  from typing_extensions import Self
 
- class V1alphaUpdateOperationResponse(BaseModel):
+ class V1BatchCreateLeaderboardsResponse(BaseModel):
  """
- V1alphaUpdateOperationResponse
+ V1BatchCreateLeaderboardsResponse
  """ # noqa: E501
- operation: Optional[V1alphaOperation] = None
+ operation: Optional[V1Operation] = None
  __properties: ClassVar[List[str]] = ["operation"]
 
  model_config = ConfigDict(
@@ -48,7 +48,7 @@ class V1alphaUpdateOperationResponse(BaseModel):
 
  @classmethod
  def from_json(cls, json_str: str) -> Optional[Self]:
- """Create an instance of V1alphaUpdateOperationResponse from a JSON string"""
+ """Create an instance of V1BatchCreateLeaderboardsResponse from a JSON string"""
  return cls.from_dict(json.loads(json_str))
 
  def to_dict(self) -> Dict[str, Any]:
@@ -76,7 +76,7 @@ class V1alphaUpdateOperationResponse(BaseModel):
 
  @classmethod
  def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
- """Create an instance of V1alphaUpdateOperationResponse from a dict"""
+ """Create an instance of V1BatchCreateLeaderboardsResponse from a dict"""
  if obj is None:
  return None
 
@@ -84,7 +84,7 @@ class V1alphaUpdateOperationResponse(BaseModel):
  return cls.model_validate(obj)
 
  _obj = cls.model_validate({
- "operation": V1alphaOperation.from_dict(obj["operation"]) if obj.get("operation") is not None else None
+ "operation": V1Operation.from_dict(obj["operation"]) if obj.get("operation") is not None else None
  })
  return _obj
 
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -22,9 +22,9 @@ from typing import Any, ClassVar, Dict, List, Optional
  from typing import Optional, Set
  from typing_extensions import Self
 
- class V1alphaBatchDeleteDashboardsRequest(BaseModel):
+ class V1BatchDeleteDashboardsRequest(BaseModel):
  """
- V1alphaBatchDeleteDashboardsRequest
+ V1BatchDeleteDashboardsRequest
  """ # noqa: E501
  names: Optional[List[StrictStr]] = Field(default=None, description="Required. The names of the Dashboards to delete. A maximum of 1000 can be specified.")
  __properties: ClassVar[List[str]] = ["names"]
@@ -47,7 +47,7 @@ class V1alphaBatchDeleteDashboardsRequest(BaseModel):
 
  @classmethod
  def from_json(cls, json_str: str) -> Optional[Self]:
- """Create an instance of V1alphaBatchDeleteDashboardsRequest from a JSON string"""
+ """Create an instance of V1BatchDeleteDashboardsRequest from a JSON string"""
  return cls.from_dict(json.loads(json_str))
 
  def to_dict(self) -> Dict[str, Any]:
@@ -72,7 +72,7 @@ class V1alphaBatchDeleteDashboardsRequest(BaseModel):
 
  @classmethod
  def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
- """Create an instance of V1alphaBatchDeleteDashboardsRequest from a dict"""
+ """Create an instance of V1BatchDeleteDashboardsRequest from a dict"""
  if obj is None:
  return None
 
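The batch delete request is only a list of Dashboard resource names, capped at 1000 per call according to the generated description. A hedged sketch with placeholder names; the module path is assumed:

# Sketch; module path assumed, resource names are placeholders.
from eval_studio_client.api.models.v1_batch_delete_dashboards_request import (
    V1BatchDeleteDashboardsRequest,
)

names = ["dashboards/example-1", "dashboards/example-2"]   # at most 1000 names per request
delete_request = V1BatchDeleteDashboardsRequest(names=names)
print(delete_request.to_json())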
@@ -1,7 +1,7 @@
  # coding: utf-8
 
  """
- ai/h2o/eval_studio/v1alpha/collection.proto
+ ai/h2o/eval_studio/v1/collection.proto
 
  No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 
@@ -19,15 +19,15 @@ import json
 
  from pydantic import BaseModel, ConfigDict, Field
  from typing import Any, ClassVar, Dict, List, Optional
- from eval_studio_client.api.models.v1alpha_dashboard import V1alphaDashboard
+ from eval_studio_client.api.models.v1_dashboard import V1Dashboard
  from typing import Optional, Set
  from typing_extensions import Self
 
- class V1alphaListDashboardsResponse(BaseModel):
+ class V1BatchDeleteDashboardsResponse(BaseModel):
  """
- V1alphaListDashboardsResponse
+ V1BatchDeleteDashboardsResponse
  """ # noqa: E501
- dashboards: Optional[List[V1alphaDashboard]] = Field(default=None, description="The list of Dashboards.")
+ dashboards: Optional[List[V1Dashboard]] = Field(default=None, description="The deleted Dashboards.")
  __properties: ClassVar[List[str]] = ["dashboards"]
 
  model_config = ConfigDict(
@@ -48,7 +48,7 @@ class V1alphaListDashboardsResponse(BaseModel):
 
  @classmethod
  def from_json(cls, json_str: str) -> Optional[Self]:
- """Create an instance of V1alphaListDashboardsResponse from a JSON string"""
+ """Create an instance of V1BatchDeleteDashboardsResponse from a JSON string"""
  return cls.from_dict(json.loads(json_str))
 
  def to_dict(self) -> Dict[str, Any]:
@@ -80,7 +80,7 @@ class V1alphaListDashboardsResponse(BaseModel):
 
  @classmethod
  def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
- """Create an instance of V1alphaListDashboardsResponse from a dict"""
+ """Create an instance of V1BatchDeleteDashboardsResponse from a dict"""
  if obj is None:
  return None
 
@@ -88,7 +88,7 @@ class V1alphaListDashboardsResponse(BaseModel):
  return cls.model_validate(obj)
 
  _obj = cls.model_validate({
- "dashboards": [V1alphaDashboard.from_dict(_item) for _item in obj["dashboards"]] if obj.get("dashboards") is not None else None
+ "dashboards": [V1Dashboard.from_dict(_item) for _item in obj["dashboards"]] if obj.get("dashboards") is not None else None
  })
  return _obj
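The delete response returns the deleted Dashboards themselves, each rebuilt via V1Dashboard.from_dict(). A hedged sketch with an illustrative payload, assuming V1Dashboard exposes display_name with the displayName alias like the Dashboard update model above:

# Sketch; module path assumed and the JSON payload is illustrative.
from eval_studio_client.api.models.v1_batch_delete_dashboards_response import (
    V1BatchDeleteDashboardsResponse,
)

raw = '{"dashboards": [{"displayName": "Old demo dashboard"}]}'
response = V1BatchDeleteDashboardsResponse.from_json(raw)

# Each entry is parsed into a V1Dashboard model; unknown fields simply stay unset.
for dashboard in response.dashboards or []:
    print(dashboard.display_name)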