eval-studio-client 1.1.0a5__py3-none-any.whl → 1.1.0a7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. eval_studio_client/api/__init__.py +7 -3
  2. eval_studio_client/api/api/__init__.py +0 -1
  3. eval_studio_client/api/api/dashboard_service_api.py +17 -0
  4. eval_studio_client/api/api/test_service_api.py +34 -0
  5. eval_studio_client/api/api/workflow_service_api.py +510 -0
  6. eval_studio_client/api/docs/DashboardServiceApi.md +4 -2
  7. eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +1 -1
  8. eval_studio_client/api/docs/RequiredTheDashboardToUpdate.md +1 -0
  9. eval_studio_client/api/docs/RequiredTheTestToUpdate.md +1 -0
  10. eval_studio_client/api/docs/TestServiceApi.md +8 -4
  11. eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +1 -1
  12. eval_studio_client/api/docs/V1Context.md +9 -4
  13. eval_studio_client/api/docs/V1Dashboard.md +1 -0
  14. eval_studio_client/api/docs/V1DashboardType.md +12 -0
  15. eval_studio_client/api/docs/V1DependencyList.md +30 -0
  16. eval_studio_client/api/docs/V1Evaluator.md +2 -0
  17. eval_studio_client/api/docs/V1GetGuardrailsConfigurationResponse.md +29 -0
  18. eval_studio_client/api/docs/V1LeaderboardReportModel.md +0 -2
  19. eval_studio_client/api/docs/V1ListWorkflowDependenciesResponse.md +30 -0
  20. eval_studio_client/api/docs/V1RepeatedContext.md +29 -0
  21. eval_studio_client/api/docs/V1Test.md +1 -0
  22. eval_studio_client/api/docs/V1TestType.md +12 -0
  23. eval_studio_client/api/docs/V1WorkflowDependency.md +30 -0
  24. eval_studio_client/api/docs/V1WorkflowEdgeType.md +1 -1
  25. eval_studio_client/api/docs/V1WorkflowNodeArtifact.md +1 -0
  26. eval_studio_client/api/docs/WorkflowServiceApi.md +135 -0
  27. eval_studio_client/api/models/__init__.py +7 -2
  28. eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +3 -2
  29. eval_studio_client/api/models/required_the_dashboard_to_update.py +5 -2
  30. eval_studio_client/api/models/required_the_test_to_update.py +5 -2
  31. eval_studio_client/api/models/test_service_generate_test_cases_request.py +10 -2
  32. eval_studio_client/api/models/v1_context.py +16 -6
  33. eval_studio_client/api/models/v1_dashboard.py +5 -2
  34. eval_studio_client/api/models/v1_dashboard_type.py +38 -0
  35. eval_studio_client/api/models/{generated_questions_validation_service_validate_generated_questions_request.py → v1_dependency_list.py} +14 -14
  36. eval_studio_client/api/models/v1_evaluator.py +11 -3
  37. eval_studio_client/api/models/v1_get_guardrails_configuration_response.py +87 -0
  38. eval_studio_client/api/models/v1_leaderboard_report_model.py +2 -10
  39. eval_studio_client/api/models/v1_list_workflow_dependencies_response.py +105 -0
  40. eval_studio_client/api/models/{v1_generated_test_case.py → v1_repeated_context.py} +12 -18
  41. eval_studio_client/api/models/v1_test.py +5 -2
  42. eval_studio_client/api/models/v1_test_type.py +38 -0
  43. eval_studio_client/api/models/v1_workflow_dependency.py +89 -0
  44. eval_studio_client/api/models/v1_workflow_edge_type.py +3 -2
  45. eval_studio_client/api/models/v1_workflow_node_artifact.py +6 -2
  46. eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +10 -2
  47. eval_studio_client/api/test/test_required_the_dashboard_to_update.py +2 -1
  48. eval_studio_client/api/test/test_required_the_test_to_update.py +2 -1
  49. eval_studio_client/api/test/test_required_the_updated_workflow.py +2 -1
  50. eval_studio_client/api/test/test_required_the_updated_workflow_node.py +2 -1
  51. eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +10 -1
  52. eval_studio_client/api/test/test_v1_batch_delete_dashboards_response.py +2 -1
  53. eval_studio_client/api/test/test_v1_batch_delete_evaluators_response.py +3 -1
  54. eval_studio_client/api/test/test_v1_batch_delete_tests_response.py +2 -1
  55. eval_studio_client/api/test/test_v1_batch_get_dashboards_response.py +2 -1
  56. eval_studio_client/api/test/test_v1_batch_get_tests_response.py +2 -1
  57. eval_studio_client/api/test/test_v1_batch_import_tests_response.py +2 -1
  58. eval_studio_client/api/test/test_v1_clone_test_response.py +2 -1
  59. eval_studio_client/api/test/test_v1_context.py +6 -1
  60. eval_studio_client/api/test/test_v1_create_dashboard_response.py +2 -1
  61. eval_studio_client/api/test/test_v1_create_evaluator_response.py +3 -1
  62. eval_studio_client/api/test/test_v1_create_test_response.py +2 -1
  63. eval_studio_client/api/test/test_v1_dashboard.py +2 -1
  64. eval_studio_client/api/test/test_v1_dashboard_type.py +33 -0
  65. eval_studio_client/api/test/test_v1_delete_dashboard_response.py +2 -1
  66. eval_studio_client/api/test/test_v1_delete_evaluator_response.py +3 -1
  67. eval_studio_client/api/test/test_v1_delete_test_response.py +2 -1
  68. eval_studio_client/api/test/test_v1_dependency_list.py +56 -0
  69. eval_studio_client/api/test/test_v1_evaluator.py +3 -1
  70. eval_studio_client/api/test/test_v1_get_dashboard_response.py +2 -1
  71. eval_studio_client/api/test/test_v1_get_evaluator_response.py +3 -1
  72. eval_studio_client/api/test/test_v1_get_guardrails_configuration_response.py +51 -0
  73. eval_studio_client/api/test/test_v1_get_leaderboard_report_response.py +1 -3
  74. eval_studio_client/api/test/test_v1_get_test_response.py +2 -1
  75. eval_studio_client/api/test/test_v1_leaderboard_report.py +1 -3
  76. eval_studio_client/api/test/test_v1_leaderboard_report_model.py +1 -3
  77. eval_studio_client/api/test/test_v1_list_dashboards_response.py +2 -1
  78. eval_studio_client/api/test/test_v1_list_evaluators_response.py +3 -1
  79. eval_studio_client/api/test/test_v1_list_most_recent_dashboards_response.py +2 -1
  80. eval_studio_client/api/test/test_v1_list_most_recent_tests_response.py +2 -1
  81. eval_studio_client/api/test/test_v1_list_tests_response.py +2 -1
  82. eval_studio_client/api/test/test_v1_list_workflow_dependencies_response.py +93 -0
  83. eval_studio_client/api/test/test_v1_perturb_test_in_place_response.py +2 -1
  84. eval_studio_client/api/test/test_v1_perturb_test_response.py +2 -1
  85. eval_studio_client/api/test/test_v1_repeated_context.py +62 -0
  86. eval_studio_client/api/test/test_v1_test.py +2 -1
  87. eval_studio_client/api/test/test_v1_test_type.py +33 -0
  88. eval_studio_client/api/test/test_v1_update_dashboard_response.py +2 -1
  89. eval_studio_client/api/test/test_v1_update_test_response.py +2 -1
  90. eval_studio_client/api/test/test_v1_workflow.py +2 -1
  91. eval_studio_client/api/test/test_v1_workflow_dependency.py +52 -0
  92. eval_studio_client/api/test/test_v1_workflow_node.py +2 -1
  93. eval_studio_client/api/test/test_v1_workflow_node_artifact.py +2 -1
  94. eval_studio_client/api/test/test_v1_workflow_node_artifacts.py +2 -1
  95. eval_studio_client/api/test/test_workflow_service_api.py +13 -0
  96. eval_studio_client/client.py +11 -2
  97. eval_studio_client/gen/openapiv2/eval_studio.swagger.json +241 -92
  98. {eval_studio_client-1.1.0a5.dist-info → eval_studio_client-1.1.0a7.dist-info}/METADATA +1 -1
  99. {eval_studio_client-1.1.0a5.dist-info → eval_studio_client-1.1.0a7.dist-info}/RECORD +100 -88
  100. eval_studio_client/api/api/generated_questions_validation_service_api.py +0 -321
  101. eval_studio_client/api/docs/GeneratedQuestionsValidationServiceApi.md +0 -78
  102. eval_studio_client/api/docs/GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest.md +0 -30
  103. eval_studio_client/api/docs/V1GeneratedTestCase.md +0 -30
  104. eval_studio_client/api/test/test_generated_questions_validation_service_api.py +0 -37
  105. eval_studio_client/api/test/test_generated_questions_validation_service_validate_generated_questions_request.py +0 -83
  106. eval_studio_client/api/test/test_v1_generated_test_case.py +0 -79
  107. {eval_studio_client-1.1.0a5.dist-info → eval_studio_client-1.1.0a7.dist-info}/WHEEL +0 -0
@@ -21,6 +21,7 @@ from datetime import datetime
  from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
  from typing import Any, ClassVar, Dict, List, Optional
  from eval_studio_client.api.models.v1_dashboard_status import V1DashboardStatus
+ from eval_studio_client.api.models.v1_dashboard_type import V1DashboardType
  from typing import Optional, Set
  from typing_extensions import Self
 
@@ -41,7 +42,8 @@ class V1Dashboard(BaseModel):
  leaderboards: Optional[List[StrictStr]] = Field(default=None, description="Immutable. Resource names of the Leaderboards used in this Dashboard.")
  create_operation: Optional[StrictStr] = Field(default=None, description="Output only. Operation resource name that created this Dashboard.", alias="createOperation")
  demo: Optional[StrictBool] = Field(default=None, description="Output only. Whether the Dashboard is a demo resource or not. Demo resources are read only.")
- __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "status", "leaderboards", "createOperation", "demo"]
+ type: Optional[V1DashboardType] = None
+ __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "status", "leaderboards", "createOperation", "demo", "type"]
 
  model_config = ConfigDict(
  populate_by_name=True,
@@ -124,7 +126,8 @@ class V1Dashboard(BaseModel):
  "status": obj.get("status"),
  "leaderboards": obj.get("leaderboards"),
  "createOperation": obj.get("createOperation"),
- "demo": obj.get("demo")
+ "demo": obj.get("demo"),
+ "type": obj.get("type")
  })
  return _obj
 
@@ -0,0 +1,38 @@
+ # coding: utf-8
+
+ """
+ ai/h2o/eval_studio/v1/insight.proto
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: version not set
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import json
+ from enum import Enum
+ from typing_extensions import Self
+
+
+ class V1DashboardType(str, Enum):
+ """
+ - DASHBOARD_TYPE_UNSPECIFIED: Unspecified type. - DASHBOARD_TYPE_STANDALONE: Standalone dashboard. - DASHBOARD_TYPE_WORKFLOW: Dashboard is part of a dashboard group.
+ """
+
+ """
+ allowed enum values
+ """
+ DASHBOARD_TYPE_UNSPECIFIED = 'DASHBOARD_TYPE_UNSPECIFIED'
+ DASHBOARD_TYPE_STANDALONE = 'DASHBOARD_TYPE_STANDALONE'
+ DASHBOARD_TYPE_WORKFLOW = 'DASHBOARD_TYPE_WORKFLOW'
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Self:
+ """Create an instance of V1DashboardType from a JSON string"""
+ return cls(json.loads(json_str))
+
+
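The hunks above add an optional type field to V1Dashboard, backed by the new V1DashboardType enum. A minimal sketch of how the generated model round-trips the new field, assuming the module paths shown in the diff; the displayName value is a made-up placeholder:

from eval_studio_client.api.models.v1_dashboard import V1Dashboard
from eval_studio_client.api.models.v1_dashboard_type import V1DashboardType

# Parse a wire payload that carries the new "type" key (placeholder values).
dashboard = V1Dashboard.from_dict({
    "displayName": "RAG regression run",
    "type": "DASHBOARD_TYPE_WORKFLOW",
})
assert dashboard.type is V1DashboardType.DASHBOARD_TYPE_WORKFLOW

# The enum also deserializes directly from a JSON string literal.
assert V1DashboardType.from_json('"DASHBOARD_TYPE_STANDALONE"') is V1DashboardType.DASHBOARD_TYPE_STANDALONE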
@@ -19,17 +19,17 @@ import json
 
  from pydantic import BaseModel, ConfigDict, Field, StrictStr
  from typing import Any, ClassVar, Dict, List, Optional
- from eval_studio_client.api.models.v1_generated_test_case import V1GeneratedTestCase
+ from eval_studio_client.api.models.v1_workflow_dependency import V1WorkflowDependency
  from typing import Optional, Set
  from typing_extensions import Self
 
- class GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest(BaseModel):
+ class V1DependencyList(BaseModel):
  """
- GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest
+ V1DependencyList
  """ # noqa: E501
- operation: Optional[StrictStr] = Field(default=None, description="Required. The Operation processing this question validation process.")
- test_cases: Optional[List[V1GeneratedTestCase]] = Field(default=None, description="Required. Generated Test Cases, i.e., Test cases with context that was used for their generation.", alias="testCases")
- __properties: ClassVar[List[str]] = ["operation", "testCases"]
+ node: Optional[StrictStr] = Field(default=None, description="The name of the Workflow Node to retrieve dependencies for.")
+ dependencies: Optional[List[V1WorkflowDependency]] = Field(default=None, description="The names of the Workflow Node dependencies.")
+ __properties: ClassVar[List[str]] = ["node", "dependencies"]
 
  model_config = ConfigDict(
  populate_by_name=True,
@@ -49,7 +49,7 @@ class GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest(BaseM
 
  @classmethod
  def from_json(cls, json_str: str) -> Optional[Self]:
- """Create an instance of GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest from a JSON string"""
+ """Create an instance of V1DependencyList from a JSON string"""
  return cls.from_dict(json.loads(json_str))
 
  def to_dict(self) -> Dict[str, Any]:
@@ -70,18 +70,18 @@ class GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest(BaseM
  exclude=excluded_fields,
  exclude_none=True,
  )
- # override the default output from pydantic by calling `to_dict()` of each item in test_cases (list)
+ # override the default output from pydantic by calling `to_dict()` of each item in dependencies (list)
  _items = []
- if self.test_cases:
- for _item in self.test_cases:
+ if self.dependencies:
+ for _item in self.dependencies:
  if _item:
  _items.append(_item.to_dict())
- _dict['testCases'] = _items
+ _dict['dependencies'] = _items
  return _dict
 
  @classmethod
  def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
- """Create an instance of GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest from a dict"""
+ """Create an instance of V1DependencyList from a dict"""
  if obj is None:
  return None
 
@@ -89,8 +89,8 @@ class GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest(BaseM
  return cls.model_validate(obj)
 
  _obj = cls.model_validate({
- "operation": obj.get("operation"),
- "testCases": [V1GeneratedTestCase.from_dict(_item) for _item in obj["testCases"]] if obj.get("testCases") is not None else None
+ "node": obj.get("node"),
+ "dependencies": [V1WorkflowDependency.from_dict(_item) for _item in obj["dependencies"]] if obj.get("dependencies") is not None else None
  })
  return _obj
 
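V1DependencyList replaces the removed GeneratedQuestionsValidationServiceValidateGeneratedQuestionsRequest module and pairs a Workflow Node name with its V1WorkflowDependency entries. A minimal parsing sketch using only the fields shown in the diff; the node resource names are hypothetical placeholders:

from eval_studio_client.api.models.v1_dependency_list import V1DependencyList

# Hypothetical payload shaped the way the generated from_dict() expects.
entry = V1DependencyList.from_dict({
    "node": "workflowNodes/evaluate",
    "dependencies": [
        {"name": "workflowNodes/ingest", "optional": False},
        {"name": "workflowNodes/guardrails", "optional": True},
    ],
})
required = [dep.name for dep in entry.dependencies if not dep.optional]
assert required == ["workflowNodes/ingest"]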
@@ -18,7 +18,7 @@ import re # noqa: F401
  import json
 
  from datetime import datetime
- from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr, field_validator
+ from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictFloat, StrictInt, StrictStr, field_validator
  from typing import Any, ClassVar, Dict, List, Optional, Union
  from typing_extensions import Annotated
  from eval_studio_client.api.models.v1_evaluator_parameter import V1EvaluatorParameter
@@ -47,7 +47,9 @@ class V1Evaluator(BaseModel):
  brief_description: Optional[StrictStr] = Field(default=None, description="Optional. Short preview of the Evaluator's description.", alias="briefDescription")
  enabled: Optional[StrictBool] = Field(default=None, description="Output only. Whether this Evaluator can be used for creating evaluations. Evaluator might be disabled because it has some external requirements that are not met.")
  tagline: Optional[StrictStr] = Field(default=None, description="Output only. Tagline is a short (single-line) and high-level description of the evaluator.")
- __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "content", "mimeType", "filename", "identifier", "tags", "parameters", "briefDescription", "enabled", "tagline"]
+ primary_metric: Optional[StrictStr] = Field(default=None, description="Output only. Optional. The name of the primary metric.", alias="primaryMetric")
+ primary_metric_default_threshold: Optional[Union[StrictFloat, StrictInt]] = Field(default=None, description="Output only. Optional. Default threshold of the primary metric. Value must be ignored if primary_metric is invalid.", alias="primaryMetricDefaultThreshold")
+ __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "content", "mimeType", "filename", "identifier", "tags", "parameters", "briefDescription", "enabled", "tagline", "primaryMetric", "primaryMetricDefaultThreshold"]
 
  @field_validator('content')
  def content_validate_regular_expression(cls, value):
@@ -98,6 +100,8 @@ class V1Evaluator(BaseModel):
  * OpenAPI `readOnly` fields are excluded.
  * OpenAPI `readOnly` fields are excluded.
  * OpenAPI `readOnly` fields are excluded.
+ * OpenAPI `readOnly` fields are excluded.
+ * OpenAPI `readOnly` fields are excluded.
  """
  excluded_fields: Set[str] = set([
  "name",
@@ -109,6 +113,8 @@ class V1Evaluator(BaseModel):
  "deleter",
  "enabled",
  "tagline",
+ "primary_metric",
+ "primary_metric_default_threshold",
  ])
 
  _dict = self.model_dump(
@@ -152,7 +158,9 @@ class V1Evaluator(BaseModel):
  "parameters": [V1EvaluatorParameter.from_dict(_item) for _item in obj["parameters"]] if obj.get("parameters") is not None else None,
  "briefDescription": obj.get("briefDescription"),
  "enabled": obj.get("enabled"),
- "tagline": obj.get("tagline")
+ "tagline": obj.get("tagline"),
+ "primaryMetric": obj.get("primaryMetric"),
+ "primaryMetricDefaultThreshold": obj.get("primaryMetricDefaultThreshold")
  })
  return _obj
 
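V1Evaluator gains two output-only fields, primaryMetric and primaryMetricDefaultThreshold. A minimal sketch of reading them from a server payload (the metric name, threshold, and display name are made-up values); because both fields are listed in excluded_fields, they are dropped again when serializing with to_dict():

from eval_studio_client.api.models.v1_evaluator import V1Evaluator

# Hypothetical evaluator payload as returned by the server.
evaluator = V1Evaluator.from_dict({
    "displayName": "Answer correctness",
    "primaryMetric": "accuracy",
    "primaryMetricDefaultThreshold": 0.75,
})
print(evaluator.primary_metric, evaluator.primary_metric_default_threshold)  # accuracy 0.75

# Output-only fields are excluded when converting back into a request body.
assert "primaryMetric" not in evaluator.to_dict()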
@@ -0,0 +1,87 @@
+ # coding: utf-8
+
+ """
+ ai/h2o/eval_studio/v1/insight.proto
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: version not set
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictStr
+ from typing import Any, ClassVar, Dict, List, Optional
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class V1GetGuardrailsConfigurationResponse(BaseModel):
+ """
+ V1GetGuardrailsConfigurationResponse
+ """ # noqa: E501
+ guardrails_configuration_json: Optional[StrictStr] = Field(default=None, description="The guardrails configuration for the Workflow requested in JSON format. This configuration is usable by the guardrails client as is.", alias="guardrailsConfigurationJson")
+ __properties: ClassVar[List[str]] = ["guardrailsConfigurationJson"]
+
+ model_config = ConfigDict(
+ populate_by_name=True,
+ validate_assignment=True,
+ protected_namespaces=(),
+ )
+
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.model_dump(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Optional[Self]:
+ """Create an instance of V1GetGuardrailsConfigurationResponse from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Return the dictionary representation of the model using alias.
+
+ This has the following differences from calling pydantic's
+ `self.model_dump(by_alias=True)`:
+
+ * `None` is only added to the output dict for nullable fields that
+ were set at model initialization. Other fields with value `None`
+ are ignored.
+ """
+ excluded_fields: Set[str] = set([
+ ])
+
+ _dict = self.model_dump(
+ by_alias=True,
+ exclude=excluded_fields,
+ exclude_none=True,
+ )
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+ """Create an instance of V1GetGuardrailsConfigurationResponse from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return cls.model_validate(obj)
+
+ _obj = cls.model_validate({
+ "guardrailsConfigurationJson": obj.get("guardrailsConfigurationJson")
+ })
+ return _obj
+
+
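The new V1GetGuardrailsConfigurationResponse wraps the guardrails configuration as a JSON string rather than a structured object, so callers decode it themselves. A minimal sketch with a made-up configuration payload; the real configuration schema is defined by the guardrails client, not by this model:

import json

from eval_studio_client.api.models.v1_get_guardrails_configuration_response import (
    V1GetGuardrailsConfigurationResponse,
)

# Hypothetical response body carrying an example configuration.
response = V1GetGuardrailsConfigurationResponse.from_dict(
    {"guardrailsConfigurationJson": json.dumps({"entities": ["email", "phone"]})}
)
config = json.loads(response.guardrails_configuration_json)
print(config["entities"])  # ['email', 'phone']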
@@ -33,10 +33,8 @@ class V1LeaderboardReportModel(BaseModel):
  collection_name: Optional[StrictStr] = Field(default=None, description="Optional. Collection name.", alias="collectionName")
  llm_model_name: Optional[StrictStr] = Field(default=None, description="Output only. LLM model name.", alias="llmModelName")
  documents: Optional[List[StrictStr]] = Field(default=None, description="Output only. List of documents.")
- model_cfg: Optional[Dict[str, Any]] = Field(default=None, description="Output only. Model configuration.", alias="modelCfg")
  key: Optional[StrictStr] = Field(default=None, description="Output only. Model key.")
- llm_model_meta: Optional[Dict[str, Any]] = Field(default=None, description="Output only. LLM model metadata - recursive dictionaries with an additional info like performance stats.", alias="llmModelMeta")
- __properties: ClassVar[List[str]] = ["connection", "modelType", "name", "collectionId", "collectionName", "llmModelName", "documents", "modelCfg", "key", "llmModelMeta"]
+ __properties: ClassVar[List[str]] = ["connection", "modelType", "name", "collectionId", "collectionName", "llmModelName", "documents", "key"]
 
  model_config = ConfigDict(
  populate_by_name=True,
@@ -74,8 +72,6 @@ class V1LeaderboardReportModel(BaseModel):
  * OpenAPI `readOnly` fields are excluded.
  * OpenAPI `readOnly` fields are excluded.
  * OpenAPI `readOnly` fields are excluded.
- * OpenAPI `readOnly` fields are excluded.
- * OpenAPI `readOnly` fields are excluded.
  """
  excluded_fields: Set[str] = set([
  "connection",
@@ -83,9 +79,7 @@ class V1LeaderboardReportModel(BaseModel):
  "name",
  "llm_model_name",
  "documents",
- "model_cfg",
  "key",
- "llm_model_meta",
  ])
 
  _dict = self.model_dump(
@@ -112,9 +106,7 @@ class V1LeaderboardReportModel(BaseModel):
  "collectionName": obj.get("collectionName"),
  "llmModelName": obj.get("llmModelName"),
  "documents": obj.get("documents"),
- "modelCfg": obj.get("modelCfg"),
- "key": obj.get("key"),
- "llmModelMeta": obj.get("llmModelMeta")
+ "key": obj.get("key")
  })
  return _obj
 
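V1LeaderboardReportModel drops the modelCfg and llmModelMeta fields. Older payloads that still contain those keys continue to parse because the generated from_dict() only copies the keys listed in __properties. A minimal sketch with placeholder values:

from eval_studio_client.api.models.v1_leaderboard_report_model import V1LeaderboardReportModel

# Hypothetical payload produced by an older server; the removed keys are simply ignored.
model = V1LeaderboardReportModel.from_dict({
    "llmModelName": "example-llm",
    "key": "model-key-1",
    "modelCfg": {"temperature": 0.0},      # no longer a model field
    "llmModelMeta": {"latency_ms": 120},   # no longer a model field
})
print(model.llm_model_name)               # example-llm
assert not hasattr(model, "model_cfg")    # attribute removed in 1.1.0a7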
@@ -0,0 +1,105 @@
+ # coding: utf-8
+
+ """
+ ai/h2o/eval_studio/v1/insight.proto
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: version not set
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field
+ from typing import Any, ClassVar, Dict, List, Optional
+ from eval_studio_client.api.models.v1_dependency_list import V1DependencyList
+ from eval_studio_client.api.models.v1_workflow_node import V1WorkflowNode
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class V1ListWorkflowDependenciesResponse(BaseModel):
+ """
+ V1ListWorkflowDependenciesResponse
+ """ # noqa: E501
+ nodes: Optional[List[V1WorkflowNode]] = Field(default=None, description="The list of the WorkflowNodes related to requested workflow.")
+ dependencies: Optional[List[V1DependencyList]] = Field(default=None, description="The dependency map for the workflow.")
+ __properties: ClassVar[List[str]] = ["nodes", "dependencies"]
+
+ model_config = ConfigDict(
+ populate_by_name=True,
+ validate_assignment=True,
+ protected_namespaces=(),
+ )
+
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.model_dump(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Optional[Self]:
+ """Create an instance of V1ListWorkflowDependenciesResponse from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Return the dictionary representation of the model using alias.
+
+ This has the following differences from calling pydantic's
+ `self.model_dump(by_alias=True)`:
+
+ * `None` is only added to the output dict for nullable fields that
+ were set at model initialization. Other fields with value `None`
+ are ignored.
+ """
+ excluded_fields: Set[str] = set([
+ ])
+
+ _dict = self.model_dump(
+ by_alias=True,
+ exclude=excluded_fields,
+ exclude_none=True,
+ )
+ # override the default output from pydantic by calling `to_dict()` of each item in nodes (list)
+ _items = []
+ if self.nodes:
+ for _item in self.nodes:
+ if _item:
+ _items.append(_item.to_dict())
+ _dict['nodes'] = _items
+ # override the default output from pydantic by calling `to_dict()` of each item in dependencies (list)
+ _items = []
+ if self.dependencies:
+ for _item in self.dependencies:
+ if _item:
+ _items.append(_item.to_dict())
+ _dict['dependencies'] = _items
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+ """Create an instance of V1ListWorkflowDependenciesResponse from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return cls.model_validate(obj)
+
+ _obj = cls.model_validate({
+ "nodes": [V1WorkflowNode.from_dict(_item) for _item in obj["nodes"]] if obj.get("nodes") is not None else None,
+ "dependencies": [V1DependencyList.from_dict(_item) for _item in obj["dependencies"]] if obj.get("dependencies") is not None else None
+ })
+ return _obj
+
+
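V1ListWorkflowDependenciesResponse is the payload behind the new WorkflowService dependency listing: nodes carries the related V1WorkflowNode objects and dependencies carries one V1DependencyList per node. A minimal sketch that turns the response into a plain adjacency map; the node names are hypothetical and nodes is omitted to keep the example self-contained:

from eval_studio_client.api.models.v1_list_workflow_dependencies_response import (
    V1ListWorkflowDependenciesResponse,
)

response = V1ListWorkflowDependenciesResponse.from_dict({
    "dependencies": [
        {"node": "workflowNodes/ingest", "dependencies": []},
        {"node": "workflowNodes/evaluate",
         "dependencies": [{"name": "workflowNodes/ingest", "optional": False}]},
    ]
})

# node name -> names of the nodes it depends on
adjacency = {
    entry.node: [dep.name for dep in (entry.dependencies or [])]
    for entry in (response.dependencies or [])
}
print(adjacency)  # {'workflowNodes/ingest': [], 'workflowNodes/evaluate': ['workflowNodes/ingest']}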
@@ -17,20 +17,18 @@ import pprint
  import re # noqa: F401
  import json
 
- from pydantic import BaseModel, ConfigDict, Field
+ from pydantic import BaseModel, ConfigDict
  from typing import Any, ClassVar, Dict, List, Optional
  from eval_studio_client.api.models.v1_context import V1Context
- from eval_studio_client.api.models.v1_test_case import V1TestCase
  from typing import Optional, Set
  from typing_extensions import Self
 
- class V1GeneratedTestCase(BaseModel):
+ class V1RepeatedContext(BaseModel):
  """
- V1GeneratedTestCase
+ V1RepeatedContext
  """ # noqa: E501
- test_case: Optional[V1TestCase] = Field(default=None, alias="testCase")
- context: Optional[List[V1Context]] = Field(default=None, description="Context used for test_case generation.")
- __properties: ClassVar[List[str]] = ["testCase", "context"]
+ content: Optional[List[V1Context]] = None
+ __properties: ClassVar[List[str]] = ["content"]
 
  model_config = ConfigDict(
  populate_by_name=True,
@@ -50,7 +48,7 @@ class V1GeneratedTestCase(BaseModel):
 
  @classmethod
  def from_json(cls, json_str: str) -> Optional[Self]:
- """Create an instance of V1GeneratedTestCase from a JSON string"""
+ """Create an instance of V1RepeatedContext from a JSON string"""
  return cls.from_dict(json.loads(json_str))
 
  def to_dict(self) -> Dict[str, Any]:
@@ -71,21 +69,18 @@ class V1GeneratedTestCase(BaseModel):
  exclude=excluded_fields,
  exclude_none=True,
  )
- # override the default output from pydantic by calling `to_dict()` of test_case
- if self.test_case:
- _dict['testCase'] = self.test_case.to_dict()
- # override the default output from pydantic by calling `to_dict()` of each item in context (list)
+ # override the default output from pydantic by calling `to_dict()` of each item in content (list)
  _items = []
- if self.context:
- for _item in self.context:
+ if self.content:
+ for _item in self.content:
  if _item:
  _items.append(_item.to_dict())
- _dict['context'] = _items
+ _dict['content'] = _items
  return _dict
 
  @classmethod
  def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
- """Create an instance of V1GeneratedTestCase from a dict"""
+ """Create an instance of V1RepeatedContext from a dict"""
  if obj is None:
  return None
 
@@ -93,8 +88,7 @@ class V1GeneratedTestCase(BaseModel):
  return cls.model_validate(obj)
 
  _obj = cls.model_validate({
- "testCase": V1TestCase.from_dict(obj["testCase"]) if obj.get("testCase") is not None else None,
- "context": [V1Context.from_dict(_item) for _item in obj["context"]] if obj.get("context") is not None else None
+ "content": [V1Context.from_dict(_item) for _item in obj["content"]] if obj.get("content") is not None else None
  })
  return _obj
 
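V1RepeatedContext replaces V1GeneratedTestCase and is a thin wrapper around a list of V1Context items. A minimal round-trip sketch with an empty list; real payloads would carry V1Context dicts, whose fields are not shown in this diff:

from eval_studio_client.api.models.v1_repeated_context import V1RepeatedContext

wrapped = V1RepeatedContext(content=[])
payload = wrapped.to_json()                      # '{"content": []}'
assert V1RepeatedContext.from_json(payload).content == []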
@@ -20,6 +20,7 @@ import json
  from datetime import datetime
  from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
  from typing import Any, ClassVar, Dict, List, Optional
+ from eval_studio_client.api.models.v1_test_type import V1TestType
  from typing import Optional, Set
  from typing_extensions import Self
 
@@ -39,7 +40,8 @@ class V1Test(BaseModel):
  documents: Optional[List[StrictStr]] = Field(default=None, description="Immutable. Resource names of Documents assigned to the Test.")
  tags: Optional[List[StrictStr]] = Field(default=None, description="Tags assigned to the Test.")
  demo: Optional[StrictBool] = Field(default=None, description="Output only. Whether the Test is a demo resource or not. Demo resources are read only.")
- __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "documents", "tags", "demo"]
+ type: Optional[V1TestType] = None
+ __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "displayName", "description", "documents", "tags", "demo", "type"]
 
  model_config = ConfigDict(
  populate_by_name=True,
@@ -119,7 +121,8 @@ class V1Test(BaseModel):
  "description": obj.get("description"),
  "documents": obj.get("documents"),
  "tags": obj.get("tags"),
- "demo": obj.get("demo")
+ "demo": obj.get("demo"),
+ "type": obj.get("type")
  })
  return _obj
 
@@ -0,0 +1,38 @@
+ # coding: utf-8
+
+ """
+ ai/h2o/eval_studio/v1/insight.proto
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: version not set
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import json
+ from enum import Enum
+ from typing_extensions import Self
+
+
+ class V1TestType(str, Enum):
+ """
+ - TEST_TYPE_UNSPECIFIED: Unspecified type. - TEST_TYPE_STANDALONE: Standalone test. - TEST_TYPE_WORKFLOW: Test is part of a dashboard.
+ """
+
+ """
+ allowed enum values
+ """
+ TEST_TYPE_UNSPECIFIED = 'TEST_TYPE_UNSPECIFIED'
+ TEST_TYPE_STANDALONE = 'TEST_TYPE_STANDALONE'
+ TEST_TYPE_WORKFLOW = 'TEST_TYPE_WORKFLOW'
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Self:
+ """Create an instance of V1TestType from a JSON string"""
+ return cls(json.loads(json_str))
+
+
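Mirroring the Dashboard change, V1Test gains an optional type field backed by the new V1TestType enum. A minimal sketch; the display name is a placeholder:

from eval_studio_client.api.models.v1_test import V1Test
from eval_studio_client.api.models.v1_test_type import V1TestType

test = V1Test.from_dict({
    "displayName": "Customer FAQ regression",
    "type": "TEST_TYPE_STANDALONE",
})
assert test.type is V1TestType.TEST_TYPE_STANDALONE

# Per the enum docstring, TEST_TYPE_WORKFLOW marks a test that is part of a dashboard.
print(V1TestType.TEST_TYPE_WORKFLOW.value)  # TEST_TYPE_WORKFLOW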
@@ -0,0 +1,89 @@
+ # coding: utf-8
+
+ """
+ ai/h2o/eval_studio/v1/insight.proto
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: version not set
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
+ from typing import Any, ClassVar, Dict, List, Optional
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class V1WorkflowDependency(BaseModel):
+ """
+ V1WorkflowDependency
+ """ # noqa: E501
+ name: Optional[StrictStr] = Field(default=None, description="Required. The name of the Workflow Dependency node.")
+ optional: Optional[StrictBool] = Field(default=None, description="Optional. Whether the dependency is optional.")
+ __properties: ClassVar[List[str]] = ["name", "optional"]
+
+ model_config = ConfigDict(
+ populate_by_name=True,
+ validate_assignment=True,
+ protected_namespaces=(),
+ )
+
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.model_dump(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Optional[Self]:
+ """Create an instance of V1WorkflowDependency from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Return the dictionary representation of the model using alias.
+
+ This has the following differences from calling pydantic's
+ `self.model_dump(by_alias=True)`:
+
+ * `None` is only added to the output dict for nullable fields that
+ were set at model initialization. Other fields with value `None`
+ are ignored.
+ """
+ excluded_fields: Set[str] = set([
+ ])
+
+ _dict = self.model_dump(
+ by_alias=True,
+ exclude=excluded_fields,
+ exclude_none=True,
+ )
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+ """Create an instance of V1WorkflowDependency from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return cls.model_validate(obj)
+
+ _obj = cls.model_validate({
+ "name": obj.get("name"),
+ "optional": obj.get("optional")
+ })
+ return _obj
+
+
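V1WorkflowDependency is the leaf type used by V1DependencyList above; it carries a dependency's node name and whether that dependency is optional. A minimal construct-and-serialize sketch, with a placeholder node name:

from eval_studio_client.api.models.v1_workflow_dependency import V1WorkflowDependency

dependency = V1WorkflowDependency(name="workflowNodes/guardrails", optional=True)
print(dependency.to_json())   # {"name": "workflowNodes/guardrails", "optional": true}
restored = V1WorkflowDependency.from_json(dependency.to_json())
assert restored.optional is True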