eval-studio-client 1.3.0a1__py3-none-any.whl → 1.3.1a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. eval_studio_client/api/__init__.py +3 -0
  2. eval_studio_client/api/api/__init__.py +1 -0
  3. eval_studio_client/api/api/h2_ogpte_collection_service_api.py +304 -0
  4. eval_studio_client/api/api/test_lab_service_api.py +18 -1
  5. eval_studio_client/api/docs/AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.md +2 -0
  6. eval_studio_client/api/docs/H2OGPTeCollectionServiceApi.md +77 -0
  7. eval_studio_client/api/docs/PromptGenerationServiceAutoGeneratePromptsRequest.md +2 -0
  8. eval_studio_client/api/docs/TestLabServiceApi.md +4 -2
  9. eval_studio_client/api/docs/TestServiceGenerateTestCasesRequest.md +1 -0
  10. eval_studio_client/api/docs/V1CreateEvaluationRequest.md +1 -0
  11. eval_studio_client/api/docs/V1DeleteWorkflowCollectionsRequest.md +30 -0
  12. eval_studio_client/api/docs/V1DeleteWorkflowCollectionsResponse.md +29 -0
  13. eval_studio_client/api/docs/V1TestLab.md +1 -0
  14. eval_studio_client/api/models/__init__.py +2 -0
  15. eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +14 -2
  16. eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py +7 -3
  17. eval_studio_client/api/models/test_service_generate_test_cases_request.py +5 -3
  18. eval_studio_client/api/models/v1_create_evaluation_request.py +4 -2
  19. eval_studio_client/api/models/v1_delete_workflow_collections_request.py +97 -0
  20. eval_studio_client/api/models/v1_delete_workflow_collections_response.py +87 -0
  21. eval_studio_client/api/models/v1_test_lab.py +5 -2
  22. eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py +26 -1
  23. eval_studio_client/api/test/test_h2_ogpte_collection_service_api.py +38 -0
  24. eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py +3 -1
  25. eval_studio_client/api/test/test_test_service_generate_test_cases_request.py +2 -1
  26. eval_studio_client/api/test/test_v1_create_evaluation_request.py +2 -1
  27. eval_studio_client/api/test/test_v1_create_test_lab_response.py +2 -1
  28. eval_studio_client/api/test/test_v1_delete_workflow_collections_request.py +108 -0
  29. eval_studio_client/api/test/test_v1_delete_workflow_collections_response.py +53 -0
  30. eval_studio_client/api/test/test_v1_find_test_lab_response.py +2 -1
  31. eval_studio_client/api/test/test_v1_test_lab.py +2 -1
  32. eval_studio_client/gen/openapiv2/eval_studio.swagger.json +107 -0
  33. {eval_studio_client-1.3.0a1.dist-info → eval_studio_client-1.3.1a1.dist-info}/METADATA +2 -2
  34. {eval_studio_client-1.3.0a1.dist-info → eval_studio_client-1.3.1a1.dist-info}/RECORD +35 -26
  35. {eval_studio_client-1.3.0a1.dist-info → eval_studio_client-1.3.1a1.dist-info}/WHEEL +0 -0
eval_studio_client/api/models/adversarial_inputs_service_test_adversarial_inputs_robustness_request.py
@@ -22,6 +22,7 @@ from typing import Any, ClassVar, Dict, List, Optional
 from eval_studio_client.api.models.v1_all_metric_scores import V1AllMetricScores
 from eval_studio_client.api.models.v1_metric_scores import V1MetricScores
 from eval_studio_client.api.models.v1_model import V1Model
+from eval_studio_client.api.models.v1_test_case import V1TestCase
 from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
 from typing import Optional, Set
 from typing_extensions import Self
@@ -48,7 +49,9 @@ class AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(BaseModel):
     baseline_eval: Optional[StrictStr] = Field(default=None, description="Required. Baseline evaluation name.", alias="baselineEval")
     baseline_metrics: Optional[Dict[str, V1MetricScores]] = Field(default=None, description="Required. Map of baseline metrics from the evaluator to the average metric scores for the evaluator.", alias="baselineMetrics")
     all_baseline_metrics_scores: Optional[Dict[str, V1AllMetricScores]] = Field(default=None, description="Required. Map of baseline metric to all and every test case metric score.", alias="allBaselineMetricsScores")
-    __properties: ClassVar[List[str]] = ["operation", "generatorInputTypes", "generatorDocumentUrls", "generatorModel", "generatorBaseLlmModel", "generatorCount", "generatorTopics", "generatorChunks", "generatorH2ogpteCollectionId", "evaluatorIdentifiers", "evaluatorsParameters", "model", "baseLlmModel", "modelParameters", "defaultH2ogpteModel", "baselineEval", "baselineMetrics", "allBaselineMetricsScores"]
+    test_cases: Optional[List[V1TestCase]] = Field(default=None, description="Optional. Existing adversarial test cases. If provided, these will be merged with newly generated adversarial test cases (if any) and used for evaluation.", alias="testCases")
+    workflow: Optional[StrictStr] = Field(default=None, description="Optional. Resource name of the parent workflow.")
+    __properties: ClassVar[List[str]] = ["operation", "generatorInputTypes", "generatorDocumentUrls", "generatorModel", "generatorBaseLlmModel", "generatorCount", "generatorTopics", "generatorChunks", "generatorH2ogpteCollectionId", "evaluatorIdentifiers", "evaluatorsParameters", "model", "baseLlmModel", "modelParameters", "defaultH2ogpteModel", "baselineEval", "baselineMetrics", "allBaselineMetricsScores", "testCases", "workflow"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -112,6 +115,13 @@ class AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(BaseModel):
                 if self.all_baseline_metrics_scores[_key_all_baseline_metrics_scores]:
                     _field_dict[_key_all_baseline_metrics_scores] = self.all_baseline_metrics_scores[_key_all_baseline_metrics_scores].to_dict()
             _dict['allBaselineMetricsScores'] = _field_dict
+        # override the default output from pydantic by calling `to_dict()` of each item in test_cases (list)
+        _items = []
+        if self.test_cases:
+            for _item_test_cases in self.test_cases:
+                if _item_test_cases:
+                    _items.append(_item_test_cases.to_dict())
+            _dict['testCases'] = _items
         return _dict
 
     @classmethod
@@ -151,7 +161,9 @@ class AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(BaseModel):
                 for _k, _v in obj["allBaselineMetricsScores"].items()
             )
             if obj.get("allBaselineMetricsScores") is not None
-            else None
+            else None,
+            "testCases": [V1TestCase.from_dict(_item) for _item in obj["testCases"]] if obj.get("testCases") is not None else None,
+            "workflow": obj.get("workflow")
         }, strict=False)
         return _obj
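For orientation, a minimal usage sketch of the two new fields (not part of the diff itself; the prompt text and workflow name below are invented):

```python
from eval_studio_client.api.models.adversarial_inputs_service_test_adversarial_inputs_robustness_request import (
    AdversarialInputsServiceTestAdversarialInputsRobustnessRequest,
)
from eval_studio_client.api.models.v1_test_case import V1TestCase

# Existing adversarial test cases can now be passed in; the service merges
# them with any newly generated ones before evaluation.
req = AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(
    test_cases=[V1TestCase(prompt="example adversarial prompt")],  # hypothetical content
    workflow="workflows/example",  # hypothetical parent workflow resource name
)

payload = req.to_dict()  # serializes with the camelCase aliases declared above
assert "testCases" in payload and payload["workflow"] == "workflows/example"

# from_dict() accepts the same alias-keyed dict and rebuilds nested V1TestCase objects.
restored = AdversarialInputsServiceTestAdversarialInputsRobustnessRequest.from_dict(payload)
assert restored.workflow == "workflows/example"
```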
 
eval_studio_client/api/models/prompt_generation_service_auto_generate_prompts_request.py
@@ -17,7 +17,7 @@ import pprint
 import re # noqa: F401
 import json
 
-from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictInt, StrictStr
 from typing import Any, ClassVar, Dict, List, Optional
 from eval_studio_client.api.models.v1_model import V1Model
 from eval_studio_client.api.models.v1_repeated_context import V1RepeatedContext
@@ -39,7 +39,9 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
     generators: Optional[List[V1TestCasesGenerator]] = Field(default=None, description="Optional. Type of questions to generate TestCases for. If not specified, all types of questions are selected.")
     h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. The ID of the h2oGPTe collection to use. If empty, new temporary collection will be created.", alias="h2ogpteCollectionId")
     topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.")
-    __properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "chunks", "generators", "h2ogpteCollectionId", "topics"]
+    allow_duplicates: Optional[StrictBool] = Field(default=None, description="Optional. Whether to allow duplicates in the generated TestCases.", alias="allowDuplicates")
+    workflow: Optional[StrictStr] = Field(default=None, description="Optional. The workflow resource name to associate with the generated prompts.")
+    __properties: ClassVar[List[str]] = ["operation", "model", "count", "baseLlmModel", "documentUrls", "chunks", "generators", "h2ogpteCollectionId", "topics", "allowDuplicates", "workflow"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -109,7 +111,9 @@ class PromptGenerationServiceAutoGeneratePromptsRequest(BaseModel):
             "chunks": V1RepeatedContext.from_dict(obj["chunks"]) if obj.get("chunks") is not None else None,
             "generators": obj.get("generators"),
             "h2ogpteCollectionId": obj.get("h2ogpteCollectionId"),
-            "topics": obj.get("topics")
+            "topics": obj.get("topics"),
+            "allowDuplicates": obj.get("allowDuplicates"),
+            "workflow": obj.get("workflow")
         }, strict=False)
         return _obj
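Because the model is configured with `populate_by_name=True`, the new flag can be supplied either as `allow_duplicates` or via its `allowDuplicates` alias. A small sketch (the workflow name is invented):

```python
from eval_studio_client.api.models.prompt_generation_service_auto_generate_prompts_request import (
    PromptGenerationServiceAutoGeneratePromptsRequest,
)

# Alias-keyed input, e.g. as received over the wire.
req = PromptGenerationServiceAutoGeneratePromptsRequest.from_dict(
    {"allowDuplicates": False, "workflow": "workflows/example"}
)
assert req.allow_duplicates is False

# to_dict() omits unset (None) fields, so only the two populated keys round-trip.
assert req.to_dict() == {"allowDuplicates": False, "workflow": "workflows/example"}
```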
 
eval_studio_client/api/models/test_service_generate_test_cases_request.py
@@ -17,7 +17,7 @@ import pprint
 import re # noqa: F401
 import json
 
-from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictInt, StrictStr
 from typing import Any, ClassVar, Dict, List, Optional
 from eval_studio_client.api.models.v1_context import V1Context
 from eval_studio_client.api.models.v1_test_cases_generator import V1TestCasesGenerator
@@ -35,7 +35,8 @@ class TestServiceGenerateTestCasesRequest(BaseModel):
     h2ogpte_collection_id: Optional[StrictStr] = Field(default=None, description="Optional. ID of the h2oGPTe collection to use. If provided, documents referenced by Test and any specified chunks are ignored. This field is required if Test does not reference any documents and no chunks are provided. If this field is left empty, a temporary collection will be created.", alias="h2ogpteCollectionId")
     topics: Optional[List[StrictStr]] = Field(default=None, description="Optional. Topics to generate questions for. If not specified, use document summarization as topic generation.")
     chunks: Optional[List[V1Context]] = Field(default=None, description="Optional. The list of chunks to use for generation. If set, the Documents assigned to the Test and h2ogpte_collection_id are ignored.")
-    __properties: ClassVar[List[str]] = ["count", "model", "baseLlmModel", "generators", "h2ogpteCollectionId", "topics", "chunks"]
+    allow_duplicates: Optional[StrictBool] = Field(default=None, description="Optional. Whether to allow duplicates in the generated TestCases.", alias="allowDuplicates")
+    __properties: ClassVar[List[str]] = ["count", "model", "baseLlmModel", "generators", "h2ogpteCollectionId", "topics", "chunks", "allowDuplicates"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -101,7 +102,8 @@ class TestServiceGenerateTestCasesRequest(BaseModel):
             "generators": obj.get("generators"),
             "h2ogpteCollectionId": obj.get("h2ogpteCollectionId"),
             "topics": obj.get("topics"),
-            "chunks": [V1Context.from_dict(_item) for _item in obj["chunks"]] if obj.get("chunks") is not None else None
+            "chunks": [V1Context.from_dict(_item) for _item in obj["chunks"]] if obj.get("chunks") is not None else None,
+            "allowDuplicates": obj.get("allowDuplicates")
         }, strict=False)
         return _obj
 
eval_studio_client/api/models/v1_create_evaluation_request.py
@@ -40,7 +40,8 @@ class V1CreateEvaluationRequest(BaseModel):
     h2ogpte_collection: Optional[StrictStr] = Field(default=None, description="The existing collection name in H2OGPTe.", alias="h2ogpteCollection")
     default_h2ogpte_model: Optional[V1Model] = Field(default=None, alias="defaultH2ogpteModel")
     evaluation_type: Optional[V1EvaluationType] = Field(default=None, alias="evaluationType")
-    __properties: ClassVar[List[str]] = ["evaluatorIdentifiers", "model", "evaluationTests", "operation", "llmModels", "useCache", "evaluatorsParameters", "modelParameters", "h2ogpteCollection", "defaultH2ogpteModel", "evaluationType"]
+    workflow: Optional[StrictStr] = Field(default=None, description="Optional. The resource name of the workflow to which the evaluation belongs to.")
+    __properties: ClassVar[List[str]] = ["evaluatorIdentifiers", "model", "evaluationTests", "operation", "llmModels", "useCache", "evaluatorsParameters", "modelParameters", "h2ogpteCollection", "defaultH2ogpteModel", "evaluationType", "workflow"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -116,7 +117,8 @@ class V1CreateEvaluationRequest(BaseModel):
             "modelParameters": obj.get("modelParameters"),
             "h2ogpteCollection": obj.get("h2ogpteCollection"),
             "defaultH2ogpteModel": V1Model.from_dict(obj["defaultH2ogpteModel"]) if obj.get("defaultH2ogpteModel") is not None else None,
-            "evaluationType": obj.get("evaluationType")
+            "evaluationType": obj.get("evaluationType"),
+            "workflow": obj.get("workflow")
         }, strict=False)
         return _obj
 
eval_studio_client/api/models/v1_delete_workflow_collections_request.py (new file)
@@ -0,0 +1,97 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict
+from typing import Any, ClassVar, Dict, List, Optional
+from eval_studio_client.api.models.v1_model import V1Model
+from eval_studio_client.api.models.v1_workflow import V1Workflow
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1DeleteWorkflowCollectionsRequest(BaseModel):
+    """
+    V1DeleteWorkflowCollectionsRequest
+    """ # noqa: E501
+    model: Optional[V1Model] = None
+    workflow: Optional[V1Workflow] = None
+    __properties: ClassVar[List[str]] = ["model", "workflow"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1DeleteWorkflowCollectionsRequest from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of model
+        if self.model:
+            _dict['model'] = self.model.to_dict()
+        # override the default output from pydantic by calling `to_dict()` of workflow
+        if self.workflow:
+            _dict['workflow'] = self.workflow.to_dict()
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1DeleteWorkflowCollectionsRequest from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "model": V1Model.from_dict(obj["model"]) if obj.get("model") is not None else None,
+            "workflow": V1Workflow.from_dict(obj["workflow"]) if obj.get("workflow") is not None else None
+        }, strict=False)
+        return _obj
+
+
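A sketch of how the new request model round-trips through the helpers above (the workflow name is invented):

```python
from eval_studio_client.api.models.v1_delete_workflow_collections_request import (
    V1DeleteWorkflowCollectionsRequest,
)
from eval_studio_client.api.models.v1_workflow import V1Workflow

req = V1DeleteWorkflowCollectionsRequest(
    workflow=V1Workflow(name="workflows/example")  # hypothetical resource name
)

# to_json() funnels nested models through their own to_dict() overrides.
json_str = req.to_json()
restored = V1DeleteWorkflowCollectionsRequest.from_json(json_str)
assert restored.workflow.name == "workflows/example"
```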
eval_studio_client/api/models/v1_delete_workflow_collections_response.py (new file)
@@ -0,0 +1,87 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from typing import Optional, Set
+from typing_extensions import Self
+
+class V1DeleteWorkflowCollectionsResponse(BaseModel):
+    """
+    V1DeleteWorkflowCollectionsResponse
+    """ # noqa: E501
+    collections: Optional[List[StrictStr]] = Field(default=None, description="Output Only. The deleted collections ID.")
+    __properties: ClassVar[List[str]] = ["collections"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of V1DeleteWorkflowCollectionsResponse from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of V1DeleteWorkflowCollectionsResponse from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj, strict=False)
+
+        _obj = cls.model_validate({
+            "collections": obj.get("collections")
+        }, strict=False)
+        return _obj
+
+
eval_studio_client/api/models/v1_test_lab.py
@@ -21,6 +21,7 @@ from datetime import datetime
 from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator
 from typing import Any, ClassVar, Dict, List, Optional, Union
 from typing_extensions import Annotated
+from eval_studio_client.api.models.v1_evaluation_type import V1EvaluationType
 from typing import Optional, Set
 from typing_extensions import Self
 
@@ -41,7 +42,8 @@ class V1TestLab(BaseModel):
     llm_models: Optional[List[StrictStr]] = Field(default=None, description="Immutable. List of LLM models used.", alias="llmModels")
     model_parameters: Optional[StrictStr] = Field(default=None, description="Immutable. Optional. Parameters overrides in JSON format.", alias="modelParameters")
     h2ogpte_collection: Optional[StrictStr] = Field(default=None, description="The existing collection name in H2OGPTe.", alias="h2ogpteCollection")
-    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "model", "testCases", "content", "llmModels", "modelParameters", "h2ogpteCollection"]
+    evaluation_type: Optional[V1EvaluationType] = Field(default=None, alias="evaluationType")
+    __properties: ClassVar[List[str]] = ["name", "createTime", "creator", "updateTime", "updater", "deleteTime", "deleter", "model", "testCases", "content", "llmModels", "modelParameters", "h2ogpteCollection", "evaluationType"]
 
     @field_validator('content')
     def content_validate_regular_expression(cls, value):
@@ -130,7 +132,8 @@ class V1TestLab(BaseModel):
             "content": obj.get("content"),
             "llmModels": obj.get("llmModels"),
             "modelParameters": obj.get("modelParameters"),
-            "h2ogpteCollection": obj.get("h2ogpteCollection")
+            "h2ogpteCollection": obj.get("h2ogpteCollection"),
+            "evaluationType": obj.get("evaluationType")
         }, strict=False)
         return _obj
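A sketch of the new field in use; it deserializes into the `V1EvaluationType` enum imported above (assuming the usual OpenAPI Generator str-mixin enum, so members compare equal to their wire value):

```python
from eval_studio_client.api.models.v1_test_lab import V1TestLab

lab = V1TestLab.from_dict({"evaluationType": "EVALUATION_TYPE_UNSPECIFIED"})
# The enum mixes in str, so the member compares equal to the raw string.
assert lab.evaluation_type == "EVALUATION_TYPE_UNSPECIFIED"
```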
 
eval_studio_client/api/test/test_adversarial_inputs_service_test_adversarial_inputs_robustness_request.py
@@ -118,7 +118,32 @@ class TestAdversarialInputsServiceTestAdversarialInputsRobustnessRequest(unittest.TestCase):
                         scores = [
                             1.337
                             ], )
-                    }
+                    },
+                test_cases = [
+                    eval_studio_client.api.models.v1_test_case.v1TestCase(
+                        name = '',
+                        create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                        creator = '',
+                        update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                        updater = '',
+                        delete_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                        deleter = '',
+                        parent = '',
+                        prompt = '',
+                        answer = '',
+                        constraints = [
+                            ''
+                            ],
+                        condition = '',
+                        perturbed_by = [
+                            ''
+                            ],
+                        topics = [
+                            ''
+                            ],
+                        generator = 'TEST_CASES_GENERATOR_UNSPECIFIED', )
+                    ],
+                workflow = ''
             )
         else:
             return AdversarialInputsServiceTestAdversarialInputsRobustnessRequest(
eval_studio_client/api/test/test_h2_ogpte_collection_service_api.py (new file)
@@ -0,0 +1,38 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.api.h2_ogpte_collection_service_api import H2OGPTeCollectionServiceApi
+
+
+class TestH2OGPTeCollectionServiceApi(unittest.TestCase):
+    """H2OGPTeCollectionServiceApi unit test stubs"""
+
+    def setUp(self) -> None:
+        self.api = H2OGPTeCollectionServiceApi()
+
+    def tearDown(self) -> None:
+        pass
+
+    def test_h2_ogpte_collection_service_delete_workflow_collections(self) -> None:
+        """Test case for h2_ogpte_collection_service_delete_workflow_collections

+        DeleteCollection deletes existing managed h2oGPTe collection on the h2oGPTe server.
+        """
+        pass
+
+
+if __name__ == '__main__':
+    unittest.main()
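For completeness, a hedged sketch of calling the new endpoint. The method name comes from the test stub above; the host and the `body` parameter name are assumptions based on the usual OpenAPI Generator client layout, not taken from the diff:

```python
from eval_studio_client.api import ApiClient, Configuration
from eval_studio_client.api.api.h2_ogpte_collection_service_api import H2OGPTeCollectionServiceApi
from eval_studio_client.api.models.v1_delete_workflow_collections_request import (
    V1DeleteWorkflowCollectionsRequest,
)

configuration = Configuration(host="https://eval-studio.example.com")  # hypothetical host
with ApiClient(configuration) as api_client:
    api = H2OGPTeCollectionServiceApi(api_client)
    response = api.h2_ogpte_collection_service_delete_workflow_collections(
        body=V1DeleteWorkflowCollectionsRequest()  # parameter name assumed
    )
    print(response.collections)  # IDs of the deleted h2oGPTe collections
```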
eval_studio_client/api/test/test_prompt_generation_service_auto_generate_prompts_request.py
@@ -75,7 +75,9 @@ class TestPromptGenerationServiceAutoGeneratePromptsRequest(unittest.TestCase):
                 h2ogpte_collection_id = '',
                 topics = [
                     ''
-                    ]
+                    ],
+                allow_duplicates = True,
+                workflow = ''
             )
         else:
             return PromptGenerationServiceAutoGeneratePromptsRequest(
eval_studio_client/api/test/test_test_service_generate_test_cases_request.py
@@ -56,7 +56,8 @@ class TestTestServiceGenerateTestCasesRequest(unittest.TestCase):
                         pages = '',
                         topic = '',
                         topic_id = 56, )
-                    ]
+                    ],
+                allow_duplicates = True
             )
         else:
             return TestServiceGenerateTestCasesRequest(
eval_studio_client/api/test/test_v1_create_evaluation_request.py
@@ -114,7 +114,8 @@ class TestV1CreateEvaluationRequest(unittest.TestCase):
                     type = 'MODEL_TYPE_UNSPECIFIED',
                     parameters = '',
                     demo = True, ),
-                evaluation_type = 'EVALUATION_TYPE_UNSPECIFIED'
+                evaluation_type = 'EVALUATION_TYPE_UNSPECIFIED',
+                workflow = ''
             )
         else:
             return V1CreateEvaluationRequest(
eval_studio_client/api/test/test_v1_create_test_lab_response.py
@@ -52,7 +52,8 @@ class TestV1CreateTestLabResponse(unittest.TestCase):
                     ''
                     ],
                 model_parameters = '',
-                h2ogpte_collection = '', )
+                h2ogpte_collection = '',
+                evaluation_type = 'EVALUATION_TYPE_UNSPECIFIED', )
             )
         else:
             return V1CreateTestLabResponse(
eval_studio_client/api/test/test_v1_delete_workflow_collections_request.py (new file)
@@ -0,0 +1,108 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_delete_workflow_collections_request import V1DeleteWorkflowCollectionsRequest
+
+class TestV1DeleteWorkflowCollectionsRequest(unittest.TestCase):
+    """V1DeleteWorkflowCollectionsRequest unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1DeleteWorkflowCollectionsRequest:
+        """Test V1DeleteWorkflowCollectionsRequest
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1DeleteWorkflowCollectionsRequest`
+        """
+        model = V1DeleteWorkflowCollectionsRequest()
+        if include_optional:
+            return V1DeleteWorkflowCollectionsRequest(
+                model = eval_studio_client.api.models.v1_model.v1Model(
+                    name = '',
+                    create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                    creator = '',
+                    update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                    updater = '',
+                    delete_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                    deleter = '',
+                    display_name = '',
+                    description = '',
+                    url = '',
+                    api_key = '',
+                    type = 'MODEL_TYPE_UNSPECIFIED',
+                    parameters = '',
+                    demo = True, ),
+                workflow = eval_studio_client.api.models.v1_workflow.v1Workflow(
+                    name = '',
+                    display_name = '',
+                    description = '',
+                    create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                    creator = '',
+                    update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                    updater = '',
+                    delete_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                    deleter = '',
+                    type = 'WORKFLOW_TYPE_UNSPECIFIED',
+                    model = '',
+                    nodes = [
+                        ''
+                        ],
+                    edges = [
+                        ''
+                        ],
+                    outputs = {
+                        'key' : None
+                        },
+                    output_artifacts = {
+                        'key' : eval_studio_client.api.models.v1_workflow_node_artifacts.v1WorkflowNodeArtifacts(
+                            artifacts = [
+                                eval_studio_client.api.models.v1_workflow_node_artifact.v1WorkflowNodeArtifact(
+                                    name = '',
+                                    parent = '',
+                                    display_name = '',
+                                    description = '',
+                                    create_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                                    creator = '',
+                                    update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                                    updater = '',
+                                    delete_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
+                                    deleter = '',
+                                    mime_type = '', )
+                                ], )
+                        },
+                    llm_model = '',
+                    model_parameters = '',
+                    document = '',
+                    h2ogpte_collection = '',
+                    cloned_from_workflow = '', )
+            )
+        else:
+            return V1DeleteWorkflowCollectionsRequest(
+        )
+        """
+
+    def testV1DeleteWorkflowCollectionsRequest(self):
+        """Test V1DeleteWorkflowCollectionsRequest"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_delete_workflow_collections_response.py (new file)
@@ -0,0 +1,53 @@
+# coding: utf-8
+
+"""
+    ai/h2o/eval_studio/v1/insight.proto
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: version not set
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+import unittest
+
+from eval_studio_client.api.models.v1_delete_workflow_collections_response import V1DeleteWorkflowCollectionsResponse
+
+class TestV1DeleteWorkflowCollectionsResponse(unittest.TestCase):
+    """V1DeleteWorkflowCollectionsResponse unit test stubs"""
+
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def make_instance(self, include_optional) -> V1DeleteWorkflowCollectionsResponse:
+        """Test V1DeleteWorkflowCollectionsResponse
+            include_option is a boolean, when False only required
+            params are included, when True both required and
+            optional params are included """
+        # uncomment below to create an instance of `V1DeleteWorkflowCollectionsResponse`
+        """
+        model = V1DeleteWorkflowCollectionsResponse()
+        if include_optional:
+            return V1DeleteWorkflowCollectionsResponse(
+                collections = [
+                    ''
+                    ]
+            )
+        else:
+            return V1DeleteWorkflowCollectionsResponse(
+        )
+        """
+
+    def testV1DeleteWorkflowCollectionsResponse(self):
+        """Test V1DeleteWorkflowCollectionsResponse"""
+        # inst_req_only = self.make_instance(include_optional=False)
+        # inst_req_and_optional = self.make_instance(include_optional=True)
+
+if __name__ == '__main__':
+    unittest.main()
eval_studio_client/api/test/test_v1_find_test_lab_response.py
@@ -52,7 +52,8 @@ class TestV1FindTestLabResponse(unittest.TestCase):
                     ''
                     ],
                 model_parameters = '',
-                h2ogpte_collection = '', )
+                h2ogpte_collection = '',
+                evaluation_type = 'EVALUATION_TYPE_UNSPECIFIED', )
             )
         else:
             return V1FindTestLabResponse(
eval_studio_client/api/test/test_v1_test_lab.py
@@ -51,7 +51,8 @@ class TestV1TestLab(unittest.TestCase):
                 ''
                 ],
             model_parameters = '',
-            h2ogpte_collection = ''
+            h2ogpte_collection = '',
+            evaluation_type = 'EVALUATION_TYPE_UNSPECIFIED'
         )
     else:
         return V1TestLab(