agenta 0.27.0a9__py3-none-any.whl → 0.27.0a13__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of agenta has been flagged as possibly problematic.
- agenta/__init__.py +21 -3
- agenta/client/backend/__init__.py +14 -0
- agenta/client/backend/apps/client.py +28 -20
- agenta/client/backend/client.py +25 -2
- agenta/client/backend/containers/client.py +5 -1
- agenta/client/backend/core/__init__.py +2 -1
- agenta/client/backend/core/client_wrapper.py +6 -6
- agenta/client/backend/core/file.py +33 -11
- agenta/client/backend/core/http_client.py +24 -18
- agenta/client/backend/core/pydantic_utilities.py +144 -29
- agenta/client/backend/core/request_options.py +3 -0
- agenta/client/backend/core/serialization.py +139 -42
- agenta/client/backend/evaluations/client.py +7 -2
- agenta/client/backend/evaluators/client.py +349 -1
- agenta/client/backend/observability/client.py +11 -2
- agenta/client/backend/testsets/client.py +10 -10
- agenta/client/backend/types/__init__.py +14 -0
- agenta/client/backend/types/app.py +1 -0
- agenta/client/backend/types/app_variant_response.py +3 -1
- agenta/client/backend/types/config_dto.py +32 -0
- agenta/client/backend/types/config_response_model.py +32 -0
- agenta/client/backend/types/create_span.py +3 -2
- agenta/client/backend/types/environment_output.py +1 -0
- agenta/client/backend/types/environment_output_extended.py +1 -0
- agenta/client/backend/types/evaluation.py +1 -2
- agenta/client/backend/types/evaluator.py +2 -0
- agenta/client/backend/types/evaluator_config.py +1 -0
- agenta/client/backend/types/evaluator_mapping_output_interface.py +21 -0
- agenta/client/backend/types/evaluator_output_interface.py +21 -0
- agenta/client/backend/types/human_evaluation.py +1 -2
- agenta/client/backend/types/lifecycle_dto.py +24 -0
- agenta/client/backend/types/llm_tokens.py +2 -2
- agenta/client/backend/types/reference_dto.py +23 -0
- agenta/client/backend/types/reference_request_model.py +23 -0
- agenta/client/backend/types/span.py +1 -0
- agenta/client/backend/types/span_detail.py +7 -1
- agenta/client/backend/types/test_set_output_response.py +5 -2
- agenta/client/backend/types/trace_detail.py +7 -1
- agenta/client/backend/types/with_pagination.py +4 -2
- agenta/client/backend/variants/client.py +1565 -272
- agenta/docker/docker-assets/Dockerfile.cloud.template +1 -1
- agenta/sdk/__init__.py +19 -5
- agenta/sdk/agenta_init.py +21 -7
- agenta/sdk/context/routing.py +6 -5
- agenta/sdk/decorators/routing.py +16 -5
- agenta/sdk/decorators/tracing.py +16 -9
- agenta/sdk/litellm/litellm.py +47 -36
- agenta/sdk/managers/__init__.py +6 -0
- agenta/sdk/managers/config.py +318 -0
- agenta/sdk/managers/deployment.py +45 -0
- agenta/sdk/managers/shared.py +639 -0
- agenta/sdk/managers/variant.py +182 -0
- agenta/sdk/tracing/exporters.py +0 -1
- agenta/sdk/tracing/inline.py +46 -1
- agenta/sdk/tracing/processors.py +0 -1
- agenta/sdk/types.py +47 -2
- agenta/sdk/utils/exceptions.py +31 -1
- {agenta-0.27.0a9.dist-info → agenta-0.27.0a13.dist-info}/METADATA +1 -1
- {agenta-0.27.0a9.dist-info → agenta-0.27.0a13.dist-info}/RECORD +61 -50
- agenta/sdk/config_manager.py +0 -205
- {agenta-0.27.0a9.dist-info → agenta-0.27.0a13.dist-info}/WHEEL +0 -0
- {agenta-0.27.0a9.dist-info → agenta-0.27.0a13.dist-info}/entry_points.txt +0 -0
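The headline changes: the generated backend client gains two evaluator endpoints (`evaluators/map` and `evaluators/{evaluator_key}/run`), and `agenta/sdk/config_manager.py` is removed with a new `agenta/sdk/managers/` package (config, deployment, shared, variant) appearing in its place. A minimal sketch of the new endpoints, mirroring the docstring examples added in `evaluators/client.py` below; the key, URL, and payload values are placeholders:

```python
from agenta import AgentaApi

client = AgentaApi(
    api_key="YOUR_API_KEY",
    base_url="https://yourhost.com/path/to/api",
)

# POST evaluators/map — maps an experiment data tree onto the evaluator interface.
mapped = client.evaluators.evaluator_data_map(
    inputs={"key": "value"},
    mapping={"key": "value"},
)

# POST evaluators/{evaluator_key}/run — runs one evaluator;
# inputs, settings, and credentials are all optional (OMIT by default).
result = client.evaluators.evaluator_run(
    evaluator_key="evaluator_key",
    inputs={"key": "value"},
)
```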
--- a/agenta/client/backend/evaluators/client.py
+++ b/agenta/client/backend/evaluators/client.py
@@ -7,10 +7,12 @@ from ..types.evaluator import Evaluator
 from ..core.pydantic_utilities import parse_obj_as
 from json.decoder import JSONDecodeError
 from ..core.api_error import ApiError
-from ..types.
+from ..types.evaluator_mapping_output_interface import EvaluatorMappingOutputInterface
 from ..errors.unprocessable_entity_error import UnprocessableEntityError
 from ..types.http_validation_error import HttpValidationError
+from ..types.evaluator_output_interface import EvaluatorOutputInterface
 from ..core.jsonable_encoder import jsonable_encoder
+from ..types.evaluator_config import EvaluatorConfig
 from ..core.client_wrapper import AsyncClientWrapper
 
 # this is used as the default value for optional parameters
@@ -69,6 +71,171 @@ class EvaluatorsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def evaluator_data_map(
+        self,
+        *,
+        inputs: typing.Dict[str, typing.Optional[typing.Any]],
+        mapping: typing.Dict[str, typing.Optional[typing.Any]],
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> EvaluatorMappingOutputInterface:
+        """
+        Endpoint to map the experiment data tree to evaluator interface.
+
+        Args:
+            request (Request): The request object.
+            payload (EvaluatorMappingInputInterface): The payload containing the request data.
+
+        Returns:
+            EvaluatorMappingOutputInterface: the evaluator mapping output object
+
+        Parameters
+        ----------
+        inputs : typing.Dict[str, typing.Optional[typing.Any]]
+
+        mapping : typing.Dict[str, typing.Optional[typing.Any]]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EvaluatorMappingOutputInterface
+            Successful Response
+
+        Examples
+        --------
+        from agenta import AgentaApi
+
+        client = AgentaApi(
+            api_key="YOUR_API_KEY",
+            base_url="https://yourhost.com/path/to/api",
+        )
+        client.evaluators.evaluator_data_map(
+            inputs={"key": "value"},
+            mapping={"key": "value"},
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "evaluators/map",
+            method="POST",
+            json={
+                "inputs": inputs,
+                "mapping": mapping,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    EvaluatorMappingOutputInterface,
+                    parse_obj_as(
+                        type_=EvaluatorMappingOutputInterface,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        parse_obj_as(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def evaluator_run(
+        self,
+        evaluator_key: str,
+        *,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        credentials: typing.Optional[
+            typing.Dict[str, typing.Optional[typing.Any]]
+        ] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> EvaluatorOutputInterface:
+        """
+        Endpoint to evaluate LLM app run
+
+        Args:
+            request (Request): The request object.
+            evaluator_key (str): The key of the evaluator.
+            payload (EvaluatorInputInterface): The payload containing the request data.
+
+        Returns:
+            result: EvaluatorOutputInterface object containing the outputs.
+
+        Parameters
+        ----------
+        evaluator_key : str
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+
+        settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+
+        credentials : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EvaluatorOutputInterface
+            Successful Response
+
+        Examples
+        --------
+        from agenta import AgentaApi
+
+        client = AgentaApi(
+            api_key="YOUR_API_KEY",
+            base_url="https://yourhost.com/path/to/api",
+        )
+        client.evaluators.evaluator_run(
+            evaluator_key="evaluator_key",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"evaluators/{jsonable_encoder(evaluator_key)}/run",
+            method="POST",
+            json={
+                "inputs": inputs,
+                "settings": settings,
+                "credentials": credentials,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    EvaluatorOutputInterface,
+                    parse_obj_as(
+                        type_=EvaluatorOutputInterface,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        parse_obj_as(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_evaluator_configs(
         self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None
     ) -> typing.List[EvaluatorConfig]:
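As the generated code above shows, a 422 response surfaces as `UnprocessableEntityError` (with the body parsed into an `HttpValidationError` model) and anything else unparseable as `ApiError`. A sketch of handling both, with absolute import paths inferred from the relative imports in this diff:

```python
from agenta import AgentaApi
from agenta.client.backend.core.api_error import ApiError
from agenta.client.backend.errors.unprocessable_entity_error import (
    UnprocessableEntityError,
)

client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")

try:
    result = client.evaluators.evaluator_run(evaluator_key="evaluator_key")
except UnprocessableEntityError as err:
    # 422: err.body holds the parsed HttpValidationError
    print("validation error:", err.body)
except ApiError as err:
    # any other non-2xx status, or a response body that is not valid JSON
    print(f"request failed ({err.status_code}):", err.body)
```

Catching `UnprocessableEntityError` first matters, since it subclasses `ApiError` in Fern-generated clients.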
@@ -500,6 +667,187 @@ class AsyncEvaluatorsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def evaluator_data_map(
+        self,
+        *,
+        inputs: typing.Dict[str, typing.Optional[typing.Any]],
+        mapping: typing.Dict[str, typing.Optional[typing.Any]],
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> EvaluatorMappingOutputInterface:
+        """
+        Endpoint to map the experiment data tree to evaluator interface.
+
+        Args:
+            request (Request): The request object.
+            payload (EvaluatorMappingInputInterface): The payload containing the request data.
+
+        Returns:
+            EvaluatorMappingOutputInterface: the evaluator mapping output object
+
+        Parameters
+        ----------
+        inputs : typing.Dict[str, typing.Optional[typing.Any]]
+
+        mapping : typing.Dict[str, typing.Optional[typing.Any]]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EvaluatorMappingOutputInterface
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from agenta import AsyncAgentaApi
+
+        client = AsyncAgentaApi(
+            api_key="YOUR_API_KEY",
+            base_url="https://yourhost.com/path/to/api",
+        )
+
+
+        async def main() -> None:
+            await client.evaluators.evaluator_data_map(
+                inputs={"key": "value"},
+                mapping={"key": "value"},
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "evaluators/map",
+            method="POST",
+            json={
+                "inputs": inputs,
+                "mapping": mapping,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    EvaluatorMappingOutputInterface,
+                    parse_obj_as(
+                        type_=EvaluatorMappingOutputInterface,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        parse_obj_as(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def evaluator_run(
+        self,
+        evaluator_key: str,
+        *,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        credentials: typing.Optional[
+            typing.Dict[str, typing.Optional[typing.Any]]
+        ] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> EvaluatorOutputInterface:
+        """
+        Endpoint to evaluate LLM app run
+
+        Args:
+            request (Request): The request object.
+            evaluator_key (str): The key of the evaluator.
+            payload (EvaluatorInputInterface): The payload containing the request data.
+
+        Returns:
+            result: EvaluatorOutputInterface object containing the outputs.
+
+        Parameters
+        ----------
+        evaluator_key : str
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+
+        settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+
+        credentials : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        EvaluatorOutputInterface
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from agenta import AsyncAgentaApi
+
+        client = AsyncAgentaApi(
+            api_key="YOUR_API_KEY",
+            base_url="https://yourhost.com/path/to/api",
+        )
+
+
+        async def main() -> None:
+            await client.evaluators.evaluator_run(
+                evaluator_key="evaluator_key",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"evaluators/{jsonable_encoder(evaluator_key)}/run",
+            method="POST",
+            json={
+                "inputs": inputs,
+                "settings": settings,
+                "credentials": credentials,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    EvaluatorOutputInterface,
+                    parse_obj_as(
+                        type_=EvaluatorOutputInterface,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        parse_obj_as(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_evaluator_configs(
         self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None
     ) -> typing.List[EvaluatorConfig]:
--- a/agenta/client/backend/observability/client.py
+++ b/agenta/client/backend/observability/client.py
@@ -10,6 +10,7 @@ from json.decoder import JSONDecodeError
 from ..core.api_error import ApiError
 from ..types.create_span import CreateSpan
 from ..types.create_trace_response import CreateTraceResponse
+from ..core.serialization import convert_and_respect_annotation_metadata
 from ..types.with_pagination import WithPagination
 from ..types.trace_detail import TraceDetail
 from ..core.jsonable_encoder import jsonable_encoder
@@ -155,7 +156,11 @@ class ObservabilityClient:
             method="POST",
             json={
                 "trace": trace,
-                "spans":
+                "spans": convert_and_respect_annotation_metadata(
+                    object_=spans,
+                    annotation=typing.Sequence[CreateSpan],
+                    direction="write",
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -764,7 +769,11 @@ class AsyncObservabilityClient:
             method="POST",
             json={
                 "trace": trace,
-                "spans":
+                "spans": convert_and_respect_annotation_metadata(
+                    object_=spans,
+                    annotation=typing.Sequence[CreateSpan],
+                    direction="write",
+                ),
             },
             request_options=request_options,
             omit=OMIT,
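Both trace-creation paths now run the spans list through `convert_and_respect_annotation_metadata` (from the reworked `core/serialization.py`) instead of embedding it raw, so field aliases declared via annotation metadata on `CreateSpan` are honored when the request body is serialized. A sketch of the equivalent standalone call, using only names that appear in this diff; the wrapper function itself is illustrative:

```python
import typing

from agenta.client.backend.core.serialization import (
    convert_and_respect_annotation_metadata,
)
from agenta.client.backend.types.create_span import CreateSpan


def serialize_spans(spans: typing.Sequence[CreateSpan]) -> typing.Any:
    # The same conversion the generated client now applies to the "spans" field.
    return convert_and_respect_annotation_metadata(
        object_=spans,
        annotation=typing.Sequence[CreateSpan],
        direction="write",
    )
```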
--- a/agenta/client/backend/testsets/client.py
+++ b/agenta/client/backend/testsets/client.py
@@ -26,9 +26,9 @@ class TestsetsClient:
         self,
         *,
         file: core.File,
-        upload_type: typing.Optional[str] =
-        testset_name: typing.Optional[str] =
-        app_id: typing.Optional[str] =
+        upload_type: typing.Optional[str] = OMIT,
+        testset_name: typing.Optional[str] = OMIT,
+        app_id: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> TestSetSimpleResponse:
         """
@@ -261,10 +261,10 @@ class TestsetsClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.Optional[typing.Any]:
         """
-        Fetch a specific testset in a MongoDB collection using its
+        Fetch a specific testset in a MongoDB collection using its \_id.
 
         Args:
-            testset_id (str): The
+            testset_id (str): The \_id of the testset to fetch.
 
         Returns:
             The requested testset if found, else an HTTPException.
@@ -555,9 +555,9 @@ class AsyncTestsetsClient:
         self,
         *,
         file: core.File,
-        upload_type: typing.Optional[str] =
-        testset_name: typing.Optional[str] =
-        app_id: typing.Optional[str] =
+        upload_type: typing.Optional[str] = OMIT,
+        testset_name: typing.Optional[str] = OMIT,
+        app_id: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> TestSetSimpleResponse:
         """
@@ -814,10 +814,10 @@ class AsyncTestsetsClient:
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.Optional[typing.Any]:
         """
-        Fetch a specific testset in a MongoDB collection using its
+        Fetch a specific testset in a MongoDB collection using its \_id.
 
         Args:
-            testset_id (str): The
+            testset_id (str): The \_id of the testset to fetch.
 
         Returns:
             The requested testset if found, else an HTTPException.
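The upload signature's optional parameters now default to the `OMIT` sentinel, so fields left unset are dropped from the multipart request rather than sent as null. A hedged sketch, assuming Fern's usual `core.File` shapes (an httpx-style `(filename, file, content_type)` tuple) and a hypothetical `upload_file` method name, since the method name falls outside the hunks shown:

```python
from agenta import AgentaApi

client = AgentaApi(api_key="YOUR_API_KEY", base_url="https://yourhost.com/path/to/api")

with open("testset.csv", "rb") as fp:
    response = client.testsets.upload_file(  # hypothetical name; not visible in these hunks
        file=("testset.csv", fp, "text/csv"),
        testset_name="my-testset",  # upload_type and app_id stay at OMIT and are omitted entirely
    )
```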
--- a/agenta/client/backend/types/__init__.py
+++ b/agenta/client/backend/types/__init__.py
@@ -8,6 +8,8 @@ from .app_variant_revision import AppVariantRevision
 from .base_output import BaseOutput
 from .body_import_testset import BodyImportTestset
 from .config_db import ConfigDb
+from .config_dto import ConfigDto
+from .config_response_model import ConfigResponseModel
 from .correct_answer import CorrectAnswer
 from .create_app_output import CreateAppOutput
 from .create_span import CreateSpan
@@ -27,6 +29,8 @@ from .evaluation_status_enum import EvaluationStatusEnum
 from .evaluation_type import EvaluationType
 from .evaluator import Evaluator
 from .evaluator_config import EvaluatorConfig
+from .evaluator_mapping_output_interface import EvaluatorMappingOutputInterface
+from .evaluator_output_interface import EvaluatorOutputInterface
 from .get_config_response import GetConfigResponse
 from .http_validation_error import HttpValidationError
 from .human_evaluation import HumanEvaluation
@@ -37,6 +41,7 @@ from .human_evaluation_scenario_update import HumanEvaluationScenarioUpdate
 from .human_evaluation_update import HumanEvaluationUpdate
 from .image import Image
 from .invite_request import InviteRequest
+from .lifecycle_dto import LifecycleDto
 from .list_api_keys_response import ListApiKeysResponse
 from .llm_run_rate_limit import LlmRunRateLimit
 from .llm_tokens import LlmTokens
@@ -47,6 +52,8 @@ from .organization import Organization
 from .organization_output import OrganizationOutput
 from .outputs import Outputs
 from .permission import Permission
+from .reference_dto import ReferenceDto
+from .reference_request_model import ReferenceRequestModel
 from .result import Result
 from .score import Score
 from .simple_evaluation_output import SimpleEvaluationOutput
@@ -81,6 +88,8 @@ __all__ = [
     "BaseOutput",
     "BodyImportTestset",
     "ConfigDb",
+    "ConfigDto",
+    "ConfigResponseModel",
     "CorrectAnswer",
     "CreateAppOutput",
     "CreateSpan",
@@ -100,6 +109,8 @@ __all__ = [
     "EvaluationType",
     "Evaluator",
     "EvaluatorConfig",
+    "EvaluatorMappingOutputInterface",
+    "EvaluatorOutputInterface",
     "GetConfigResponse",
     "HttpValidationError",
     "HumanEvaluation",
@@ -110,6 +121,7 @@ __all__ = [
     "HumanEvaluationUpdate",
     "Image",
     "InviteRequest",
+    "LifecycleDto",
     "ListApiKeysResponse",
     "LlmRunRateLimit",
     "LlmTokens",
@@ -120,6 +132,8 @@ __all__ = [
     "OrganizationOutput",
     "Outputs",
     "Permission",
+    "ReferenceDto",
+    "ReferenceRequestModel",
     "Result",
     "Score",
     "SimpleEvaluationOutput",
--- a/agenta/client/backend/types/app_variant_response.py
+++ b/agenta/client/backend/types/app_variant_response.py
@@ -11,8 +11,8 @@ class AppVariantResponse(UniversalBaseModel):
     app_name: str
     variant_id: str
     variant_name: str
-    parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
     project_id: str
+    parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
     base_name: str
     base_id: str
     config_name: str
@@ -21,6 +21,8 @@ class AppVariantResponse(UniversalBaseModel):
     created_at: typing.Optional[str] = None
     updated_at: typing.Optional[str] = None
     modified_by_id: typing.Optional[str] = None
+    organization_id: typing.Optional[str] = None
+    workspace_id: typing.Optional[str] = None
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
--- /dev/null
+++ b/agenta/client/backend/types/config_dto.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .reference_dto import ReferenceDto
+from .lifecycle_dto import LifecycleDto
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConfigDto(UniversalBaseModel):
+    params: typing.Dict[str, typing.Optional[typing.Any]]
+    url: typing.Optional[str] = None
+    application_ref: typing.Optional[ReferenceDto] = None
+    service_ref: typing.Optional[ReferenceDto] = None
+    variant_ref: typing.Optional[ReferenceDto] = None
+    environment_ref: typing.Optional[ReferenceDto] = None
+    application_lifecycle: typing.Optional[LifecycleDto] = None
+    service_lifecycle: typing.Optional[LifecycleDto] = None
+    variant_lifecycle: typing.Optional[LifecycleDto] = None
+    environment_lifecycle: typing.Optional[LifecycleDto] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+            extra="allow", frozen=True
+        )  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
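`ConfigDto` (and the identical `ConfigResponseModel` below) is frozen with `extra="allow"` under both pydantic v1 and v2. Only `params` is required, so a minimal instance looks like this, and mutation after construction raises:

```python
from agenta.client.backend.types.config_dto import ConfigDto

# `params` is the only required field; every *_ref and *_lifecycle field defaults to None.
config = ConfigDto(params={"temperature": 0.7})

try:
    config.url = "https://example.com"  # frozen=True makes instances immutable
except Exception as err:  # TypeError under pydantic v1, ValidationError under v2
    print(f"immutable: {type(err).__name__}")
```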
--- /dev/null
+++ b/agenta/client/backend/types/config_response_model.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .reference_dto import ReferenceDto
+from .lifecycle_dto import LifecycleDto
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConfigResponseModel(UniversalBaseModel):
+    params: typing.Dict[str, typing.Optional[typing.Any]]
+    url: typing.Optional[str] = None
+    application_ref: typing.Optional[ReferenceDto] = None
+    service_ref: typing.Optional[ReferenceDto] = None
+    variant_ref: typing.Optional[ReferenceDto] = None
+    environment_ref: typing.Optional[ReferenceDto] = None
+    application_lifecycle: typing.Optional[LifecycleDto] = None
+    service_lifecycle: typing.Optional[LifecycleDto] = None
+    variant_lifecycle: typing.Optional[LifecycleDto] = None
+    environment_lifecycle: typing.Optional[LifecycleDto] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+            extra="allow", frozen=True
+        )  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
--- a/agenta/client/backend/types/create_span.py
+++ b/agenta/client/backend/types/create_span.py
@@ -12,6 +12,7 @@ import pydantic
 class CreateSpan(UniversalBaseModel):
     id: str
     app_id: str
+    project_id: typing.Optional[str] = None
     variant_id: typing.Optional[str] = None
     variant_name: typing.Optional[str] = None
     inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
@@ -34,11 +35,11 @@ class CreateSpan(UniversalBaseModel):
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
-            extra="allow", frozen=
+            extra="allow", frozen=True
         )  # type: ignore # Pydantic v2
     else:
 
         class Config:
-            frozen =
+            frozen = True
             smart_union = True
             extra = pydantic.Extra.allow
--- a/agenta/client/backend/types/environment_output.py
+++ b/agenta/client/backend/types/environment_output.py
@@ -9,6 +9,7 @@ import pydantic
 class EnvironmentOutput(UniversalBaseModel):
     name: str
     app_id: str
+    project_id: str
     deployed_app_variant_id: typing.Optional[str] = None
     deployed_variant_name: typing.Optional[str] = None
     deployed_app_variant_revision_id: typing.Optional[str] = None
--- a/agenta/client/backend/types/environment_output_extended.py
+++ b/agenta/client/backend/types/environment_output_extended.py
@@ -10,6 +10,7 @@ import pydantic
 class EnvironmentOutputExtended(UniversalBaseModel):
     name: str
     app_id: str
+    project_id: str
     deployed_app_variant_id: typing.Optional[str] = None
     deployed_variant_name: typing.Optional[str] = None
     deployed_app_variant_revision_id: typing.Optional[str] = None
--- a/agenta/client/backend/types/evaluator.py
+++ b/agenta/client/backend/types/evaluator.py
@@ -13,6 +13,8 @@ class Evaluator(UniversalBaseModel):
     settings_template: typing.Dict[str, typing.Optional[typing.Any]]
     description: typing.Optional[str] = None
     oss: typing.Optional[bool] = None
+    requires_llm_api_keys: typing.Optional[bool] = None
+    tags: typing.List[str]
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(