agenta 0.27.0__py3-none-any.whl → 0.27.0a1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

This version of agenta might be problematic.

Files changed (68)
  1. agenta/__init__.py +3 -22
  2. agenta/cli/helper.py +1 -5
  3. agenta/client/backend/__init__.py +0 -14
  4. agenta/client/backend/apps/client.py +20 -28
  5. agenta/client/backend/client.py +2 -25
  6. agenta/client/backend/containers/client.py +1 -5
  7. agenta/client/backend/core/__init__.py +1 -2
  8. agenta/client/backend/core/client_wrapper.py +6 -6
  9. agenta/client/backend/core/file.py +11 -33
  10. agenta/client/backend/core/http_client.py +18 -24
  11. agenta/client/backend/core/pydantic_utilities.py +29 -144
  12. agenta/client/backend/core/request_options.py +0 -3
  13. agenta/client/backend/core/serialization.py +42 -139
  14. agenta/client/backend/evaluations/client.py +2 -7
  15. agenta/client/backend/evaluators/client.py +1 -349
  16. agenta/client/backend/observability/client.py +2 -11
  17. agenta/client/backend/testsets/client.py +10 -10
  18. agenta/client/backend/types/__init__.py +0 -14
  19. agenta/client/backend/types/app.py +0 -1
  20. agenta/client/backend/types/app_variant_response.py +1 -3
  21. agenta/client/backend/types/create_span.py +2 -3
  22. agenta/client/backend/types/environment_output.py +0 -1
  23. agenta/client/backend/types/environment_output_extended.py +0 -1
  24. agenta/client/backend/types/evaluation.py +2 -1
  25. agenta/client/backend/types/evaluator.py +0 -2
  26. agenta/client/backend/types/evaluator_config.py +0 -1
  27. agenta/client/backend/types/human_evaluation.py +2 -1
  28. agenta/client/backend/types/llm_tokens.py +2 -2
  29. agenta/client/backend/types/span.py +0 -1
  30. agenta/client/backend/types/span_detail.py +1 -7
  31. agenta/client/backend/types/test_set_output_response.py +2 -5
  32. agenta/client/backend/types/trace_detail.py +1 -7
  33. agenta/client/backend/types/with_pagination.py +2 -4
  34. agenta/client/backend/variants/client.py +273 -1566
  35. agenta/docker/docker-assets/Dockerfile.cloud.template +1 -1
  36. agenta/sdk/__init__.py +5 -20
  37. agenta/sdk/agenta_init.py +26 -30
  38. agenta/sdk/config_manager.py +205 -0
  39. agenta/sdk/context/routing.py +5 -6
  40. agenta/sdk/decorators/routing.py +135 -142
  41. agenta/sdk/decorators/tracing.py +245 -206
  42. agenta/sdk/litellm/litellm.py +36 -47
  43. agenta/sdk/tracing/attributes.py +2 -7
  44. agenta/sdk/tracing/context.py +2 -5
  45. agenta/sdk/tracing/conventions.py +8 -10
  46. agenta/sdk/tracing/exporters.py +6 -15
  47. agenta/sdk/tracing/inline.py +98 -70
  48. agenta/sdk/tracing/processors.py +14 -55
  49. agenta/sdk/tracing/spans.py +4 -16
  50. agenta/sdk/tracing/tracing.py +50 -54
  51. agenta/sdk/types.py +2 -61
  52. agenta/sdk/utils/exceptions.py +1 -31
  53. {agenta-0.27.0.dist-info → agenta-0.27.0a1.dist-info}/METADATA +1 -1
  54. {agenta-0.27.0.dist-info → agenta-0.27.0a1.dist-info}/RECORD +56 -67
  55. agenta/client/backend/types/config_dto.py +0 -32
  56. agenta/client/backend/types/config_response_model.py +0 -32
  57. agenta/client/backend/types/evaluator_mapping_output_interface.py +0 -21
  58. agenta/client/backend/types/evaluator_output_interface.py +0 -21
  59. agenta/client/backend/types/lifecycle_dto.py +0 -24
  60. agenta/client/backend/types/reference_dto.py +0 -23
  61. agenta/client/backend/types/reference_request_model.py +0 -23
  62. agenta/sdk/managers/__init__.py +0 -6
  63. agenta/sdk/managers/config.py +0 -318
  64. agenta/sdk/managers/deployment.py +0 -45
  65. agenta/sdk/managers/shared.py +0 -639
  66. agenta/sdk/managers/variant.py +0 -182
  67. {agenta-0.27.0.dist-info → agenta-0.27.0a1.dist-info}/WHEEL +0 -0
  68. {agenta-0.27.0.dist-info → agenta-0.27.0a1.dist-info}/entry_points.txt +0 -0
agenta/client/backend/evaluators/client.py

@@ -7,12 +7,10 @@ from ..types.evaluator import Evaluator
  from ..core.pydantic_utilities import parse_obj_as
  from json.decoder import JSONDecodeError
  from ..core.api_error import ApiError
- from ..types.evaluator_mapping_output_interface import EvaluatorMappingOutputInterface
+ from ..types.evaluator_config import EvaluatorConfig
  from ..errors.unprocessable_entity_error import UnprocessableEntityError
  from ..types.http_validation_error import HttpValidationError
- from ..types.evaluator_output_interface import EvaluatorOutputInterface
  from ..core.jsonable_encoder import jsonable_encoder
- from ..types.evaluator_config import EvaluatorConfig
  from ..core.client_wrapper import AsyncClientWrapper

  # this is used as the default value for optional parameters

@@ -71,171 +69,6 @@ class EvaluatorsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def evaluator_data_map(
- self,
- *,
- inputs: typing.Dict[str, typing.Optional[typing.Any]],
- mapping: typing.Dict[str, typing.Optional[typing.Any]],
- request_options: typing.Optional[RequestOptions] = None,
- ) -> EvaluatorMappingOutputInterface:
- """
- Endpoint to map the experiment data tree to evaluator interface.
-
- Args:
- request (Request): The request object.
- payload (EvaluatorMappingInputInterface): The payload containing the request data.
-
- Returns:
- EvaluatorMappingOutputInterface: the evaluator mapping output object
-
- Parameters
- ----------
- inputs : typing.Dict[str, typing.Optional[typing.Any]]
-
- mapping : typing.Dict[str, typing.Optional[typing.Any]]
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- EvaluatorMappingOutputInterface
- Successful Response
-
- Examples
- --------
- from agenta import AgentaApi
-
- client = AgentaApi(
- api_key="YOUR_API_KEY",
- base_url="https://yourhost.com/path/to/api",
- )
- client.evaluators.evaluator_data_map(
- inputs={"key": "value"},
- mapping={"key": "value"},
- )
- """
- _response = self._client_wrapper.httpx_client.request(
- "evaluators/map",
- method="POST",
- json={
- "inputs": inputs,
- "mapping": mapping,
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- return typing.cast(
- EvaluatorMappingOutputInterface,
- parse_obj_as(
- type_=EvaluatorMappingOutputInterface, # type: ignore
- object_=_response.json(),
- ),
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- parse_obj_as(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
- def evaluator_run(
- self,
- evaluator_key: str,
- *,
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- credentials: typing.Optional[
- typing.Dict[str, typing.Optional[typing.Any]]
- ] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> EvaluatorOutputInterface:
- """
- Endpoint to evaluate LLM app run
-
- Args:
- request (Request): The request object.
- evaluator_key (str): The key of the evaluator.
- payload (EvaluatorInputInterface): The payload containing the request data.
-
- Returns:
- result: EvaluatorOutputInterface object containing the outputs.
-
- Parameters
- ----------
- evaluator_key : str
-
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-
- settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-
- credentials : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- EvaluatorOutputInterface
- Successful Response
-
- Examples
- --------
- from agenta import AgentaApi
-
- client = AgentaApi(
- api_key="YOUR_API_KEY",
- base_url="https://yourhost.com/path/to/api",
- )
- client.evaluators.evaluator_run(
- evaluator_key="evaluator_key",
- )
- """
- _response = self._client_wrapper.httpx_client.request(
- f"evaluators/{jsonable_encoder(evaluator_key)}/run",
- method="POST",
- json={
- "inputs": inputs,
- "settings": settings,
- "credentials": credentials,
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- return typing.cast(
- EvaluatorOutputInterface,
- parse_obj_as(
- type_=EvaluatorOutputInterface, # type: ignore
- object_=_response.json(),
- ),
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- parse_obj_as(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
  def get_evaluator_configs(
  self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None
  ) -> typing.List[EvaluatorConfig]:
@@ -667,187 +500,6 @@ class AsyncEvaluatorsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def evaluator_data_map(
- self,
- *,
- inputs: typing.Dict[str, typing.Optional[typing.Any]],
- mapping: typing.Dict[str, typing.Optional[typing.Any]],
- request_options: typing.Optional[RequestOptions] = None,
- ) -> EvaluatorMappingOutputInterface:
- """
- Endpoint to map the experiment data tree to evaluator interface.
-
- Args:
- request (Request): The request object.
- payload (EvaluatorMappingInputInterface): The payload containing the request data.
-
- Returns:
- EvaluatorMappingOutputInterface: the evaluator mapping output object
-
- Parameters
- ----------
- inputs : typing.Dict[str, typing.Optional[typing.Any]]
-
- mapping : typing.Dict[str, typing.Optional[typing.Any]]
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- EvaluatorMappingOutputInterface
- Successful Response
-
- Examples
- --------
- import asyncio
-
- from agenta import AsyncAgentaApi
-
- client = AsyncAgentaApi(
- api_key="YOUR_API_KEY",
- base_url="https://yourhost.com/path/to/api",
- )
-
-
- async def main() -> None:
- await client.evaluators.evaluator_data_map(
- inputs={"key": "value"},
- mapping={"key": "value"},
- )
-
-
- asyncio.run(main())
- """
- _response = await self._client_wrapper.httpx_client.request(
- "evaluators/map",
- method="POST",
- json={
- "inputs": inputs,
- "mapping": mapping,
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- return typing.cast(
- EvaluatorMappingOutputInterface,
- parse_obj_as(
- type_=EvaluatorMappingOutputInterface, # type: ignore
- object_=_response.json(),
- ),
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- parse_obj_as(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
- async def evaluator_run(
- self,
- evaluator_key: str,
- *,
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- credentials: typing.Optional[
- typing.Dict[str, typing.Optional[typing.Any]]
- ] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> EvaluatorOutputInterface:
- """
- Endpoint to evaluate LLM app run
-
- Args:
- request (Request): The request object.
- evaluator_key (str): The key of the evaluator.
- payload (EvaluatorInputInterface): The payload containing the request data.
-
- Returns:
- result: EvaluatorOutputInterface object containing the outputs.
-
- Parameters
- ----------
- evaluator_key : str
-
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-
- settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-
- credentials : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Returns
- -------
- EvaluatorOutputInterface
- Successful Response
-
- Examples
- --------
- import asyncio
-
- from agenta import AsyncAgentaApi
-
- client = AsyncAgentaApi(
- api_key="YOUR_API_KEY",
- base_url="https://yourhost.com/path/to/api",
- )
-
-
- async def main() -> None:
- await client.evaluators.evaluator_run(
- evaluator_key="evaluator_key",
- )
-
-
- asyncio.run(main())
- """
- _response = await self._client_wrapper.httpx_client.request(
- f"evaluators/{jsonable_encoder(evaluator_key)}/run",
- method="POST",
- json={
- "inputs": inputs,
- "settings": settings,
- "credentials": credentials,
- },
- request_options=request_options,
- omit=OMIT,
- )
- try:
- if 200 <= _response.status_code < 300:
- return typing.cast(
- EvaluatorOutputInterface,
- parse_obj_as(
- type_=EvaluatorOutputInterface, # type: ignore
- object_=_response.json(),
- ),
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- parse_obj_as(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
  async def get_evaluator_configs(
  self, *, app_id: str, request_options: typing.Optional[RequestOptions] = None
  ) -> typing.List[EvaluatorConfig]:
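With evaluator_data_map and evaluator_run removed from both the sync and async clients, 0.27.0a1 no longer wraps the evaluators/map and evaluators/{evaluator_key}/run routes. If the backend still serves those routes, a caller could hit them directly; a minimal sketch with httpx, assuming the payload shape of the removed wrappers and a hypothetical auth header scheme:

import httpx

BASE_URL = "https://yourhost.com/path/to/api"  # placeholder host from the removed docstrings
API_KEY = "YOUR_API_KEY"


def run_evaluator_directly(evaluator_key: str, inputs: dict) -> dict:
    # Same JSON keys the removed evaluator_run method sent.
    response = httpx.post(
        f"{BASE_URL}/evaluators/{evaluator_key}/run",
        json={"inputs": inputs, "settings": None, "credentials": None},
        headers={"Authorization": API_KEY},  # assumption: the real auth header may differ
        timeout=30.0,
    )
    response.raise_for_status()
    return response.json()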
agenta/client/backend/observability/client.py

@@ -10,7 +10,6 @@ from json.decoder import JSONDecodeError
  from ..core.api_error import ApiError
  from ..types.create_span import CreateSpan
  from ..types.create_trace_response import CreateTraceResponse
- from ..core.serialization import convert_and_respect_annotation_metadata
  from ..types.with_pagination import WithPagination
  from ..types.trace_detail import TraceDetail
  from ..core.jsonable_encoder import jsonable_encoder

@@ -156,11 +155,7 @@ class ObservabilityClient:
  method="POST",
  json={
  "trace": trace,
- "spans": convert_and_respect_annotation_metadata(
- object_=spans,
- annotation=typing.Sequence[CreateSpan],
- direction="write",
- ),
+ "spans": spans,
  },
  request_options=request_options,
  omit=OMIT,

@@ -769,11 +764,7 @@ class AsyncObservabilityClient:
  method="POST",
  json={
  "trace": trace,
- "spans": convert_and_respect_annotation_metadata(
- object_=spans,
- annotation=typing.Sequence[CreateSpan],
- direction="write",
- ),
+ "spans": spans,
  },
  request_options=request_options,
  omit=OMIT,
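Both call sites above now place spans into the JSON body as-is instead of routing them through convert_and_respect_annotation_metadata. In Fern-generated clients that helper rewrites outgoing payloads to honor annotation metadata such as wire-format aliases, so serialization now rests on the client's generic encoder alone. A stand-in sketch of the distinction, using an illustrative model rather than the real CreateSpan:

import pydantic


class SpanStandIn(pydantic.BaseModel):
    model_config = pydantic.ConfigDict(populate_by_name=True)
    span_id: str = pydantic.Field(alias="spanId")


span = SpanStandIn(span_id="abc")
print(span.model_dump())               # {'span_id': 'abc'} -- Python field names
print(span.model_dump(by_alias=True))  # {'spanId': 'abc'} -- wire format when aliases apply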
agenta/client/backend/testsets/client.py

@@ -26,9 +26,9 @@ class TestsetsClient:
  self,
  *,
  file: core.File,
- upload_type: typing.Optional[str] = OMIT,
- testset_name: typing.Optional[str] = OMIT,
- app_id: typing.Optional[str] = OMIT,
+ upload_type: typing.Optional[str] = None,
+ testset_name: typing.Optional[str] = None,
+ app_id: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> TestSetSimpleResponse:
  """

@@ -261,10 +261,10 @@
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Optional[typing.Any]:
  """
- Fetch a specific testset in a MongoDB collection using its \_id.
+ Fetch a specific testset in a MongoDB collection using its id.

  Args:
- testset_id (str): The \_id of the testset to fetch.
+ testset_id (str): The id of the testset to fetch.

  Returns:
  The requested testset if found, else an HTTPException.

@@ -555,9 +555,9 @@ class AsyncTestsetsClient:
  self,
  *,
  file: core.File,
- upload_type: typing.Optional[str] = OMIT,
- testset_name: typing.Optional[str] = OMIT,
- app_id: typing.Optional[str] = OMIT,
+ upload_type: typing.Optional[str] = None,
+ testset_name: typing.Optional[str] = None,
+ app_id: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> TestSetSimpleResponse:
  """

@@ -814,10 +814,10 @@
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Optional[typing.Any]:
  """
- Fetch a specific testset in a MongoDB collection using its \_id.
+ Fetch a specific testset in a MongoDB collection using its id.

  Args:
- testset_id (str): The \_id of the testset to fetch.
+ testset_id (str): The id of the testset to fetch.

  Returns:
  The requested testset if found, else an HTTPException.
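The upload signatures above change the upload_type, testset_name, and app_id defaults from the OMIT sentinel to plain None. A generic sketch of the sentinel pattern being dropped (illustrative only, not agenta's exact form-encoding logic): a unique sentinel lets a client distinguish an argument that was never passed from one explicitly set to None, a distinction that a None default erases.

import typing

OMIT: typing.Any = object()  # unique sentinel: "caller did not pass this argument"


def build_form(testset_name: typing.Optional[str] = OMIT) -> dict:
    data = {"testset_name": testset_name}
    # Drop only the fields the caller never provided.
    return {key: value for key, value in data.items() if value is not OMIT}


print(build_form())      # {} -- field omitted from the form entirely
print(build_form(None))  # {'testset_name': None} -- explicit null is kept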
agenta/client/backend/types/__init__.py

@@ -8,8 +8,6 @@ from .app_variant_revision import AppVariantRevision
  from .base_output import BaseOutput
  from .body_import_testset import BodyImportTestset
  from .config_db import ConfigDb
- from .config_dto import ConfigDto
- from .config_response_model import ConfigResponseModel
  from .correct_answer import CorrectAnswer
  from .create_app_output import CreateAppOutput
  from .create_span import CreateSpan

@@ -29,8 +27,6 @@ from .evaluation_status_enum import EvaluationStatusEnum
  from .evaluation_type import EvaluationType
  from .evaluator import Evaluator
  from .evaluator_config import EvaluatorConfig
- from .evaluator_mapping_output_interface import EvaluatorMappingOutputInterface
- from .evaluator_output_interface import EvaluatorOutputInterface
  from .get_config_response import GetConfigResponse
  from .http_validation_error import HttpValidationError
  from .human_evaluation import HumanEvaluation

@@ -41,7 +37,6 @@ from .human_evaluation_scenario_update import HumanEvaluationScenarioUpdate
  from .human_evaluation_update import HumanEvaluationUpdate
  from .image import Image
  from .invite_request import InviteRequest
- from .lifecycle_dto import LifecycleDto
  from .list_api_keys_response import ListApiKeysResponse
  from .llm_run_rate_limit import LlmRunRateLimit
  from .llm_tokens import LlmTokens

@@ -52,8 +47,6 @@ from .organization import Organization
  from .organization_output import OrganizationOutput
  from .outputs import Outputs
  from .permission import Permission
- from .reference_dto import ReferenceDto
- from .reference_request_model import ReferenceRequestModel
  from .result import Result
  from .score import Score
  from .simple_evaluation_output import SimpleEvaluationOutput

@@ -88,8 +81,6 @@ __all__ = [
  "BaseOutput",
  "BodyImportTestset",
  "ConfigDb",
- "ConfigDto",
- "ConfigResponseModel",
  "CorrectAnswer",
  "CreateAppOutput",
  "CreateSpan",

@@ -109,8 +100,6 @@ __all__ = [
  "EvaluationType",
  "Evaluator",
  "EvaluatorConfig",
- "EvaluatorMappingOutputInterface",
- "EvaluatorOutputInterface",
  "GetConfigResponse",
  "HttpValidationError",
  "HumanEvaluation",

@@ -121,7 +110,6 @@ __all__ = [
  "HumanEvaluationUpdate",
  "Image",
  "InviteRequest",
- "LifecycleDto",
  "ListApiKeysResponse",
  "LlmRunRateLimit",
  "LlmTokens",

@@ -132,8 +120,6 @@ __all__ = [
  "OrganizationOutput",
  "Outputs",
  "Permission",
- "ReferenceDto",
- "ReferenceRequestModel",
  "Result",
  "Score",
  "SimpleEvaluationOutput",
agenta/client/backend/types/app.py

@@ -9,7 +9,6 @@ import pydantic
  class App(UniversalBaseModel):
  app_id: str
  app_name: str
- updated_at: str

  if IS_PYDANTIC_V2:
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(

agenta/client/backend/types/app_variant_response.py

@@ -11,8 +11,8 @@ class AppVariantResponse(UniversalBaseModel):
  app_name: str
  variant_id: str
  variant_name: str
- project_id: str
  parameters: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
+ project_id: str
  base_name: str
  base_id: str
  config_name: str

@@ -21,8 +21,6 @@ class AppVariantResponse(UniversalBaseModel):
  created_at: typing.Optional[str] = None
  updated_at: typing.Optional[str] = None
  modified_by_id: typing.Optional[str] = None
- organization_id: typing.Optional[str] = None
- workspace_id: typing.Optional[str] = None

  if IS_PYDANTIC_V2:
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(

agenta/client/backend/types/create_span.py

@@ -12,7 +12,6 @@ import pydantic
  class CreateSpan(UniversalBaseModel):
  id: str
  app_id: str
- project_id: typing.Optional[str] = None
  variant_id: typing.Optional[str] = None
  variant_name: typing.Optional[str] = None
  inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None

@@ -35,11 +34,11 @@

  if IS_PYDANTIC_V2:
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
- extra="allow", frozen=True
+ extra="allow", frozen=False
  ) # type: ignore # Pydantic v2
  else:

  class Config:
- frozen = True
+ frozen = False
  smart_union = True
  extra = pydantic.Extra.allow
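CreateSpan here, and LlmTokens further down, flip frozen from True to False in both the pydantic v1 and v2 configuration branches, so instances of these models become mutable after construction. A minimal stand-in sketch of what that changes for callers (pydantic v2, simplified field list):

import pydantic


class FrozenSpan(pydantic.BaseModel):  # 0.27.0 behavior
    model_config = pydantic.ConfigDict(frozen=True)
    id: str


class MutableSpan(pydantic.BaseModel):  # 0.27.0a1 behavior
    model_config = pydantic.ConfigDict(frozen=False)
    id: str


MutableSpan(id="a").id = "b"  # now allowed
try:
    FrozenSpan(id="a").id = "b"  # frozen models reject mutation
except pydantic.ValidationError as err:
    print(err.errors()[0]["type"])  # 'frozen_instance'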
agenta/client/backend/types/environment_output.py

@@ -9,7 +9,6 @@ import pydantic
  class EnvironmentOutput(UniversalBaseModel):
  name: str
  app_id: str
- project_id: str
  deployed_app_variant_id: typing.Optional[str] = None
  deployed_variant_name: typing.Optional[str] = None
  deployed_app_variant_revision_id: typing.Optional[str] = None

agenta/client/backend/types/environment_output_extended.py

@@ -10,7 +10,6 @@ import pydantic
  class EnvironmentOutputExtended(UniversalBaseModel):
  name: str
  app_id: str
- project_id: str
  deployed_app_variant_id: typing.Optional[str] = None
  deployed_variant_name: typing.Optional[str] = None
  deployed_app_variant_revision_id: typing.Optional[str] = None

agenta/client/backend/types/evaluation.py

@@ -12,7 +12,8 @@ import pydantic
  class Evaluation(UniversalBaseModel):
  id: str
  app_id: str
- project_id: str
+ user_id: str
+ user_username: str
  variant_ids: typing.List[str]
  variant_names: typing.List[str]
  variant_revision_ids: typing.List[str]

agenta/client/backend/types/evaluator.py

@@ -13,8 +13,6 @@ class Evaluator(UniversalBaseModel):
  settings_template: typing.Dict[str, typing.Optional[typing.Any]]
  description: typing.Optional[str] = None
  oss: typing.Optional[bool] = None
- requires_llm_api_keys: typing.Optional[bool] = None
- tags: typing.List[str]

  if IS_PYDANTIC_V2:
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(

agenta/client/backend/types/evaluator_config.py

@@ -9,7 +9,6 @@ import pydantic
  class EvaluatorConfig(UniversalBaseModel):
  id: str
  name: str
- project_id: str
  evaluator_key: str
  settings_values: typing.Optional[
  typing.Dict[str, typing.Optional[typing.Any]]

agenta/client/backend/types/human_evaluation.py

@@ -9,7 +9,8 @@ import pydantic
  class HumanEvaluation(UniversalBaseModel):
  id: str
  app_id: str
- project_id: str
+ user_id: str
+ user_username: str
  evaluation_type: str
  variant_ids: typing.List[str]
  variant_names: typing.List[str]

agenta/client/backend/types/llm_tokens.py

@@ -13,11 +13,11 @@ class LlmTokens(UniversalBaseModel):

  if IS_PYDANTIC_V2:
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
- extra="allow", frozen=True
+ extra="allow", frozen=False
  ) # type: ignore # Pydantic v2
  else:

  class Config:
- frozen = True
+ frozen = False
  smart_union = True
  extra = pydantic.Extra.allow

agenta/client/backend/types/span.py

@@ -14,7 +14,6 @@ from ..core.pydantic_utilities import update_forward_refs
  class Span(UniversalBaseModel):
  id: str
  name: str
- project_id: typing.Optional[str] = None
  parent_span_id: typing.Optional[str] = None
  created_at: dt.datetime
  variant: SpanVariant

agenta/client/backend/types/span_detail.py

@@ -1,21 +1,18 @@
  # This file was auto-generated by Fern from our API Definition.

- from __future__ import annotations
  from ..core.pydantic_utilities import UniversalBaseModel
- from .span import Span
  import typing
  import datetime as dt
  from .span_variant import SpanVariant
  from .span_status_code import SpanStatusCode
+ from .span import Span
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
  import pydantic
- from ..core.pydantic_utilities import update_forward_refs


  class SpanDetail(UniversalBaseModel):
  id: str
  name: str
- project_id: typing.Optional[str] = None
  parent_span_id: typing.Optional[str] = None
  created_at: dt.datetime
  variant: SpanVariant

@@ -39,6 +36,3 @@ class SpanDetail(UniversalBaseModel):
  frozen = True
  smart_union = True
  extra = pydantic.Extra.allow
-
-
- update_forward_refs(Span, SpanDetail=SpanDetail)
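The last hunk drops the from __future__ import annotations line and the trailing update_forward_refs(...) call; with the .span import reordered ahead of the class body, SpanDetail can reference Span directly instead of through a string forward reference that must be resolved later. A generic sketch of the pattern the removed call supported, assuming typical pydantic usage rather than agenta's exact internals:

from __future__ import annotations

import typing

import pydantic


class Node(pydantic.BaseModel):
    name: str
    children: typing.List[Node] = []  # stored as a string annotation until resolved


# pydantic v2 analogue of v1's update_forward_refs(): resolve string
# annotations before the model is first used.
Node.model_rebuild()
print(Node(name="root", children=[Node(name="leaf")]))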