agenta 0.9.0__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of agenta might be problematic.

Files changed (32)
  1. agenta/client/backend/__init__.py +32 -14
  2. agenta/client/backend/client.py +1462 -654
  3. agenta/client/backend/types/__init__.py +32 -14
  4. agenta/client/backend/types/aggregated_result.py +39 -0
  5. agenta/client/backend/types/app_variant_output.py +0 -1
  6. agenta/client/backend/types/app_variant_output_extended.py +50 -0
  7. agenta/client/backend/types/app_variant_revision.py +40 -0
  8. agenta/client/backend/types/{custom_evaluation_output.py → config_db.py} +3 -5
  9. agenta/client/backend/types/{custom_evaluation_names.py → delete_evaluation.py} +2 -3
  10. agenta/client/backend/types/environment_output.py +2 -0
  11. agenta/client/backend/types/evaluation.py +4 -4
  12. agenta/client/backend/types/evaluation_scenario.py +2 -3
  13. agenta/client/backend/types/evaluation_scenario_input.py +3 -2
  14. agenta/client/backend/types/evaluation_scenario_output.py +2 -2
  15. agenta/client/backend/types/evaluation_scenario_result.py +38 -0
  16. agenta/client/backend/types/evaluation_status_enum.py +4 -4
  17. agenta/client/backend/types/evaluation_type.py +0 -28
  18. agenta/client/backend/types/evaluator.py +39 -0
  19. agenta/client/backend/types/{custom_evaluation_detail.py → evaluator_config.py} +4 -4
  20. agenta/client/backend/types/human_evaluation.py +49 -0
  21. agenta/client/backend/types/human_evaluation_scenario.py +48 -0
  22. agenta/client/backend/types/{create_custom_evaluation.py → human_evaluation_scenario_input.py} +3 -4
  23. agenta/client/backend/types/human_evaluation_scenario_output.py +37 -0
  24. agenta/client/backend/types/{evaluation_scenario_score.py → human_evaluation_scenario_score.py} +1 -1
  25. agenta/client/backend/types/{evaluation_scenario_update_score.py → human_evaluation_scenario_update_score.py} +1 -1
  26. agenta/client/backend/types/llm_run_rate_limit.py +39 -0
  27. agenta/client/backend/types/result.py +37 -0
  28. {agenta-0.9.0.dist-info → agenta-0.10.0.dist-info}/METADATA +1 -1
  29. {agenta-0.9.0.dist-info → agenta-0.10.0.dist-info}/RECORD +31 -22
  30. agenta/client/backend/types/evaluation_type_settings.py +0 -42
  31. {agenta-0.9.0.dist-info → agenta-0.10.0.dist-info}/WHEEL +0 -0
  32. {agenta-0.9.0.dist-info → agenta-0.10.0.dist-info}/entry_points.txt +0 -0
agenta/client/backend/__init__.py

@@ -2,37 +2,46 @@
 
 from .types import (
     AddVariantFromBaseAndConfigResponse,
+    AggregatedResult,
     App,
     AppVariantOutput,
+    AppVariantOutputExtended,
+    AppVariantRevision,
     BaseOutput,
     BodyImportTestset,
+    ConfigDb,
     ContainerTemplatesResponse,
     CreateAppOutput,
-    CreateCustomEvaluation,
-    CustomEvaluationDetail,
-    CustomEvaluationNames,
-    CustomEvaluationOutput,
+    DeleteEvaluation,
     DockerEnvVars,
     EnvironmentOutput,
     Evaluation,
     EvaluationScenario,
     EvaluationScenarioInput,
     EvaluationScenarioOutput,
-    EvaluationScenarioScore,
-    EvaluationScenarioUpdateScore,
+    EvaluationScenarioResult,
     EvaluationStatusEnum,
     EvaluationType,
-    EvaluationTypeSettings,
     EvaluationWebhook,
+    Evaluator,
+    EvaluatorConfig,
     Feedback,
     GetConfigReponse,
     HttpValidationError,
+    HumanEvaluation,
+    HumanEvaluationScenario,
+    HumanEvaluationScenarioInput,
+    HumanEvaluationScenarioOutput,
+    HumanEvaluationScenarioScore,
+    HumanEvaluationScenarioUpdateScore,
     Image,
     InviteRequest,
     ListApiKeysOutput,
+    LlmRunRateLimit,
     NewTestset,
     Organization,
     OrganizationOutput,
+    Result,
     SimpleEvaluationOutput,
     Span,
     Template,
@@ -50,37 +59,46 @@ from .errors import UnprocessableEntityError
 
 __all__ = [
     "AddVariantFromBaseAndConfigResponse",
+    "AggregatedResult",
     "App",
     "AppVariantOutput",
+    "AppVariantOutputExtended",
+    "AppVariantRevision",
     "BaseOutput",
     "BodyImportTestset",
+    "ConfigDb",
     "ContainerTemplatesResponse",
     "CreateAppOutput",
-    "CreateCustomEvaluation",
-    "CustomEvaluationDetail",
-    "CustomEvaluationNames",
-    "CustomEvaluationOutput",
+    "DeleteEvaluation",
     "DockerEnvVars",
     "EnvironmentOutput",
     "Evaluation",
     "EvaluationScenario",
     "EvaluationScenarioInput",
     "EvaluationScenarioOutput",
-    "EvaluationScenarioScore",
-    "EvaluationScenarioUpdateScore",
+    "EvaluationScenarioResult",
     "EvaluationStatusEnum",
     "EvaluationType",
-    "EvaluationTypeSettings",
     "EvaluationWebhook",
+    "Evaluator",
+    "EvaluatorConfig",
     "Feedback",
     "GetConfigReponse",
     "HttpValidationError",
+    "HumanEvaluation",
+    "HumanEvaluationScenario",
+    "HumanEvaluationScenarioInput",
+    "HumanEvaluationScenarioOutput",
+    "HumanEvaluationScenarioScore",
+    "HumanEvaluationScenarioUpdateScore",
     "Image",
     "InviteRequest",
     "ListApiKeysOutput",
+    "LlmRunRateLimit",
     "NewTestset",
     "Organization",
     "OrganizationOutput",
+    "Result",
     "SimpleEvaluationOutput",
     "Span",
     "Template",