llama-stack-api 0.4.4__py3-none-any.whl → 0.5.0rc1__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (79)
  1. llama_stack_api/__init__.py +175 -20
  2. llama_stack_api/agents/__init__.py +38 -0
  3. llama_stack_api/agents/api.py +52 -0
  4. llama_stack_api/agents/fastapi_routes.py +268 -0
  5. llama_stack_api/agents/models.py +181 -0
  6. llama_stack_api/common/errors.py +15 -0
  7. llama_stack_api/connectors/__init__.py +38 -0
  8. llama_stack_api/connectors/api.py +50 -0
  9. llama_stack_api/connectors/fastapi_routes.py +103 -0
  10. llama_stack_api/connectors/models.py +103 -0
  11. llama_stack_api/conversations/__init__.py +61 -0
  12. llama_stack_api/conversations/api.py +44 -0
  13. llama_stack_api/conversations/fastapi_routes.py +177 -0
  14. llama_stack_api/conversations/models.py +245 -0
  15. llama_stack_api/datasetio/__init__.py +34 -0
  16. llama_stack_api/datasetio/api.py +42 -0
  17. llama_stack_api/datasetio/fastapi_routes.py +94 -0
  18. llama_stack_api/datasetio/models.py +48 -0
  19. llama_stack_api/eval/__init__.py +55 -0
  20. llama_stack_api/eval/api.py +51 -0
  21. llama_stack_api/eval/compat.py +300 -0
  22. llama_stack_api/eval/fastapi_routes.py +126 -0
  23. llama_stack_api/eval/models.py +141 -0
  24. llama_stack_api/inference/__init__.py +207 -0
  25. llama_stack_api/inference/api.py +93 -0
  26. llama_stack_api/inference/fastapi_routes.py +243 -0
  27. llama_stack_api/inference/models.py +1035 -0
  28. llama_stack_api/models/__init__.py +47 -0
  29. llama_stack_api/models/api.py +38 -0
  30. llama_stack_api/models/fastapi_routes.py +104 -0
  31. llama_stack_api/{models.py → models/models.py} +65 -79
  32. llama_stack_api/openai_responses.py +32 -6
  33. llama_stack_api/post_training/__init__.py +73 -0
  34. llama_stack_api/post_training/api.py +36 -0
  35. llama_stack_api/post_training/fastapi_routes.py +116 -0
  36. llama_stack_api/{post_training.py → post_training/models.py} +55 -86
  37. llama_stack_api/prompts/__init__.py +47 -0
  38. llama_stack_api/prompts/api.py +44 -0
  39. llama_stack_api/prompts/fastapi_routes.py +163 -0
  40. llama_stack_api/prompts/models.py +177 -0
  41. llama_stack_api/resource.py +0 -1
  42. llama_stack_api/safety/__init__.py +37 -0
  43. llama_stack_api/safety/api.py +29 -0
  44. llama_stack_api/safety/datatypes.py +83 -0
  45. llama_stack_api/safety/fastapi_routes.py +55 -0
  46. llama_stack_api/safety/models.py +38 -0
  47. llama_stack_api/schema_utils.py +47 -4
  48. llama_stack_api/scoring/__init__.py +66 -0
  49. llama_stack_api/scoring/api.py +35 -0
  50. llama_stack_api/scoring/fastapi_routes.py +67 -0
  51. llama_stack_api/scoring/models.py +81 -0
  52. llama_stack_api/scoring_functions/__init__.py +50 -0
  53. llama_stack_api/scoring_functions/api.py +39 -0
  54. llama_stack_api/scoring_functions/fastapi_routes.py +108 -0
  55. llama_stack_api/{scoring_functions.py → scoring_functions/models.py} +67 -64
  56. llama_stack_api/shields/__init__.py +41 -0
  57. llama_stack_api/shields/api.py +39 -0
  58. llama_stack_api/shields/fastapi_routes.py +104 -0
  59. llama_stack_api/shields/models.py +74 -0
  60. llama_stack_api/validators.py +46 -0
  61. llama_stack_api/vector_io/__init__.py +88 -0
  62. llama_stack_api/vector_io/api.py +234 -0
  63. llama_stack_api/vector_io/fastapi_routes.py +447 -0
  64. llama_stack_api/{vector_io.py → vector_io/models.py} +99 -377
  65. {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/METADATA +1 -1
  66. llama_stack_api-0.5.0rc1.dist-info/RECORD +115 -0
  67. llama_stack_api/agents.py +0 -173
  68. llama_stack_api/connectors.py +0 -146
  69. llama_stack_api/conversations.py +0 -270
  70. llama_stack_api/datasetio.py +0 -55
  71. llama_stack_api/eval.py +0 -137
  72. llama_stack_api/inference.py +0 -1169
  73. llama_stack_api/prompts.py +0 -203
  74. llama_stack_api/safety.py +0 -132
  75. llama_stack_api/scoring.py +0 -93
  76. llama_stack_api/shields.py +0 -93
  77. llama_stack_api-0.4.4.dist-info/RECORD +0 -70
  78. {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/WHEEL +0 -0
  79. {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/top_level.txt +0 -0
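The dominant change in this release is structural: each flat API module (agents.py, connectors.py, eval.py, inference.py, safety.py, ...) is deleted and replaced by a package of the same name split into api.py, models.py, fastapi_routes.py, and an __init__.py. A minimal sketch of what that layout implies for imports, assuming the new __init__.py files re-export the public names and assuming the protocol/model split indicated by the file names (neither is verified against the new package contents):

# 0.4.4: one flat module per API, e.g.
#   llama_stack_api/eval.py                 (models + Eval protocol together)
# 0.5.0rc1: one package per API, e.g.
#   llama_stack_api/eval/__init__.py        (re-exports, assumed)
#   llama_stack_api/eval/api.py             (the Eval protocol, assumed)
#   llama_stack_api/eval/models.py          (BenchmarkConfig etc., assumed)
#   llama_stack_api/eval/fastapi_routes.py  (FastAPI route wiring)
#   llama_stack_api/eval/compat.py

# If the package __init__ re-exports the old names (assumed, not verified),
# 0.4.4-style imports keep working unchanged:
from llama_stack_api.eval import BenchmarkConfig, Eval

# Otherwise callers would reach into the new submodules directly:
#   from llama_stack_api.eval.models import BenchmarkConfig
#   from llama_stack_api.eval.api import Eval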
llama_stack_api/eval.py DELETED
@@ -1,137 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the terms described in the LICENSE file in
- # the root directory of this source tree.
-
- from typing import Any, Literal, Protocol
-
- from pydantic import BaseModel, Field
-
- from llama_stack_api.common.job_types import Job
- from llama_stack_api.inference import SamplingParams, SystemMessage
- from llama_stack_api.schema_utils import json_schema_type, webmethod
- from llama_stack_api.scoring import ScoringResult
- from llama_stack_api.scoring_functions import ScoringFnParams
- from llama_stack_api.version import LLAMA_STACK_API_V1ALPHA
-
-
- @json_schema_type
- class ModelCandidate(BaseModel):
-     """A model candidate for evaluation.
-
-     :param model: The model ID to evaluate.
-     :param sampling_params: The sampling parameters for the model.
-     :param system_message: (Optional) The system message providing instructions or context to the model.
-     """
-
-     type: Literal["model"] = "model"
-     model: str
-     sampling_params: SamplingParams
-     system_message: SystemMessage | None = None
-
-
- EvalCandidate = ModelCandidate
-
-
- @json_schema_type
- class BenchmarkConfig(BaseModel):
-     """A benchmark configuration for evaluation.
-
-     :param eval_candidate: The candidate to evaluate.
-     :param scoring_params: Map between scoring function id and parameters for each scoring function you want to run
-     :param num_examples: (Optional) The number of examples to evaluate. If not provided, all examples in the dataset will be evaluated
-     """
-
-     eval_candidate: EvalCandidate
-     scoring_params: dict[str, ScoringFnParams] = Field(
-         description="Map between scoring function id and parameters for each scoring function you want to run",
-         default_factory=dict,
-     )
-     num_examples: int | None = Field(
-         description="Number of examples to evaluate (useful for testing), if not provided, all examples in the dataset will be evaluated",
-         default=None,
-     )
-     # we could optionally add any specific dataset config here
-
-
- @json_schema_type
- class EvaluateResponse(BaseModel):
-     """The response from an evaluation.
-
-     :param generations: The generations from the evaluation.
-     :param scores: The scores from the evaluation.
-     """
-
-     generations: list[dict[str, Any]]
-     # each key in the dict is a scoring function name
-     scores: dict[str, ScoringResult]
-
-
- class Eval(Protocol):
-     """Evaluations
-
-     Llama Stack Evaluation API for running evaluations on model and agent candidates."""
-
-     @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs", method="POST", level=LLAMA_STACK_API_V1ALPHA)
-     async def run_eval(
-         self,
-         benchmark_id: str,
-         benchmark_config: BenchmarkConfig,
-     ) -> Job:
-         """Run an evaluation on a benchmark.
-
-         :param benchmark_id: The ID of the benchmark to run the evaluation on.
-         :param benchmark_config: The configuration for the benchmark.
-         :returns: The job that was created to run the evaluation.
-         """
-         ...
-
-     @webmethod(route="/eval/benchmarks/{benchmark_id}/evaluations", method="POST", level=LLAMA_STACK_API_V1ALPHA)
-     async def evaluate_rows(
-         self,
-         benchmark_id: str,
-         input_rows: list[dict[str, Any]],
-         scoring_functions: list[str],
-         benchmark_config: BenchmarkConfig,
-     ) -> EvaluateResponse:
-         """Evaluate a list of rows on a benchmark.
-
-         :param benchmark_id: The ID of the benchmark to run the evaluation on.
-         :param input_rows: The rows to evaluate.
-         :param scoring_functions: The scoring functions to use for the evaluation.
-         :param benchmark_config: The configuration for the benchmark.
-         :returns: EvaluateResponse object containing generations and scores.
-         """
-         ...
-
-     @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs/{job_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA)
-     async def job_status(self, benchmark_id: str, job_id: str) -> Job:
-         """Get the status of a job.
-
-         :param benchmark_id: The ID of the benchmark to run the evaluation on.
-         :param job_id: The ID of the job to get the status of.
-         :returns: The status of the evaluation job.
-         """
-         ...
-
-     @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs/{job_id}", method="DELETE", level=LLAMA_STACK_API_V1ALPHA)
-     async def job_cancel(self, benchmark_id: str, job_id: str) -> None:
-         """Cancel a job.
-
-         :param benchmark_id: The ID of the benchmark to run the evaluation on.
-         :param job_id: The ID of the job to cancel.
-         """
-         ...
-
-     @webmethod(
-         route="/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result", method="GET", level=LLAMA_STACK_API_V1ALPHA
-     )
-     async def job_result(self, benchmark_id: str, job_id: str) -> EvaluateResponse:
-         """Get the result of a job.
-
-         :param benchmark_id: The ID of the benchmark to run the evaluation on.
-         :param job_id: The ID of the job to get the result of.
-         :returns: The result of the job.
-         """
-         ...
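For reference, the surface removed here can be driven end to end with only the names defined or imported above. A minimal sketch against the 0.4.4 flat module, assuming SamplingParams is default-constructible, assuming the Job model (from llama_stack_api.common.job_types, not shown in this diff) exposes a job_id field, and using hypothetical benchmark and model IDs:

from llama_stack_api.eval import BenchmarkConfig, Eval, ModelCandidate
from llama_stack_api.inference import SamplingParams


async def run_benchmark(eval_impl: Eval) -> None:
    # Build the config exactly as the removed models define it; scoring_params
    # defaults to {} and num_examples=None would evaluate the whole dataset.
    config = BenchmarkConfig(
        eval_candidate=ModelCandidate(
            model="example-model",             # hypothetical model ID
            sampling_params=SamplingParams(),  # assumed default-constructible
        ),
        num_examples=10,
    )

    # POST /eval/benchmarks/{benchmark_id}/jobs
    job = await eval_impl.run_eval("example-benchmark", config)

    # Poll GET .../jobs/{job_id} via job_status until the job completes
    # (loop elided), then GET .../jobs/{job_id}/result; the job_id attribute
    # is assumed from the unshown Job model.
    result = await eval_impl.job_result("example-benchmark", job.job_id)
    for scoring_fn, score in result.scores.items():
        print(scoring_fn, score)

In 0.5.0rc1 the same protocol presumably lives in llama_stack_api/eval/api.py, and the sizable eval/compat.py (+300 lines) in the list above suggests, but does not confirm, a compatibility layer for this older shape.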