llama-stack-api 0.4.4__py3-none-any.whl → 0.5.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack_api/__init__.py +175 -20
- llama_stack_api/agents/__init__.py +38 -0
- llama_stack_api/agents/api.py +52 -0
- llama_stack_api/agents/fastapi_routes.py +268 -0
- llama_stack_api/agents/models.py +181 -0
- llama_stack_api/common/errors.py +15 -0
- llama_stack_api/connectors/__init__.py +38 -0
- llama_stack_api/connectors/api.py +50 -0
- llama_stack_api/connectors/fastapi_routes.py +103 -0
- llama_stack_api/connectors/models.py +103 -0
- llama_stack_api/conversations/__init__.py +61 -0
- llama_stack_api/conversations/api.py +44 -0
- llama_stack_api/conversations/fastapi_routes.py +177 -0
- llama_stack_api/conversations/models.py +245 -0
- llama_stack_api/datasetio/__init__.py +34 -0
- llama_stack_api/datasetio/api.py +42 -0
- llama_stack_api/datasetio/fastapi_routes.py +94 -0
- llama_stack_api/datasetio/models.py +48 -0
- llama_stack_api/eval/__init__.py +55 -0
- llama_stack_api/eval/api.py +51 -0
- llama_stack_api/eval/compat.py +300 -0
- llama_stack_api/eval/fastapi_routes.py +126 -0
- llama_stack_api/eval/models.py +141 -0
- llama_stack_api/inference/__init__.py +207 -0
- llama_stack_api/inference/api.py +93 -0
- llama_stack_api/inference/fastapi_routes.py +243 -0
- llama_stack_api/inference/models.py +1035 -0
- llama_stack_api/models/__init__.py +47 -0
- llama_stack_api/models/api.py +38 -0
- llama_stack_api/models/fastapi_routes.py +104 -0
- llama_stack_api/{models.py → models/models.py} +65 -79
- llama_stack_api/openai_responses.py +32 -6
- llama_stack_api/post_training/__init__.py +73 -0
- llama_stack_api/post_training/api.py +36 -0
- llama_stack_api/post_training/fastapi_routes.py +116 -0
- llama_stack_api/{post_training.py → post_training/models.py} +55 -86
- llama_stack_api/prompts/__init__.py +47 -0
- llama_stack_api/prompts/api.py +44 -0
- llama_stack_api/prompts/fastapi_routes.py +163 -0
- llama_stack_api/prompts/models.py +177 -0
- llama_stack_api/resource.py +0 -1
- llama_stack_api/safety/__init__.py +37 -0
- llama_stack_api/safety/api.py +29 -0
- llama_stack_api/safety/datatypes.py +83 -0
- llama_stack_api/safety/fastapi_routes.py +55 -0
- llama_stack_api/safety/models.py +38 -0
- llama_stack_api/schema_utils.py +47 -4
- llama_stack_api/scoring/__init__.py +66 -0
- llama_stack_api/scoring/api.py +35 -0
- llama_stack_api/scoring/fastapi_routes.py +67 -0
- llama_stack_api/scoring/models.py +81 -0
- llama_stack_api/scoring_functions/__init__.py +50 -0
- llama_stack_api/scoring_functions/api.py +39 -0
- llama_stack_api/scoring_functions/fastapi_routes.py +108 -0
- llama_stack_api/{scoring_functions.py → scoring_functions/models.py} +67 -64
- llama_stack_api/shields/__init__.py +41 -0
- llama_stack_api/shields/api.py +39 -0
- llama_stack_api/shields/fastapi_routes.py +104 -0
- llama_stack_api/shields/models.py +74 -0
- llama_stack_api/validators.py +46 -0
- llama_stack_api/vector_io/__init__.py +88 -0
- llama_stack_api/vector_io/api.py +234 -0
- llama_stack_api/vector_io/fastapi_routes.py +447 -0
- llama_stack_api/{vector_io.py → vector_io/models.py} +99 -377
- {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/METADATA +1 -1
- llama_stack_api-0.5.0rc1.dist-info/RECORD +115 -0
- llama_stack_api/agents.py +0 -173
- llama_stack_api/connectors.py +0 -146
- llama_stack_api/conversations.py +0 -270
- llama_stack_api/datasetio.py +0 -55
- llama_stack_api/eval.py +0 -137
- llama_stack_api/inference.py +0 -1169
- llama_stack_api/prompts.py +0 -203
- llama_stack_api/safety.py +0 -132
- llama_stack_api/scoring.py +0 -93
- llama_stack_api/shields.py +0 -93
- llama_stack_api-0.4.4.dist-info/RECORD +0 -70
- {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/WHEEL +0 -0
- {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/top_level.txt +0 -0
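The pattern across this release is consistent: each flat API module (agents.py, prompts.py, safety.py, scoring.py, shields.py, and so on) becomes a package exposing api.py, models.py, and fastapi_routes.py, and where a rename is shown the old module's contents land in models.py. A hedged sketch of what that likely means for imports; whether the short path still resolves depends on the re-exports in the new __init__.py files, whose contents are not shown in this diff:

    # Import-path sketch for the 0.4.4 -> 0.5.0rc1 restructure. The new
    # location below is an assumption based on the renames in the file list.

    # 0.4.4: Prompt lived in the flat module llama_stack_api/prompts.py
    from llama_stack_api.prompts import Prompt

    # 0.5.0rc1: definitions move into the package's models module (assumed)
    from llama_stack_api.prompts.models import Prompt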
llama_stack_api/prompts.py
DELETED
@@ -1,203 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import re
-import secrets
-from typing import Protocol, runtime_checkable
-
-from pydantic import BaseModel, Field, field_validator, model_validator
-
-from llama_stack_api.schema_utils import json_schema_type, webmethod
-from llama_stack_api.version import LLAMA_STACK_API_V1
-
-
-@json_schema_type
-class Prompt(BaseModel):
-    """A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack.
-
-    :param prompt: The system prompt text with variable placeholders. Variables are only supported when using the Responses API.
-    :param version: Version (integer starting at 1, incremented on save)
-    :param prompt_id: Unique identifier formatted as 'pmpt_<48-digit-hash>'
-    :param variables: List of prompt variable names that can be used in the prompt template
-    :param is_default: Boolean indicating whether this version is the default version for this prompt
-    """
-
-    prompt: str | None = Field(default=None, description="The system prompt with variable placeholders")
-    version: int = Field(description="Version (integer starting at 1, incremented on save)", ge=1)
-    prompt_id: str = Field(description="Unique identifier in format 'pmpt_<48-digit-hash>'")
-    variables: list[str] = Field(
-        default_factory=list, description="List of variable names that can be used in the prompt template"
-    )
-    is_default: bool = Field(
-        default=False, description="Boolean indicating whether this version is the default version"
-    )
-
-    @field_validator("prompt_id")
-    @classmethod
-    def validate_prompt_id(cls, prompt_id: str) -> str:
-        if not isinstance(prompt_id, str):
-            raise TypeError("prompt_id must be a string in format 'pmpt_<48-digit-hash>'")
-
-        if not prompt_id.startswith("pmpt_"):
-            raise ValueError("prompt_id must start with 'pmpt_' prefix")
-
-        hex_part = prompt_id[5:]
-        if len(hex_part) != 48:
-            raise ValueError("prompt_id must be in format 'pmpt_<48-digit-hash>' (48 lowercase hex chars)")
-
-        for char in hex_part:
-            if char not in "0123456789abcdef":
-                raise ValueError("prompt_id hex part must contain only lowercase hex characters [0-9a-f]")
-
-        return prompt_id
-
-    @field_validator("version")
-    @classmethod
-    def validate_version(cls, prompt_version: int) -> int:
-        if prompt_version < 1:
-            raise ValueError("version must be >= 1")
-        return prompt_version
-
-    @model_validator(mode="after")
-    def validate_prompt_variables(self):
-        """Validate that all variables used in the prompt are declared in the variables list."""
-        if not self.prompt:
-            return self
-
-        prompt_variables = set(re.findall(r"{{\s*(\w+)\s*}}", self.prompt))
-        declared_variables = set(self.variables)
-
-        undeclared = prompt_variables - declared_variables
-        if undeclared:
-            raise ValueError(f"Prompt contains undeclared variables: {sorted(undeclared)}")
-
-        return self
-
-    @classmethod
-    def generate_prompt_id(cls) -> str:
-        # Generate 48 hex characters (24 bytes)
-        random_bytes = secrets.token_bytes(24)
-        hex_string = random_bytes.hex()
-        return f"pmpt_{hex_string}"
-
-
-@json_schema_type
-class ListPromptsResponse(BaseModel):
-    """Response model to list prompts."""
-
-    data: list[Prompt]
-
-
-@runtime_checkable
-class Prompts(Protocol):
-    """Prompts
-
-    Protocol for prompt management operations."""
-
-    @webmethod(route="/prompts", method="GET", level=LLAMA_STACK_API_V1)
-    async def list_prompts(self) -> ListPromptsResponse:
-        """List all prompts.
-
-        :returns: A ListPromptsResponse containing all prompts.
-        """
-        ...
-
-    @webmethod(route="/prompts/{prompt_id}/versions", method="GET", level=LLAMA_STACK_API_V1)
-    async def list_prompt_versions(
-        self,
-        prompt_id: str,
-    ) -> ListPromptsResponse:
-        """List prompt versions.
-
-        List all versions of a specific prompt.
-
-        :param prompt_id: The identifier of the prompt to list versions for.
-        :returns: A ListPromptsResponse containing all versions of the prompt.
-        """
-        ...
-
-    @webmethod(route="/prompts/{prompt_id}", method="GET", level=LLAMA_STACK_API_V1)
-    async def get_prompt(
-        self,
-        prompt_id: str,
-        version: int | None = None,
-    ) -> Prompt:
-        """Get prompt.
-
-        Get a prompt by its identifier and optional version.
-
-        :param prompt_id: The identifier of the prompt to get.
-        :param version: The version of the prompt to get (defaults to latest).
-        :returns: A Prompt resource.
-        """
-        ...
-
-    @webmethod(route="/prompts", method="POST", level=LLAMA_STACK_API_V1)
-    async def create_prompt(
-        self,
-        prompt: str,
-        variables: list[str] | None = None,
-    ) -> Prompt:
-        """Create prompt.
-
-        Create a new prompt.
-
-        :param prompt: The prompt text content with variable placeholders.
-        :param variables: List of variable names that can be used in the prompt template.
-        :returns: The created Prompt resource.
-        """
-        ...
-
-    @webmethod(route="/prompts/{prompt_id}", method="PUT", level=LLAMA_STACK_API_V1)
-    async def update_prompt(
-        self,
-        prompt_id: str,
-        prompt: str,
-        version: int,
-        variables: list[str] | None = None,
-        set_as_default: bool = True,
-    ) -> Prompt:
-        """Update prompt.
-
-        Update an existing prompt (increments version).
-
-        :param prompt_id: The identifier of the prompt to update.
-        :param prompt: The updated prompt text content.
-        :param version: The current version of the prompt being updated.
-        :param variables: Updated list of variable names that can be used in the prompt template.
-        :param set_as_default: Set the new version as the default (default=True).
-        :returns: The updated Prompt resource with incremented version.
-        """
-        ...
-
-    @webmethod(route="/prompts/{prompt_id}", method="DELETE", level=LLAMA_STACK_API_V1)
-    async def delete_prompt(
-        self,
-        prompt_id: str,
-    ) -> None:
-        """Delete prompt.
-
-        Delete a prompt.
-
-        :param prompt_id: The identifier of the prompt to delete.
-        """
-        ...
-
-    @webmethod(route="/prompts/{prompt_id}/set-default-version", method="PUT", level=LLAMA_STACK_API_V1)
-    async def set_default_version(
-        self,
-        prompt_id: str,
-        version: int,
-    ) -> Prompt:
-        """Set prompt version.
-
-        Set which version of a prompt should be the default in get_prompt (latest).
-
-        :param prompt_id: The identifier of the prompt.
-        :param version: The version to set as default.
-        :returns: The prompt with the specified version now set as default.
-        """
-        ...
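For reference, the validation rules deleted here can be exercised directly against the 0.4.4 module. A minimal sketch (the template text is illustrative; the model validators above imply pydantic v2):

    # Exercise the deleted 0.4.4 Prompt model: generate_prompt_id() yields
    # "pmpt_" + 48 lowercase hex chars, and the model validator rejects
    # template variables that are not declared in `variables`.
    from pydantic import ValidationError

    from llama_stack_api.prompts import Prompt  # 0.4.4 import path

    pid = Prompt.generate_prompt_id()
    ok = Prompt(prompt="Hello {{ name }}!", version=1, prompt_id=pid, variables=["name"])
    assert ok.is_default is False  # not the default version until explicitly set

    try:
        Prompt(prompt="Hello {{ name }}!", version=1, prompt_id=pid, variables=[])
    except ValidationError as err:
        print(err)  # wraps: Prompt contains undeclared variables: ['name']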
llama_stack_api/safety.py
DELETED
@@ -1,132 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from enum import Enum
-from typing import Any, Protocol, runtime_checkable
-
-from pydantic import BaseModel, Field
-
-from llama_stack_api.inference import OpenAIMessageParam
-from llama_stack_api.schema_utils import json_schema_type, webmethod
-from llama_stack_api.shields import Shield
-from llama_stack_api.version import LLAMA_STACK_API_V1
-
-
-@json_schema_type
-class ModerationObjectResults(BaseModel):
-    """A moderation object.
-    :param flagged: Whether any of the below categories are flagged.
-    :param categories: A list of the categories, and whether they are flagged or not.
-    :param category_applied_input_types: A list of the categories along with the input type(s) that the score applies to.
-    :param category_scores: A list of the categories along with their scores as predicted by model.
-    """
-
-    flagged: bool
-    categories: dict[str, bool] | None = None
-    category_applied_input_types: dict[str, list[str]] | None = None
-    category_scores: dict[str, float] | None = None
-    user_message: str | None = None
-    metadata: dict[str, Any] = Field(default_factory=dict)
-
-
-@json_schema_type
-class ModerationObject(BaseModel):
-    """A moderation object.
-    :param id: The unique identifier for the moderation request.
-    :param model: The model used to generate the moderation results.
-    :param results: A list of moderation objects
-    """
-
-    id: str
-    model: str
-    results: list[ModerationObjectResults]
-
-
-@json_schema_type
-class ViolationLevel(Enum):
-    """Severity level of a safety violation.
-
-    :cvar INFO: Informational level violation that does not require action
-    :cvar WARN: Warning level violation that suggests caution but allows continuation
-    :cvar ERROR: Error level violation that requires blocking or intervention
-    """
-
-    INFO = "info"
-    WARN = "warn"
-    ERROR = "error"
-
-
-@json_schema_type
-class SafetyViolation(BaseModel):
-    """Details of a safety violation detected by content moderation.
-
-    :param violation_level: Severity level of the violation
-    :param user_message: (Optional) Message to convey to the user about the violation
-    :param metadata: Additional metadata including specific violation codes for debugging and telemetry
-    """
-
-    violation_level: ViolationLevel
-
-    # what message should you convey to the user
-    user_message: str | None = None
-
-    # additional metadata (including specific violation codes) more for
-    # debugging, telemetry
-    metadata: dict[str, Any] = Field(default_factory=dict)
-
-
-@json_schema_type
-class RunShieldResponse(BaseModel):
-    """Response from running a safety shield.
-
-    :param violation: (Optional) Safety violation detected by the shield, if any
-    """
-
-    violation: SafetyViolation | None = None
-
-
-class ShieldStore(Protocol):
-    async def get_shield(self, identifier: str) -> Shield: ...
-
-
-@runtime_checkable
-class Safety(Protocol):
-    """Safety
-
-    OpenAI-compatible Moderations API.
-    """
-
-    shield_store: ShieldStore
-
-    @webmethod(route="/safety/run-shield", method="POST", level=LLAMA_STACK_API_V1)
-    async def run_shield(
-        self,
-        shield_id: str,
-        messages: list[OpenAIMessageParam],
-        params: dict[str, Any],
-    ) -> RunShieldResponse:
-        """Run shield.
-
-        Run a shield.
-
-        :param shield_id: The identifier of the shield to run.
-        :param messages: The messages to run the shield on.
-        :param params: The parameters of the shield.
-        :returns: A RunShieldResponse.
-        """
-        ...
-
-    @webmethod(route="/moderations", method="POST", level=LLAMA_STACK_API_V1)
-    async def run_moderation(self, input: str | list[str], model: str | None = None) -> ModerationObject:
-        """Create moderation.
-
-        Classifies if text and/or image inputs are potentially harmful.
-        :param input: Input (or inputs) to classify.
-            Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models.
-        :param model: (Optional) The content moderation model you would like to use.
-        :returns: A moderation object.
-        """
-        ...
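The protocol methods above are async and provider-agnostic. A small caller sketch; `safety` stands for any object implementing the protocol, which is all `runtime_checkable` requires, and no concrete provider is constructed here:

    # Caller sketch against the deleted 0.4.4 Safety protocol.
    from llama_stack_api.safety import ModerationObject, Safety

    async def is_flagged(safety: Safety, text: str) -> bool:
        # Maps to POST /moderations; model=None lets the provider choose.
        result: ModerationObject = await safety.run_moderation(input=text, model=None)
        return any(r.flagged for r in result.results)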
llama_stack_api/scoring.py
DELETED
@@ -1,93 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from typing import Any, Protocol, runtime_checkable
-
-from pydantic import BaseModel
-
-from llama_stack_api.schema_utils import json_schema_type, webmethod
-from llama_stack_api.scoring_functions import ScoringFn, ScoringFnParams
-from llama_stack_api.version import LLAMA_STACK_API_V1
-
-# mapping of metric to value
-ScoringResultRow = dict[str, Any]
-
-
-@json_schema_type
-class ScoringResult(BaseModel):
-    """
-    A scoring result for a single row.
-
-    :param score_rows: The scoring result for each row. Each row is a map of column name to value.
-    :param aggregated_results: Map of metric name to aggregated value
-    """
-
-    score_rows: list[ScoringResultRow]
-    # aggregated metrics to value
-    aggregated_results: dict[str, Any]
-
-
-@json_schema_type
-class ScoreBatchResponse(BaseModel):
-    """Response from batch scoring operations on datasets.
-
-    :param dataset_id: (Optional) The identifier of the dataset that was scored
-    :param results: A map of scoring function name to ScoringResult
-    """
-
-    dataset_id: str | None = None
-    results: dict[str, ScoringResult]
-
-
-@json_schema_type
-class ScoreResponse(BaseModel):
-    """
-    The response from scoring.
-
-    :param results: A map of scoring function name to ScoringResult.
-    """
-
-    # each key in the dict is a scoring function name
-    results: dict[str, ScoringResult]
-
-
-class ScoringFunctionStore(Protocol):
-    def get_scoring_function(self, scoring_fn_id: str) -> ScoringFn: ...
-
-
-@runtime_checkable
-class Scoring(Protocol):
-    scoring_function_store: ScoringFunctionStore
-
-    @webmethod(route="/scoring/score-batch", method="POST", level=LLAMA_STACK_API_V1)
-    async def score_batch(
-        self,
-        dataset_id: str,
-        scoring_functions: dict[str, ScoringFnParams | None],
-        save_results_dataset: bool = False,
-    ) -> ScoreBatchResponse:
-        """Score a batch of rows.
-
-        :param dataset_id: The ID of the dataset to score.
-        :param scoring_functions: The scoring functions to use for the scoring.
-        :param save_results_dataset: Whether to save the results to a dataset.
-        :returns: A ScoreBatchResponse.
-        """
-        ...
-
-    @webmethod(route="/scoring/score", method="POST", level=LLAMA_STACK_API_V1)
-    async def score(
-        self,
-        input_rows: list[dict[str, Any]],
-        scoring_functions: dict[str, ScoringFnParams | None],
-    ) -> ScoreResponse:
-        """Score a list of rows.
-
-        :param input_rows: The rows to score.
-        :param scoring_functions: The scoring functions to use for the scoring.
-        :returns: A ScoreResponse object containing rows and aggregated results.
-        """
-        ...
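A caller sketch for the score endpoint above; mapping a scoring-function id to None requests that function's default params, per the dict[str, ScoringFnParams | None] signature. The function id below is illustrative, not guaranteed to exist in any given distribution:

    # Caller sketch against the deleted 0.4.4 Scoring protocol.
    from typing import Any

    from llama_stack_api.scoring import ScoreResponse, Scoring

    async def grade(scoring: Scoring, rows: list[dict[str, Any]]) -> ScoreResponse:
        response = await scoring.score(
            input_rows=rows,
            scoring_functions={"basic::equality": None},  # illustrative id; None -> defaults
        )
        for name, result in response.results.items():
            print(name, result.aggregated_results)
        return response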
llama_stack_api/shields.py
DELETED
@@ -1,93 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from typing import Any, Literal, Protocol, runtime_checkable
-
-from pydantic import BaseModel
-
-from llama_stack_api.resource import Resource, ResourceType
-from llama_stack_api.schema_utils import json_schema_type, webmethod
-from llama_stack_api.version import LLAMA_STACK_API_V1
-
-
-class CommonShieldFields(BaseModel):
-    params: dict[str, Any] | None = None
-
-
-@json_schema_type
-class Shield(CommonShieldFields, Resource):
-    """A safety shield resource that can be used to check content.
-
-    :param params: (Optional) Configuration parameters for the shield
-    :param type: The resource type, always shield
-    """
-
-    type: Literal[ResourceType.shield] = ResourceType.shield
-
-    @property
-    def shield_id(self) -> str:
-        return self.identifier
-
-    @property
-    def provider_shield_id(self) -> str | None:
-        return self.provider_resource_id
-
-
-class ShieldInput(CommonShieldFields):
-    shield_id: str
-    provider_id: str | None = None
-    provider_shield_id: str | None = None
-
-
-@json_schema_type
-class ListShieldsResponse(BaseModel):
-    data: list[Shield]
-
-
-@runtime_checkable
-class Shields(Protocol):
-    @webmethod(route="/shields", method="GET", level=LLAMA_STACK_API_V1)
-    async def list_shields(self) -> ListShieldsResponse:
-        """List all shields.
-
-        :returns: A ListShieldsResponse.
-        """
-        ...
-
-    @webmethod(route="/shields/{identifier:path}", method="GET", level=LLAMA_STACK_API_V1)
-    async def get_shield(self, identifier: str) -> Shield:
-        """Get a shield by its identifier.
-
-        :param identifier: The identifier of the shield to get.
-        :returns: A Shield.
-        """
-        ...
-
-    @webmethod(route="/shields", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
-    async def register_shield(
-        self,
-        shield_id: str,
-        provider_shield_id: str | None = None,
-        provider_id: str | None = None,
-        params: dict[str, Any] | None = None,
-    ) -> Shield:
-        """Register a shield.
-
-        :param shield_id: The identifier of the shield to register.
-        :param provider_shield_id: The identifier of the shield in the provider.
-        :param provider_id: The identifier of the provider.
-        :param params: The parameters of the shield.
-        :returns: A Shield.
-        """
-        ...
-
-    @webmethod(route="/shields/{identifier:path}", method="DELETE", level=LLAMA_STACK_API_V1, deprecated=True)
-    async def unregister_shield(self, identifier: str) -> None:
-        """Unregister a shield.
-
-        :param identifier: The identifier of the shield to unregister.
-        """
-        ...
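Note that register_shield and unregister_shield were already marked deprecated=True at 0.4.4. A caller sketch with an illustrative shield identifier:

    # Caller sketch against the deleted 0.4.4 Shields protocol.
    from llama_stack_api.shields import Shield, Shields

    async def ensure_shield(shields: Shields, shield_id: str = "content-guard") -> Shield:
        existing = await shields.list_shields()
        for shield in existing.data:
            if shield.shield_id == shield_id:
                return shield
        # register_shield was already deprecated at this version
        return await shields.register_shield(shield_id=shield_id)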
llama_stack_api-0.4.4.dist-info/RECORD
DELETED
@@ -1,70 +0,0 @@
-llama_stack_api/__init__.py,sha256=5XNQGpundjXTutLgnYp6B1t6KITWXH_of626GciNma4,28103
-llama_stack_api/agents.py,sha256=u0sg3AoWCip5o8T4DMTM8uqP3BsdbkKbor3PmxKTg0g,7143
-llama_stack_api/connectors.py,sha256=PcAwndbVQC6pm5HGSlNprqYFTZzhCM7SYHPyRkSIoaQ,4644
-llama_stack_api/conversations.py,sha256=pLQD2ZT6rSWF2IIQUtdSvkq50w9-piCMVr9hgdMmlBw,10290
-llama_stack_api/datasetio.py,sha256=n4wQRv01rl8K_Ig2_Ln5hZBfdbmptKtDGsNE1igJ1-E,2075
-llama_stack_api/datatypes.py,sha256=S7qOix_CBofuCEU6Gmm9qogZnnIO-WlN1kfO3D4Xlnc,12590
-llama_stack_api/eval.py,sha256=PjgrSNk_Q8MmnN5hGKr1mMHTdrouuCItVJko32vxT6M,5095
-llama_stack_api/inference.py,sha256=-Zy6F6R6NWI9Wq7acxz84K5C5RfOD9c2ytn8MrlK76s,41432
-llama_stack_api/models.py,sha256=6RLvp94GDNBcMYya06SefIF6whIqAmm0Igsp1MoqLLA,5206
-llama_stack_api/openai_responses.py,sha256=IslBagXUoebtBCYKATr9w7YR72GBjM7gYLNBPGDST4E,53967
-llama_stack_api/post_training.py,sha256=94C4xbjG7Y9w7TRAcfXPOR1Um11QQ7KopU5y1lwCiX4,12991
-llama_stack_api/prompts.py,sha256=D7wa6wZB4LslUGgIQSUezFtYap16qjQ-d33-6SUzTaw,7063
-llama_stack_api/rag_tool.py,sha256=EtfHzPaGjxutdbJ3Ymx6QLtzBNHfCM6W6UGZ9TaV7UU,5695
-llama_stack_api/resource.py,sha256=WDLMV9yeHYwSwxJSt-x-bWEMJU3Dgrl0fwzufTZuyWE,1088
-llama_stack_api/router_utils.py,sha256=ylbRZ16gylyFCCHvS-B9cFpl9E1yRsYL8YlsuIFGP8Y,6949
-llama_stack_api/safety.py,sha256=JXz6gwcl0YlKBMgkAVg89Atq0AtugvubRaQomAHmTzM,4319
-llama_stack_api/schema_utils.py,sha256=YThcm7VlaQdkpOxNvIkn51FfGRlvdVt1TiV-KVBKkyA,7661
-llama_stack_api/scoring.py,sha256=ejVkQbmeBBtbBuy8Xgg-b4aHFe6l8zwYnr5R7GV5gn0,2867
-llama_stack_api/scoring_functions.py,sha256=0lP_ZENUh12i12ibg-_XNNPKLHi_TvB8H5LyEtBLhSE,7789
-llama_stack_api/shields.py,sha256=9dNMyTVL0xcR8_BXCHb_zuAJC7Cz8pX8htRwW2-EDSw,2823
-llama_stack_api/tools.py,sha256=eCyZx806VfpBJgsuJF9R3urA8ljF3g0kLapNpx9YRzY,7518
-llama_stack_api/vector_io.py,sha256=3tYy8xLhVvx_rMtfi5Pxv0GwTMm1TfMYwq82tFqRz1U,36517
-llama_stack_api/vector_stores.py,sha256=mILSO3k2X-Hg4G3YEdq54fKAenCuAzRAXqpNg-_D_Ng,1832
-llama_stack_api/version.py,sha256=V3jdW3iFPdfOt4jWzJA-di7v0zHLYsn11hNtRzkY7uQ,297
-llama_stack_api/admin/__init__.py,sha256=VnJn9fbk-dFkRrm1P5UWlAOcZDA2jf6dx9W5nt-WgOY,1049
-llama_stack_api/admin/api.py,sha256=m14f4iBUJf-G0qITj66o-TFKCSUiD9U12XRnZ1Slr_w,1961
-llama_stack_api/admin/fastapi_routes.py,sha256=3CPWhB86UMlYl3pQ0ZkbF5FLIKIzG2I61esXavoGEjY,3739
-llama_stack_api/admin/models.py,sha256=aoDiI1mtM_XemHwqRFFwiVD64LbenXiYU-QK52IJDQU,3932
-llama_stack_api/batches/__init__.py,sha256=vnHvv8mzJnFlCGa3V-lTiC0k2mVPDLOGZTqgUDovwKg,999
-llama_stack_api/batches/api.py,sha256=49aBQJPOB-x6ohKVWwJ7SORmfm9QSsWak7OBE6L0cMM,1416
-llama_stack_api/batches/fastapi_routes.py,sha256=1b0eSB2Wb2K6gvrhpBFTexsOpxuVU1urgfIOnwxx1fc,3864
-llama_stack_api/batches/models.py,sha256=Dv9cHaaCqaLi_g5wIkKoy-Mn282Gqh711G2swb5ufGM,2692
-llama_stack_api/benchmarks/__init__.py,sha256=_1Vs5xcZb179BxRjTv7OUVL1yvUdzWL92Bsx1aXYMSU,1119
-llama_stack_api/benchmarks/api.py,sha256=j0zaRRBCdJS0XSq5dMthK1nUhiRqzPsJaHkQF61KlFc,933
-llama_stack_api/benchmarks/fastapi_routes.py,sha256=_ZQ74esmXQRC8dbCb3vypT4z-7KdNoouDDH5T2LmD_c,3560
-llama_stack_api/benchmarks/models.py,sha256=h5fWO3KUTnoFzLeIB_lYEVtcgw3D53Rx44WPHE0M7is,3644
-llama_stack_api/common/__init__.py,sha256=vUvqRS2CXhASaFzYVspRYa5q8usSCzjKUlZhzNLuiKg,200
-llama_stack_api/common/content_types.py,sha256=lwc4VlPKWpRSTBO_U1MHdyItmQUzyNAqoaV9g3wKzF4,2693
-llama_stack_api/common/errors.py,sha256=zrOjWerYj5BweLoyoqAbc3HGVSiaXLt10sw6TIJHnZ8,3725
-llama_stack_api/common/job_types.py,sha256=1ifNdcNPqWPWw64R58zkhAnVWCj7oYg3utImbvf4NIc,1031
-llama_stack_api/common/responses.py,sha256=qhwUdKKYzIhnlPPIah36rN3vVgMXEld3kS14XjtwFC0,2505
-llama_stack_api/common/training_types.py,sha256=47eJdnLGfFEesnzRLYr0wysolfql7jpGz7Uh8X-hEec,1468
-llama_stack_api/common/type_system.py,sha256=hTfEKuCXU16X0dBNWbzydhAMgKpPVm6lMM6L28gc9gw,3374
-llama_stack_api/datasets/__init__.py,sha256=Cy5e0m2kU0rCyRcizrEC60gP1BEdD65-XFBvcCEpRWo,1436
-llama_stack_api/datasets/api.py,sha256=DRJAwf8ZYjwVcYoE0pbHZGDHnHsrQJQiVcljvE9qkLc,1046
-llama_stack_api/datasets/fastapi_routes.py,sha256=_F_-nnXeYwo8c5nFAEw7z3b8WPhSnGN_Uy61Cxv1F9A,3096
-llama_stack_api/datasets/models.py,sha256=-Pkz8nD7se10Z_JzSKuRRwY-vcwAwU6UhWSajwfem_U,4648
-llama_stack_api/file_processors/__init__.py,sha256=s9H1EQdDPm5MAmZiZDQbAgY0XXsdo10Bw3WlDu390B4,766
-llama_stack_api/file_processors/api.py,sha256=MxrxuEDjTaqEdMu5kxMuAwwaGZy3yiAFku7VtORdWjk,2775
-llama_stack_api/file_processors/fastapi_routes.py,sha256=NT1D_goFVmtAXdurOjY2ctgi6aAr4nHtgplz2Nhg5cg,2925
-llama_stack_api/file_processors/models.py,sha256=a6_evBoh3PEZVrxJ1lDkWKUy5bZkjCHbydiyMZB9E50,1366
-llama_stack_api/files/__init__.py,sha256=7ncmkC_-3WKYu3FIseApV5w4ER7PHyG1M2E6pb2mduo,839
-llama_stack_api/files/api.py,sha256=79tc1hRe78AE_QA_BdOfpNpjfYTzLVYg6h4dXNkKu3I,1258
-llama_stack_api/files/fastapi_routes.py,sha256=-FadxkQZKXUlYSJtmfZCXCBExAG9HBHttT-j_i0d8Ig,4177
-llama_stack_api/files/models.py,sha256=Uz-gPoMZSV8P7eVHdKSDGMTE-B3dFUdM3BXU9s0PdGY,4239
-llama_stack_api/inspect_api/__init__.py,sha256=0jRDcUhEmVtXqK3BDX8I2qtcN0S4lFAAcLI-dMpGQ-w,861
-llama_stack_api/inspect_api/api.py,sha256=XkdM7jJ3_UlEIE4woEVi5mO2O1aNn9_FPtb18NTnWSM,726
-llama_stack_api/inspect_api/fastapi_routes.py,sha256=I7R8roy6einYDzrPN8wNjrRokpoSNZi9zrtmLHS1vDw,2575
-llama_stack_api/inspect_api/models.py,sha256=EW69EHkOG8i0GS8KW8Kz6WaPZV74hzwad8dGXWrrKhs,683
-llama_stack_api/internal/__init__.py,sha256=hZiF7mONpu54guvMUTW9XpfkETUO55u6hqYOYkz8Bt0,307
-llama_stack_api/internal/kvstore.py,sha256=mgNJz6r8_ju3I3JT2Pz5fSX_9DLv_OupsS2NnJe3usY,833
-llama_stack_api/internal/sqlstore.py,sha256=FBIQhG7VOVMMSTe24uMigfxEWXnarY0hzx9HjrNXVnI,2262
-llama_stack_api/providers/__init__.py,sha256=a_187ghsdPNYJ5xLizqKYREJJLBa-lpcIhLp8spgsH8,841
-llama_stack_api/providers/api.py,sha256=ytwxri9s6p8j9ClFKgN9mfa1TF0VZh1o8W5cVZR49rc,534
-llama_stack_api/providers/fastapi_routes.py,sha256=jb1yrXEk1MdtcgWCToSZtaB-wjKqv5uVKIkvduXoKlM,1962
-llama_stack_api/providers/models.py,sha256=nqBzh9je_dou35XFjYGD43hwKgjWy6HIRmGWUrcGqOw,653
-llama_stack_api-0.4.4.dist-info/METADATA,sha256=5R8WVvwWnO9thYWQVhbDf-HXdn8ixAdAhYh4SLrY65g,4190
-llama_stack_api-0.4.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-llama_stack_api-0.4.4.dist-info/top_level.txt,sha256=Ybn9EvE0q9jHHJ9K9d3kdYXLbof-4zEYS2IX7tjztgY,16
-llama_stack_api-0.4.4.dist-info/RECORD,,
{llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/WHEEL
File without changes
{llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/top_level.txt
File without changes