llama-stack 0.4.1__py3-none-any.whl → 0.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack/cli/stack/run.py +3 -0
- llama_stack/core/stack.py +22 -10
- llama_stack/providers/inline/tool_runtime/rag/memory.py +8 -3
- llama_stack/providers/utils/memory/vector_store.py +9 -4
- {llama_stack-0.4.1.dist-info → llama_stack-0.4.2.dist-info}/METADATA +2 -2
- {llama_stack-0.4.1.dist-info → llama_stack-0.4.2.dist-info}/RECORD +77 -10
- llama_stack_api/llama_stack_api/__init__.py +945 -0
- llama_stack_api/llama_stack_api/admin/__init__.py +45 -0
- llama_stack_api/llama_stack_api/admin/api.py +72 -0
- llama_stack_api/llama_stack_api/admin/fastapi_routes.py +117 -0
- llama_stack_api/llama_stack_api/admin/models.py +113 -0
- llama_stack_api/llama_stack_api/agents.py +173 -0
- llama_stack_api/llama_stack_api/batches/__init__.py +40 -0
- llama_stack_api/llama_stack_api/batches/api.py +53 -0
- llama_stack_api/llama_stack_api/batches/fastapi_routes.py +113 -0
- llama_stack_api/llama_stack_api/batches/models.py +78 -0
- llama_stack_api/llama_stack_api/benchmarks/__init__.py +43 -0
- llama_stack_api/llama_stack_api/benchmarks/api.py +39 -0
- llama_stack_api/llama_stack_api/benchmarks/fastapi_routes.py +109 -0
- llama_stack_api/llama_stack_api/benchmarks/models.py +109 -0
- llama_stack_api/llama_stack_api/common/__init__.py +5 -0
- llama_stack_api/llama_stack_api/common/content_types.py +101 -0
- llama_stack_api/llama_stack_api/common/errors.py +95 -0
- llama_stack_api/llama_stack_api/common/job_types.py +38 -0
- llama_stack_api/llama_stack_api/common/responses.py +77 -0
- llama_stack_api/llama_stack_api/common/training_types.py +47 -0
- llama_stack_api/llama_stack_api/common/type_system.py +146 -0
- llama_stack_api/llama_stack_api/connectors.py +146 -0
- llama_stack_api/llama_stack_api/conversations.py +270 -0
- llama_stack_api/llama_stack_api/datasetio.py +55 -0
- llama_stack_api/llama_stack_api/datasets/__init__.py +61 -0
- llama_stack_api/llama_stack_api/datasets/api.py +35 -0
- llama_stack_api/llama_stack_api/datasets/fastapi_routes.py +104 -0
- llama_stack_api/llama_stack_api/datasets/models.py +152 -0
- llama_stack_api/llama_stack_api/datatypes.py +373 -0
- llama_stack_api/llama_stack_api/eval.py +137 -0
- llama_stack_api/llama_stack_api/file_processors/__init__.py +27 -0
- llama_stack_api/llama_stack_api/file_processors/api.py +64 -0
- llama_stack_api/llama_stack_api/file_processors/fastapi_routes.py +78 -0
- llama_stack_api/llama_stack_api/file_processors/models.py +42 -0
- llama_stack_api/llama_stack_api/files/__init__.py +35 -0
- llama_stack_api/llama_stack_api/files/api.py +51 -0
- llama_stack_api/llama_stack_api/files/fastapi_routes.py +124 -0
- llama_stack_api/llama_stack_api/files/models.py +107 -0
- llama_stack_api/llama_stack_api/inference.py +1169 -0
- llama_stack_api/llama_stack_api/inspect_api/__init__.py +37 -0
- llama_stack_api/llama_stack_api/inspect_api/api.py +25 -0
- llama_stack_api/llama_stack_api/inspect_api/fastapi_routes.py +76 -0
- llama_stack_api/llama_stack_api/inspect_api/models.py +28 -0
- llama_stack_api/llama_stack_api/internal/__init__.py +9 -0
- llama_stack_api/llama_stack_api/internal/kvstore.py +26 -0
- llama_stack_api/llama_stack_api/internal/sqlstore.py +79 -0
- llama_stack_api/llama_stack_api/models.py +171 -0
- llama_stack_api/llama_stack_api/openai_responses.py +1468 -0
- llama_stack_api/llama_stack_api/post_training.py +370 -0
- llama_stack_api/llama_stack_api/prompts.py +203 -0
- llama_stack_api/llama_stack_api/providers/__init__.py +33 -0
- llama_stack_api/llama_stack_api/providers/api.py +16 -0
- llama_stack_api/llama_stack_api/providers/fastapi_routes.py +57 -0
- llama_stack_api/llama_stack_api/providers/models.py +24 -0
- llama_stack_api/llama_stack_api/py.typed +0 -0
- llama_stack_api/llama_stack_api/rag_tool.py +168 -0
- llama_stack_api/llama_stack_api/resource.py +37 -0
- llama_stack_api/llama_stack_api/router_utils.py +160 -0
- llama_stack_api/llama_stack_api/safety.py +132 -0
- llama_stack_api/llama_stack_api/schema_utils.py +208 -0
- llama_stack_api/llama_stack_api/scoring.py +93 -0
- llama_stack_api/llama_stack_api/scoring_functions.py +211 -0
- llama_stack_api/llama_stack_api/shields.py +93 -0
- llama_stack_api/llama_stack_api/tools.py +226 -0
- llama_stack_api/llama_stack_api/vector_io.py +941 -0
- llama_stack_api/llama_stack_api/vector_stores.py +51 -0
- llama_stack_api/llama_stack_api/version.py +9 -0
- {llama_stack-0.4.1.dist-info → llama_stack-0.4.2.dist-info}/WHEEL +0 -0
- {llama_stack-0.4.1.dist-info → llama_stack-0.4.2.dist-info}/entry_points.txt +0 -0
- {llama_stack-0.4.1.dist-info → llama_stack-0.4.2.dist-info}/licenses/LICENSE +0 -0
- {llama_stack-0.4.1.dist-info → llama_stack-0.4.2.dist-info}/top_level.txt +0 -0
llama_stack/cli/stack/run.py
CHANGED
@@ -202,6 +202,9 @@ class StackRun(Subcommand):
         # Set the config file in environment so create_app can find it
         os.environ["LLAMA_STACK_CONFIG"] = str(config_file)
 
+        # disable together banner that spams llama stack run every time
+        os.environ["TOGETHER_NO_BANNER"] = "1"
+
         uvicorn_config = {
             "factory": True,
             "host": host,
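If you embed the server instead of using the CLI, the same ordering matters: the environment variables must be exported before the app factory runs, so that create_app can locate the config and the Together SDK sees the flag before printing its banner. A minimal sketch — the "my_app:create_app" target and the config path are placeholders, not the CLI's actual internals:

```python
import os
import uvicorn

# Mirror what `llama stack run` now does: export these before the ASGI app
# factory is invoked.
os.environ["LLAMA_STACK_CONFIG"] = "/path/to/run.yaml"  # placeholder config path
os.environ["TOGETHER_NO_BANNER"] = "1"                   # silence the Together banner

# "my_app:create_app" is a stand-in for the factory target the CLI uses;
# factory=True makes uvicorn call it to construct the application.
uvicorn.run("my_app:create_app", factory=True, host="0.0.0.0", port=8321)
```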
llama_stack/core/stack.py
CHANGED
@@ -53,6 +53,7 @@ from llama_stack_api import (
     PostTraining,
     Prompts,
     Providers,
+    RegisterBenchmarkRequest,
     Safety,
     Scoring,
     ScoringFunctions,
@@ -61,6 +62,7 @@ from llama_stack_api import (
     ToolRuntime,
     VectorIO,
 )
+from llama_stack_api.datasets import RegisterDatasetRequest
 
 logger = get_logger(name=__name__, category="core")
 
@@ -91,18 +93,21 @@ class LlamaStack(
     pass
 
 
+# Resources to register based on configuration.
+# If a request class is specified, the configuration object will be converted to this class before invoking the registration method.
 RESOURCES = [
-    ("models", Api.models, "register_model", "list_models"),
-    ("shields", Api.shields, "register_shield", "list_shields"),
-    ("datasets", Api.datasets, "register_dataset", "list_datasets"),
+    ("models", Api.models, "register_model", "list_models", None),
+    ("shields", Api.shields, "register_shield", "list_shields", None),
+    ("datasets", Api.datasets, "register_dataset", "list_datasets", RegisterDatasetRequest),
     (
         "scoring_fns",
         Api.scoring_functions,
         "register_scoring_function",
         "list_scoring_functions",
+        None,
     ),
-    ("benchmarks", Api.benchmarks, "register_benchmark", "list_benchmarks"),
-    ("tool_groups", Api.tool_groups, "register_tool_group", "list_tool_groups"),
+    ("benchmarks", Api.benchmarks, "register_benchmark", "list_benchmarks", RegisterBenchmarkRequest),
+    ("tool_groups", Api.tool_groups, "register_tool_group", "list_tool_groups", None),
 ]
 
 
@@ -199,7 +204,7 @@ async def invoke_with_optional_request(method: Any) -> Any:
 
 
 async def register_resources(run_config: StackConfig, impls: dict[Api, Any]):
-    for rsrc, api, register_method, list_method in RESOURCES:
+    for rsrc, api, register_method, list_method, request_class in RESOURCES:
         objects = getattr(run_config.registered_resources, rsrc)
         if api not in impls:
             continue
@@ -213,10 +218,17 @@ async def register_resources(run_config: StackConfig, impls: dict[Api, Any]):
                 continue
             logger.debug(f"registering {rsrc.capitalize()} {obj} for provider {obj.provider_id}")
 
-            #
-            #
-
-
+            # TODO: Once all register methods are migrated to accept request objects,
+            # remove this conditional and always use the request_class pattern.
+            if request_class is not None:
+                request = request_class(**obj.model_dump())
+                await method(request)
+            else:
+                # we want to maintain the type information in arguments to method.
+                # instead of method(**obj.model_dump()), which may convert a typed attr to a dict,
+                # we use model_dump() to find all the attrs and then getattr to get the still typed
+                # value.
+                await method(**{k: getattr(obj, k) for k in obj.model_dump().keys()})
 
         method = getattr(impls[api], list_method)
         response = await invoke_with_optional_request(method)
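Each RESOURCES entry now carries a fifth element: an optional request class. When it is set, the config entry is converted into that request object before the provider's register method is called; when it is None, the legacy keyword-argument call path is kept. A minimal, self-contained sketch of that dispatch — DatasetConfig, RegisterDatasetRequest, and register_dataset below are illustrative stand-ins, not the actual llama-stack classes:

```python
import asyncio
from pydantic import BaseModel

# Stand-ins for the real llama-stack config/request types and provider method.
class DatasetConfig(BaseModel):           # entry from run_config.registered_resources
    dataset_id: str
    provider_id: str

class RegisterDatasetRequest(BaseModel):  # request class listed in RESOURCES
    dataset_id: str
    provider_id: str

async def register_dataset(request: RegisterDatasetRequest) -> None:
    print(f"registered {request.dataset_id} via {request.provider_id}")

async def main() -> None:
    obj = DatasetConfig(dataset_id="docs", provider_id="inline::sqlite")
    request_class = RegisterDatasetRequest  # 5th element of the RESOURCES tuple; None for legacy APIs

    if request_class is not None:
        # new path: convert the config entry into a typed request object
        await register_dataset(request_class(**obj.model_dump()))
    else:
        # legacy path: pass attributes through while keeping their typed values
        await register_dataset(**{k: getattr(obj, k) for k in obj.model_dump().keys()})

asyncio.run(main())
```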
llama_stack/providers/inline/tool_runtime/rag/memory.py
CHANGED

@@ -50,8 +50,11 @@ log = get_logger(name=__name__, category="tool_runtime")
 async def raw_data_from_doc(doc: RAGDocument) -> tuple[bytes, str]:
     """Get raw binary data and mime type from a RAGDocument for file upload."""
     if isinstance(doc.content, URL):
-
-
+        uri = doc.content.uri
+        if uri.startswith("file://"):
+            raise ValueError("file:// URIs are not supported. Please use the Files API (/v1/files) to upload files.")
+        if uri.startswith("data:"):
+            parts = parse_data_url(uri)
             mime_type = parts["mimetype"]
             data = parts["data"]
 
@@ -63,7 +66,7 @@ async def raw_data_from_doc(doc: RAGDocument) -> tuple[bytes, str]:
             return file_data, mime_type
         else:
             async with httpx.AsyncClient() as client:
-                r = await client.get(
+                r = await client.get(uri)
                 r.raise_for_status()
                 mime_type = r.headers.get("content-type", "application/octet-stream")
                 return r.content, mime_type
@@ -73,6 +76,8 @@ async def raw_data_from_doc(doc: RAGDocument) -> tuple[bytes, str]:
     else:
         content_str = interleaved_content_as_str(doc.content)
 
+        if content_str.startswith("file://"):
+            raise ValueError("file:// URIs are not supported. Please use the Files API (/v1/files) to upload files.")
         if content_str.startswith("data:"):
             parts = parse_data_url(content_str)
             mime_type = parts["mimetype"]
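The net effect for raw_data_from_doc: a URL-typed document now rejects file:// up front, decodes data: URIs inline, and fetches anything else over HTTP. A rough standard-library sketch of that branching, assuming nothing about the stack's parse_data_url helper or its httpx usage:

```python
import base64
import urllib.request

def raw_data_from_uri(uri: str) -> tuple[bytes, str]:
    if uri.startswith("file://"):
        # local paths are no longer read directly; callers should upload via /v1/files
        raise ValueError("file:// URIs are not supported. Please use the Files API (/v1/files) to upload files.")
    if uri.startswith("data:"):
        header, payload = uri.split(",", 1)  # e.g. data:text/plain;base64,SGVsbG8=
        mime_type = header[len("data:"):].split(";")[0] or "text/plain"
        data = base64.b64decode(payload) if ";base64" in header else payload.encode()
        return data, mime_type
    with urllib.request.urlopen(uri) as r:   # http(s) URLs are fetched as before
        return r.read(), r.headers.get_content_type()

print(raw_data_from_uri("data:text/plain;base64," + base64.b64encode(b"hello").decode()))
```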
llama_stack/providers/utils/memory/vector_store.py
CHANGED

@@ -135,15 +135,20 @@ def content_from_data_and_mime_type(data: bytes | str, mime_type: str | None, en
 
 async def content_from_doc(doc: RAGDocument) -> str:
     if isinstance(doc.content, URL):
-
-
+        uri = doc.content.uri
+        if uri.startswith("file://"):
+            raise ValueError("file:// URIs are not supported. Please use the Files API (/v1/files) to upload files.")
+        if uri.startswith("data:"):
+            return content_from_data(uri)
         async with httpx.AsyncClient() as client:
-            r = await client.get(
+            r = await client.get(uri)
             if doc.mime_type == "application/pdf":
                 return parse_pdf(r.content)
             return r.text
     elif isinstance(doc.content, str):
-
+        if doc.content.startswith("file://"):
+            raise ValueError("file:// URIs are not supported. Please use the Files API (/v1/files) to upload files.")
+        pattern = re.compile("^(https?://|data:)")
         if pattern.match(doc.content):
             if doc.content.startswith("data:"):
                 return content_from_data(doc.content)
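content_from_doc applies the same gate to plain-string content: file:// is rejected before the existing ^(https?://|data:) pattern decides whether the string is fetched or decoded versus treated as inline document text. A small illustration of that ordering:

```python
import re

# Dispatch sketch for string content: reject file:// first, then only
# http(s) and data: URIs count as remote/inline sources.
pattern = re.compile("^(https?://|data:)")

for content in ("file:///etc/hosts", "https://example.com/doc.txt", "data:text/plain,hi", "plain inline text"):
    if content.startswith("file://"):
        print(content, "-> rejected (use the Files API at /v1/files)")
    elif pattern.match(content):
        print(content, "-> fetched or decoded")
    else:
        print(content, "-> used verbatim as document text")
```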
{llama_stack-0.4.1.dist-info → llama_stack-0.4.2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llama_stack
-Version: 0.4.1
+Version: 0.4.2
 Summary: Llama Stack
 Author-email: Meta Llama <llama-oss@meta.com>
 License: MIT
@@ -46,7 +46,7 @@ Requires-Dist: psycopg2-binary
 Requires-Dist: tornado>=6.5.3
 Requires-Dist: urllib3>=2.6.3
 Provides-Extra: client
-Requires-Dist: llama-stack-client==0.4.1; extra == "client"
+Requires-Dist: llama-stack-client==0.4.2; extra == "client"
 Dynamic: license-file
 
 # Llama Stack
{llama_stack-0.4.1.dist-info → llama_stack-0.4.2.dist-info}/RECORD
CHANGED

@@ -16,7 +16,7 @@ llama_stack/cli/stack/list_deps.py,sha256=gSpvvMZ2UnmfYxwyFg4AXr_q2EGmXBtU0bcwrP
 llama_stack/cli/stack/list_providers.py,sha256=-NMpsZygUUWsWNZ02rFa9XtIZCBBRIYHk6fbDl6K-kc,2471
 llama_stack/cli/stack/list_stacks.py,sha256=ju1LaQ3aUlOsMNUBnEPg_Q6yb_cIvGWq_ratjuQ7Mjg,3146
 llama_stack/cli/stack/remove.py,sha256=QXDQxNKWQ9kfBd5guJn7NdiPajOS-FXr2HFZHcLX124,3923
-llama_stack/cli/stack/run.py,sha256=
+llama_stack/cli/stack/run.py,sha256=E3R8jNxLde1WuFsHS07ZyExkpSzXXhwUuYLHeN2tvz8,14020
 llama_stack/cli/stack/stack.py,sha256=i-PSWXVcfNCJMoWsvVYZf2ny6QS79Tt4FDU39TSEwxI,1622
 llama_stack/cli/stack/utils.py,sha256=kk9QWM7bzxZ03VsPtD4DkQ1o9_D3tvozjhfezcpqKog,716
 llama_stack/core/__init__.py,sha256=vUvqRS2CXhASaFzYVspRYa5q8usSCzjKUlZhzNLuiKg,200
@@ -34,7 +34,7 @@ llama_stack/core/library_client.py,sha256=V5f7apz0heD5DyExwNXiEN0E5xGyQh279BeuVS
 llama_stack/core/providers.py,sha256=EblMlsWJKGHsXCTmVo-doCJ64JEpBy7-2DoupFkaTUo,5134
 llama_stack/core/request_headers.py,sha256=tUt-RvzUrl7yxbYKBe7nN5YBCgWxShz4cemLvl7XGxc,3692
 llama_stack/core/resolver.py,sha256=IRPPwi60uAe5mlj-NjAR41laP9Dp1WvAI3A-bTMB-mk,19383
-llama_stack/core/stack.py,sha256=
+llama_stack/core/stack.py,sha256=YWk2opmFtsYmyEvjrUzDGhLkGV3SNN-omr_eVZKsS-8,27944
 llama_stack/core/start_stack.sh,sha256=3snlFzur13NS1_UnJQ6t8zK7R5DCRFJKJrz9YTJmWVA,2834
 llama_stack/core/testing_context.py,sha256=TIWetol6Sb2BSiqkq5X0knb0chG03GSpmjByFwVfY60,1438
 llama_stack/core/access_control/__init__.py,sha256=vUvqRS2CXhASaFzYVspRYa5q8usSCzjKUlZhzNLuiKg,200
@@ -311,7 +311,7 @@ llama_stack/providers/inline/tool_runtime/__init__.py,sha256=vUvqRS2CXhASaFzYVsp
 llama_stack/providers/inline/tool_runtime/rag/__init__.py,sha256=ncRDYgjaKgn2Y-59b0_x8_LoJERzVvZVU17j7HDGFgY,573
 llama_stack/providers/inline/tool_runtime/rag/config.py,sha256=zG0e0iXFwBoQHALvEjJmBJsRbLCKQZGOz0KcH4572dg,673
 llama_stack/providers/inline/tool_runtime/rag/context_retriever.py,sha256=QczqRA8OLXijkOZslIaRWBZPOJxJRJYKHI5bePrfMTY,2299
-llama_stack/providers/inline/tool_runtime/rag/memory.py,sha256=
+llama_stack/providers/inline/tool_runtime/rag/memory.py,sha256=nXQiKvk37IAtGsl13-pnJjcO2Z1Fe20JGo9Pdgs91I8,13239
 llama_stack/providers/inline/vector_io/__init__.py,sha256=vUvqRS2CXhASaFzYVspRYa5q8usSCzjKUlZhzNLuiKg,200
 llama_stack/providers/inline/vector_io/chroma/__init__.py,sha256=gWJ-VCpFHyqmZopOMyhO_8jWea3cbYQdsRsxEWcuMhE,601
 llama_stack/providers/inline/vector_io/chroma/config.py,sha256=T3dM9KqN280F9McGoIEonzfoLl3cTnJxUwH4nLq14no,925
@@ -496,7 +496,7 @@ llama_stack/providers/utils/inference/stream_utils.py,sha256=WdM3SPMh9xfOAcpd67_
 llama_stack/providers/utils/memory/__init__.py,sha256=pA4yikPZUO-A0K2nscz5tEp1yYSBtvglbgC5pe-FGKE,214
 llama_stack/providers/utils/memory/file_utils.py,sha256=MsjispuPO0cMXmRqAoTJ-dwM9uzgYn4aiRFBM-aHP9w,712
 llama_stack/providers/utils/memory/openai_vector_store_mixin.py,sha256=8nq_Nj_pLpznlf7YbsZAHeSucH1FaHD3IILfNoquwpo,58620
-llama_stack/providers/utils/memory/vector_store.py,sha256=
+llama_stack/providers/utils/memory/vector_store.py,sha256=HiNCtZ4OzvPk4RGuRNks7CnbrDoppYyrJdjST-emnZQ,11941
 llama_stack/providers/utils/responses/__init__.py,sha256=vUvqRS2CXhASaFzYVspRYa5q8usSCzjKUlZhzNLuiKg,200
 llama_stack/providers/utils/responses/responses_store.py,sha256=4ziPwlqxMS-mrlB2rL2M8LU9sYewmInH9zN5WPHK00U,10397
 llama_stack/providers/utils/scoring/__init__.py,sha256=vUvqRS2CXhASaFzYVspRYa5q8usSCzjKUlZhzNLuiKg,200
@@ -513,7 +513,7 @@ llama_stack/telemetry/constants.py,sha256=LtXE61xwNL3cBYZXKcXcbwD_Uh1jazP3V8a0od
 llama_stack/telemetry/helpers.py,sha256=7uarMIHL5ngOUXQZxkH96corFxE7Jk5JaizRQ8Z8Ok0,1694
 llama_stack/testing/__init__.py,sha256=vUvqRS2CXhASaFzYVspRYa5q8usSCzjKUlZhzNLuiKg,200
 llama_stack/testing/api_recorder.py,sha256=oGGTrzzBYNNvOIcvcFZenNPthr0yziJ7hlGPtckx460,39240
-llama_stack-0.4.
+llama_stack-0.4.2.dist-info/licenses/LICENSE,sha256=42g1gBn9gHYdBt5e6e1aFYhnc-JT9trU9qBD84oUAlY,1087
 llama_stack_api/__init__.py,sha256=5XNQGpundjXTutLgnYp6B1t6KITWXH_of626GciNma4,28103
 llama_stack_api/agents.py,sha256=u0sg3AoWCip5o8T4DMTM8uqP3BsdbkKbor3PmxKTg0g,7143
 llama_stack_api/connectors.py,sha256=PcAwndbVQC6pm5HGSlNprqYFTZzhCM7SYHPyRkSIoaQ,4644
@@ -577,12 +577,79 @@ llama_stack_api/inspect_api/models.py,sha256=EW69EHkOG8i0GS8KW8Kz6WaPZV74hzwad8d
 llama_stack_api/internal/__init__.py,sha256=hZiF7mONpu54guvMUTW9XpfkETUO55u6hqYOYkz8Bt0,307
 llama_stack_api/internal/kvstore.py,sha256=J_lFhhlFcg9uCyn6J758qWSbMIW5nvcfvB66kkitF8g,790
 llama_stack_api/internal/sqlstore.py,sha256=IMOmHiNpxrjqvYNmcsdxbGDUdnMvviFo8AlmT9P27IQ,2219
+llama_stack_api/llama_stack_api/__init__.py,sha256=5XNQGpundjXTutLgnYp6B1t6KITWXH_of626GciNma4,28103
+llama_stack_api/llama_stack_api/agents.py,sha256=u0sg3AoWCip5o8T4DMTM8uqP3BsdbkKbor3PmxKTg0g,7143
+llama_stack_api/llama_stack_api/connectors.py,sha256=PcAwndbVQC6pm5HGSlNprqYFTZzhCM7SYHPyRkSIoaQ,4644
+llama_stack_api/llama_stack_api/conversations.py,sha256=pLQD2ZT6rSWF2IIQUtdSvkq50w9-piCMVr9hgdMmlBw,10290
+llama_stack_api/llama_stack_api/datasetio.py,sha256=n4wQRv01rl8K_Ig2_Ln5hZBfdbmptKtDGsNE1igJ1-E,2075
+llama_stack_api/llama_stack_api/datatypes.py,sha256=S7qOix_CBofuCEU6Gmm9qogZnnIO-WlN1kfO3D4Xlnc,12590
+llama_stack_api/llama_stack_api/eval.py,sha256=PjgrSNk_Q8MmnN5hGKr1mMHTdrouuCItVJko32vxT6M,5095
+llama_stack_api/llama_stack_api/inference.py,sha256=-Zy6F6R6NWI9Wq7acxz84K5C5RfOD9c2ytn8MrlK76s,41432
+llama_stack_api/llama_stack_api/models.py,sha256=6RLvp94GDNBcMYya06SefIF6whIqAmm0Igsp1MoqLLA,5206
+llama_stack_api/llama_stack_api/openai_responses.py,sha256=IslBagXUoebtBCYKATr9w7YR72GBjM7gYLNBPGDST4E,53967
+llama_stack_api/llama_stack_api/post_training.py,sha256=94C4xbjG7Y9w7TRAcfXPOR1Um11QQ7KopU5y1lwCiX4,12991
+llama_stack_api/llama_stack_api/prompts.py,sha256=D7wa6wZB4LslUGgIQSUezFtYap16qjQ-d33-6SUzTaw,7063
+llama_stack_api/llama_stack_api/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+llama_stack_api/llama_stack_api/rag_tool.py,sha256=EtfHzPaGjxutdbJ3Ymx6QLtzBNHfCM6W6UGZ9TaV7UU,5695
+llama_stack_api/llama_stack_api/resource.py,sha256=WDLMV9yeHYwSwxJSt-x-bWEMJU3Dgrl0fwzufTZuyWE,1088
+llama_stack_api/llama_stack_api/router_utils.py,sha256=ylbRZ16gylyFCCHvS-B9cFpl9E1yRsYL8YlsuIFGP8Y,6949
+llama_stack_api/llama_stack_api/safety.py,sha256=JXz6gwcl0YlKBMgkAVg89Atq0AtugvubRaQomAHmTzM,4319
+llama_stack_api/llama_stack_api/schema_utils.py,sha256=YThcm7VlaQdkpOxNvIkn51FfGRlvdVt1TiV-KVBKkyA,7661
+llama_stack_api/llama_stack_api/scoring.py,sha256=ejVkQbmeBBtbBuy8Xgg-b4aHFe6l8zwYnr5R7GV5gn0,2867
+llama_stack_api/llama_stack_api/scoring_functions.py,sha256=0lP_ZENUh12i12ibg-_XNNPKLHi_TvB8H5LyEtBLhSE,7789
+llama_stack_api/llama_stack_api/shields.py,sha256=9dNMyTVL0xcR8_BXCHb_zuAJC7Cz8pX8htRwW2-EDSw,2823
+llama_stack_api/llama_stack_api/tools.py,sha256=eCyZx806VfpBJgsuJF9R3urA8ljF3g0kLapNpx9YRzY,7518
+llama_stack_api/llama_stack_api/vector_io.py,sha256=3tYy8xLhVvx_rMtfi5Pxv0GwTMm1TfMYwq82tFqRz1U,36517
+llama_stack_api/llama_stack_api/vector_stores.py,sha256=DMkwPSg05VJOvBJrVlwFU6EHBQEmarADhIzzgt1jjwE,1709
+llama_stack_api/llama_stack_api/version.py,sha256=V3jdW3iFPdfOt4jWzJA-di7v0zHLYsn11hNtRzkY7uQ,297
+llama_stack_api/llama_stack_api/admin/__init__.py,sha256=VnJn9fbk-dFkRrm1P5UWlAOcZDA2jf6dx9W5nt-WgOY,1049
+llama_stack_api/llama_stack_api/admin/api.py,sha256=m14f4iBUJf-G0qITj66o-TFKCSUiD9U12XRnZ1Slr_w,1961
+llama_stack_api/llama_stack_api/admin/fastapi_routes.py,sha256=3CPWhB86UMlYl3pQ0ZkbF5FLIKIzG2I61esXavoGEjY,3739
+llama_stack_api/llama_stack_api/admin/models.py,sha256=aoDiI1mtM_XemHwqRFFwiVD64LbenXiYU-QK52IJDQU,3932
+llama_stack_api/llama_stack_api/batches/__init__.py,sha256=vnHvv8mzJnFlCGa3V-lTiC0k2mVPDLOGZTqgUDovwKg,999
+llama_stack_api/llama_stack_api/batches/api.py,sha256=49aBQJPOB-x6ohKVWwJ7SORmfm9QSsWak7OBE6L0cMM,1416
+llama_stack_api/llama_stack_api/batches/fastapi_routes.py,sha256=1b0eSB2Wb2K6gvrhpBFTexsOpxuVU1urgfIOnwxx1fc,3864
+llama_stack_api/llama_stack_api/batches/models.py,sha256=Dv9cHaaCqaLi_g5wIkKoy-Mn282Gqh711G2swb5ufGM,2692
+llama_stack_api/llama_stack_api/benchmarks/__init__.py,sha256=_1Vs5xcZb179BxRjTv7OUVL1yvUdzWL92Bsx1aXYMSU,1119
+llama_stack_api/llama_stack_api/benchmarks/api.py,sha256=j0zaRRBCdJS0XSq5dMthK1nUhiRqzPsJaHkQF61KlFc,933
+llama_stack_api/llama_stack_api/benchmarks/fastapi_routes.py,sha256=_ZQ74esmXQRC8dbCb3vypT4z-7KdNoouDDH5T2LmD_c,3560
+llama_stack_api/llama_stack_api/benchmarks/models.py,sha256=h5fWO3KUTnoFzLeIB_lYEVtcgw3D53Rx44WPHE0M7is,3644
+llama_stack_api/llama_stack_api/common/__init__.py,sha256=vUvqRS2CXhASaFzYVspRYa5q8usSCzjKUlZhzNLuiKg,200
+llama_stack_api/llama_stack_api/common/content_types.py,sha256=lwc4VlPKWpRSTBO_U1MHdyItmQUzyNAqoaV9g3wKzF4,2693
+llama_stack_api/llama_stack_api/common/errors.py,sha256=zrOjWerYj5BweLoyoqAbc3HGVSiaXLt10sw6TIJHnZ8,3725
+llama_stack_api/llama_stack_api/common/job_types.py,sha256=1ifNdcNPqWPWw64R58zkhAnVWCj7oYg3utImbvf4NIc,1031
+llama_stack_api/llama_stack_api/common/responses.py,sha256=qhwUdKKYzIhnlPPIah36rN3vVgMXEld3kS14XjtwFC0,2505
+llama_stack_api/llama_stack_api/common/training_types.py,sha256=47eJdnLGfFEesnzRLYr0wysolfql7jpGz7Uh8X-hEec,1468
+llama_stack_api/llama_stack_api/common/type_system.py,sha256=hTfEKuCXU16X0dBNWbzydhAMgKpPVm6lMM6L28gc9gw,3374
+llama_stack_api/llama_stack_api/datasets/__init__.py,sha256=Cy5e0m2kU0rCyRcizrEC60gP1BEdD65-XFBvcCEpRWo,1436
+llama_stack_api/llama_stack_api/datasets/api.py,sha256=DRJAwf8ZYjwVcYoE0pbHZGDHnHsrQJQiVcljvE9qkLc,1046
+llama_stack_api/llama_stack_api/datasets/fastapi_routes.py,sha256=_F_-nnXeYwo8c5nFAEw7z3b8WPhSnGN_Uy61Cxv1F9A,3096
+llama_stack_api/llama_stack_api/datasets/models.py,sha256=-Pkz8nD7se10Z_JzSKuRRwY-vcwAwU6UhWSajwfem_U,4648
+llama_stack_api/llama_stack_api/file_processors/__init__.py,sha256=s9H1EQdDPm5MAmZiZDQbAgY0XXsdo10Bw3WlDu390B4,766
+llama_stack_api/llama_stack_api/file_processors/api.py,sha256=MxrxuEDjTaqEdMu5kxMuAwwaGZy3yiAFku7VtORdWjk,2775
+llama_stack_api/llama_stack_api/file_processors/fastapi_routes.py,sha256=NT1D_goFVmtAXdurOjY2ctgi6aAr4nHtgplz2Nhg5cg,2925
+llama_stack_api/llama_stack_api/file_processors/models.py,sha256=a6_evBoh3PEZVrxJ1lDkWKUy5bZkjCHbydiyMZB9E50,1366
+llama_stack_api/llama_stack_api/files/__init__.py,sha256=7ncmkC_-3WKYu3FIseApV5w4ER7PHyG1M2E6pb2mduo,839
+llama_stack_api/llama_stack_api/files/api.py,sha256=79tc1hRe78AE_QA_BdOfpNpjfYTzLVYg6h4dXNkKu3I,1258
+llama_stack_api/llama_stack_api/files/fastapi_routes.py,sha256=-FadxkQZKXUlYSJtmfZCXCBExAG9HBHttT-j_i0d8Ig,4177
+llama_stack_api/llama_stack_api/files/models.py,sha256=Uz-gPoMZSV8P7eVHdKSDGMTE-B3dFUdM3BXU9s0PdGY,4239
+llama_stack_api/llama_stack_api/inspect_api/__init__.py,sha256=0jRDcUhEmVtXqK3BDX8I2qtcN0S4lFAAcLI-dMpGQ-w,861
+llama_stack_api/llama_stack_api/inspect_api/api.py,sha256=XkdM7jJ3_UlEIE4woEVi5mO2O1aNn9_FPtb18NTnWSM,726
+llama_stack_api/llama_stack_api/inspect_api/fastapi_routes.py,sha256=I7R8roy6einYDzrPN8wNjrRokpoSNZi9zrtmLHS1vDw,2575
+llama_stack_api/llama_stack_api/inspect_api/models.py,sha256=EW69EHkOG8i0GS8KW8Kz6WaPZV74hzwad8dGXWrrKhs,683
+llama_stack_api/llama_stack_api/internal/__init__.py,sha256=hZiF7mONpu54guvMUTW9XpfkETUO55u6hqYOYkz8Bt0,307
+llama_stack_api/llama_stack_api/internal/kvstore.py,sha256=J_lFhhlFcg9uCyn6J758qWSbMIW5nvcfvB66kkitF8g,790
+llama_stack_api/llama_stack_api/internal/sqlstore.py,sha256=IMOmHiNpxrjqvYNmcsdxbGDUdnMvviFo8AlmT9P27IQ,2219
+llama_stack_api/llama_stack_api/providers/__init__.py,sha256=a_187ghsdPNYJ5xLizqKYREJJLBa-lpcIhLp8spgsH8,841
+llama_stack_api/llama_stack_api/providers/api.py,sha256=ytwxri9s6p8j9ClFKgN9mfa1TF0VZh1o8W5cVZR49rc,534
+llama_stack_api/llama_stack_api/providers/fastapi_routes.py,sha256=jb1yrXEk1MdtcgWCToSZtaB-wjKqv5uVKIkvduXoKlM,1962
+llama_stack_api/llama_stack_api/providers/models.py,sha256=nqBzh9je_dou35XFjYGD43hwKgjWy6HIRmGWUrcGqOw,653
 llama_stack_api/providers/__init__.py,sha256=a_187ghsdPNYJ5xLizqKYREJJLBa-lpcIhLp8spgsH8,841
 llama_stack_api/providers/api.py,sha256=ytwxri9s6p8j9ClFKgN9mfa1TF0VZh1o8W5cVZR49rc,534
 llama_stack_api/providers/fastapi_routes.py,sha256=jb1yrXEk1MdtcgWCToSZtaB-wjKqv5uVKIkvduXoKlM,1962
 llama_stack_api/providers/models.py,sha256=nqBzh9je_dou35XFjYGD43hwKgjWy6HIRmGWUrcGqOw,653
-llama_stack-0.4.
-llama_stack-0.4.
-llama_stack-0.4.
-llama_stack-0.4.
-llama_stack-0.4.
+llama_stack-0.4.2.dist-info/METADATA,sha256=otXCJCHPmU0tTV9dG5BsYWmxgbWqrlfeicQgZx5OlCc,12464
+llama_stack-0.4.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llama_stack-0.4.2.dist-info/entry_points.txt,sha256=E5xoyAM9064aW_y96eSSwZCNT_ANctrvrhLMJnMQlw0,141
+llama_stack-0.4.2.dist-info/top_level.txt,sha256=pyNYneZU5w62BaExic-GC1ph5kk8JI2mJFwzqiZy2cU,28
+llama_stack-0.4.2.dist-info/RECORD,,