llama-stack 0.4.3__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (166) hide show
  1. llama_stack/distributions/dell/doc_template.md +209 -0
  2. llama_stack/distributions/meta-reference-gpu/doc_template.md +119 -0
  3. llama_stack/distributions/nvidia/doc_template.md +170 -0
  4. llama_stack/distributions/oci/doc_template.md +140 -0
  5. llama_stack/models/llama/llama3/dog.jpg +0 -0
  6. llama_stack/models/llama/llama3/pasta.jpeg +0 -0
  7. llama_stack/models/llama/resources/dog.jpg +0 -0
  8. llama_stack/models/llama/resources/pasta.jpeg +0 -0
  9. llama_stack/models/llama/resources/small_dog.jpg +0 -0
  10. llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +136 -11
  11. llama_stack/providers/inline/ios/inference/LocalInferenceImpl/LocalInference.h +9 -0
  12. llama_stack/providers/inline/ios/inference/LocalInferenceImpl/LocalInference.swift +189 -0
  13. llama_stack/providers/inline/ios/inference/LocalInferenceImpl/Parsing.swift +238 -0
  14. llama_stack/providers/inline/ios/inference/LocalInferenceImpl/PromptTemplate.swift +12 -0
  15. llama_stack/providers/inline/ios/inference/LocalInferenceImpl/SystemPrompts.swift +89 -0
  16. llama_stack/providers/inline/ios/inference/LocalInferenceImpl.xcodeproj/project.pbxproj +550 -0
  17. llama_stack/providers/inline/ios/inference/LocalInferenceImpl.xcodeproj/project.xcworkspace/contents.xcworkspacedata +7 -0
  18. llama_stack/providers/inline/ios/inference/LocalInferenceImpl.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +8 -0
  19. llama_stack/providers/remote/datasetio/nvidia/README.md +74 -0
  20. llama_stack/providers/remote/eval/nvidia/README.md +134 -0
  21. llama_stack/providers/remote/files/s3/README.md +266 -0
  22. llama_stack/providers/remote/inference/nvidia/NVIDIA.md +203 -0
  23. llama_stack/providers/remote/post_training/nvidia/README.md +151 -0
  24. llama_stack/providers/remote/safety/nvidia/README.md +78 -0
  25. llama_stack/providers/utils/responses/responses_store.py +34 -0
  26. {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/METADATA +2 -2
  27. {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/RECORD +31 -142
  28. llama_stack-0.4.4.dist-info/top_level.txt +1 -0
  29. llama_stack-0.4.3.dist-info/top_level.txt +0 -2
  30. llama_stack_api/__init__.py +0 -945
  31. llama_stack_api/admin/__init__.py +0 -45
  32. llama_stack_api/admin/api.py +0 -72
  33. llama_stack_api/admin/fastapi_routes.py +0 -117
  34. llama_stack_api/admin/models.py +0 -113
  35. llama_stack_api/agents.py +0 -173
  36. llama_stack_api/batches/__init__.py +0 -40
  37. llama_stack_api/batches/api.py +0 -53
  38. llama_stack_api/batches/fastapi_routes.py +0 -113
  39. llama_stack_api/batches/models.py +0 -78
  40. llama_stack_api/benchmarks/__init__.py +0 -43
  41. llama_stack_api/benchmarks/api.py +0 -39
  42. llama_stack_api/benchmarks/fastapi_routes.py +0 -109
  43. llama_stack_api/benchmarks/models.py +0 -109
  44. llama_stack_api/common/__init__.py +0 -5
  45. llama_stack_api/common/content_types.py +0 -101
  46. llama_stack_api/common/errors.py +0 -95
  47. llama_stack_api/common/job_types.py +0 -38
  48. llama_stack_api/common/responses.py +0 -77
  49. llama_stack_api/common/training_types.py +0 -47
  50. llama_stack_api/common/type_system.py +0 -146
  51. llama_stack_api/connectors.py +0 -146
  52. llama_stack_api/conversations.py +0 -270
  53. llama_stack_api/datasetio.py +0 -55
  54. llama_stack_api/datasets/__init__.py +0 -61
  55. llama_stack_api/datasets/api.py +0 -35
  56. llama_stack_api/datasets/fastapi_routes.py +0 -104
  57. llama_stack_api/datasets/models.py +0 -152
  58. llama_stack_api/datatypes.py +0 -373
  59. llama_stack_api/eval.py +0 -137
  60. llama_stack_api/file_processors/__init__.py +0 -27
  61. llama_stack_api/file_processors/api.py +0 -64
  62. llama_stack_api/file_processors/fastapi_routes.py +0 -78
  63. llama_stack_api/file_processors/models.py +0 -42
  64. llama_stack_api/files/__init__.py +0 -35
  65. llama_stack_api/files/api.py +0 -51
  66. llama_stack_api/files/fastapi_routes.py +0 -124
  67. llama_stack_api/files/models.py +0 -107
  68. llama_stack_api/inference.py +0 -1169
  69. llama_stack_api/inspect_api/__init__.py +0 -37
  70. llama_stack_api/inspect_api/api.py +0 -25
  71. llama_stack_api/inspect_api/fastapi_routes.py +0 -76
  72. llama_stack_api/inspect_api/models.py +0 -28
  73. llama_stack_api/internal/__init__.py +0 -9
  74. llama_stack_api/internal/kvstore.py +0 -28
  75. llama_stack_api/internal/sqlstore.py +0 -81
  76. llama_stack_api/llama_stack_api/__init__.py +0 -945
  77. llama_stack_api/llama_stack_api/admin/__init__.py +0 -45
  78. llama_stack_api/llama_stack_api/admin/api.py +0 -72
  79. llama_stack_api/llama_stack_api/admin/fastapi_routes.py +0 -117
  80. llama_stack_api/llama_stack_api/admin/models.py +0 -113
  81. llama_stack_api/llama_stack_api/agents.py +0 -173
  82. llama_stack_api/llama_stack_api/batches/__init__.py +0 -40
  83. llama_stack_api/llama_stack_api/batches/api.py +0 -53
  84. llama_stack_api/llama_stack_api/batches/fastapi_routes.py +0 -113
  85. llama_stack_api/llama_stack_api/batches/models.py +0 -78
  86. llama_stack_api/llama_stack_api/benchmarks/__init__.py +0 -43
  87. llama_stack_api/llama_stack_api/benchmarks/api.py +0 -39
  88. llama_stack_api/llama_stack_api/benchmarks/fastapi_routes.py +0 -109
  89. llama_stack_api/llama_stack_api/benchmarks/models.py +0 -109
  90. llama_stack_api/llama_stack_api/common/__init__.py +0 -5
  91. llama_stack_api/llama_stack_api/common/content_types.py +0 -101
  92. llama_stack_api/llama_stack_api/common/errors.py +0 -95
  93. llama_stack_api/llama_stack_api/common/job_types.py +0 -38
  94. llama_stack_api/llama_stack_api/common/responses.py +0 -77
  95. llama_stack_api/llama_stack_api/common/training_types.py +0 -47
  96. llama_stack_api/llama_stack_api/common/type_system.py +0 -146
  97. llama_stack_api/llama_stack_api/connectors.py +0 -146
  98. llama_stack_api/llama_stack_api/conversations.py +0 -270
  99. llama_stack_api/llama_stack_api/datasetio.py +0 -55
  100. llama_stack_api/llama_stack_api/datasets/__init__.py +0 -61
  101. llama_stack_api/llama_stack_api/datasets/api.py +0 -35
  102. llama_stack_api/llama_stack_api/datasets/fastapi_routes.py +0 -104
  103. llama_stack_api/llama_stack_api/datasets/models.py +0 -152
  104. llama_stack_api/llama_stack_api/datatypes.py +0 -373
  105. llama_stack_api/llama_stack_api/eval.py +0 -137
  106. llama_stack_api/llama_stack_api/file_processors/__init__.py +0 -27
  107. llama_stack_api/llama_stack_api/file_processors/api.py +0 -64
  108. llama_stack_api/llama_stack_api/file_processors/fastapi_routes.py +0 -78
  109. llama_stack_api/llama_stack_api/file_processors/models.py +0 -42
  110. llama_stack_api/llama_stack_api/files/__init__.py +0 -35
  111. llama_stack_api/llama_stack_api/files/api.py +0 -51
  112. llama_stack_api/llama_stack_api/files/fastapi_routes.py +0 -124
  113. llama_stack_api/llama_stack_api/files/models.py +0 -107
  114. llama_stack_api/llama_stack_api/inference.py +0 -1169
  115. llama_stack_api/llama_stack_api/inspect_api/__init__.py +0 -37
  116. llama_stack_api/llama_stack_api/inspect_api/api.py +0 -25
  117. llama_stack_api/llama_stack_api/inspect_api/fastapi_routes.py +0 -76
  118. llama_stack_api/llama_stack_api/inspect_api/models.py +0 -28
  119. llama_stack_api/llama_stack_api/internal/__init__.py +0 -9
  120. llama_stack_api/llama_stack_api/internal/kvstore.py +0 -28
  121. llama_stack_api/llama_stack_api/internal/sqlstore.py +0 -81
  122. llama_stack_api/llama_stack_api/models.py +0 -171
  123. llama_stack_api/llama_stack_api/openai_responses.py +0 -1468
  124. llama_stack_api/llama_stack_api/post_training.py +0 -370
  125. llama_stack_api/llama_stack_api/prompts.py +0 -203
  126. llama_stack_api/llama_stack_api/providers/__init__.py +0 -33
  127. llama_stack_api/llama_stack_api/providers/api.py +0 -16
  128. llama_stack_api/llama_stack_api/providers/fastapi_routes.py +0 -57
  129. llama_stack_api/llama_stack_api/providers/models.py +0 -24
  130. llama_stack_api/llama_stack_api/py.typed +0 -0
  131. llama_stack_api/llama_stack_api/rag_tool.py +0 -168
  132. llama_stack_api/llama_stack_api/resource.py +0 -37
  133. llama_stack_api/llama_stack_api/router_utils.py +0 -160
  134. llama_stack_api/llama_stack_api/safety.py +0 -132
  135. llama_stack_api/llama_stack_api/schema_utils.py +0 -208
  136. llama_stack_api/llama_stack_api/scoring.py +0 -93
  137. llama_stack_api/llama_stack_api/scoring_functions.py +0 -211
  138. llama_stack_api/llama_stack_api/shields.py +0 -93
  139. llama_stack_api/llama_stack_api/tools.py +0 -226
  140. llama_stack_api/llama_stack_api/vector_io.py +0 -941
  141. llama_stack_api/llama_stack_api/vector_stores.py +0 -53
  142. llama_stack_api/llama_stack_api/version.py +0 -9
  143. llama_stack_api/models.py +0 -171
  144. llama_stack_api/openai_responses.py +0 -1468
  145. llama_stack_api/post_training.py +0 -370
  146. llama_stack_api/prompts.py +0 -203
  147. llama_stack_api/providers/__init__.py +0 -33
  148. llama_stack_api/providers/api.py +0 -16
  149. llama_stack_api/providers/fastapi_routes.py +0 -57
  150. llama_stack_api/providers/models.py +0 -24
  151. llama_stack_api/py.typed +0 -0
  152. llama_stack_api/rag_tool.py +0 -168
  153. llama_stack_api/resource.py +0 -37
  154. llama_stack_api/router_utils.py +0 -160
  155. llama_stack_api/safety.py +0 -132
  156. llama_stack_api/schema_utils.py +0 -208
  157. llama_stack_api/scoring.py +0 -93
  158. llama_stack_api/scoring_functions.py +0 -211
  159. llama_stack_api/shields.py +0 -93
  160. llama_stack_api/tools.py +0 -226
  161. llama_stack_api/vector_io.py +0 -941
  162. llama_stack_api/vector_stores.py +0 -53
  163. llama_stack_api/version.py +0 -9
  164. {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/WHEEL +0 -0
  165. {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/entry_points.txt +0 -0
  166. {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/licenses/LICENSE +0 -0
@@ -1,941 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the terms described in the LICENSE file in
5
- # the root directory of this source tree.
6
- # Copyright (c) Meta Platforms, Inc. and affiliates.
7
- # All rights reserved.
8
- #
9
- # This source code is licensed under the terms described in the LICENSE file in
10
- # the root directory of this source tree.
11
- from typing import Annotated, Any, Literal, Protocol, runtime_checkable
12
-
13
- from fastapi import Body, Query
14
- from pydantic import BaseModel, Field, field_validator
15
-
16
- from llama_stack_api.inference import InterleavedContent
17
- from llama_stack_api.schema_utils import json_schema_type, register_schema, webmethod
18
- from llama_stack_api.vector_stores import VectorStore
19
- from llama_stack_api.version import LLAMA_STACK_API_V1
20
-
21
-
22
- @json_schema_type
23
- class ChunkMetadata(BaseModel):
24
- """
25
- `ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that
26
- will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`
27
- is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.
28
- Use `Chunk.metadata` for metadata that will be used in the context during inference.
29
- :param chunk_id: The ID of the chunk. If not set, it will be generated based on the document ID and content.
30
- :param document_id: The ID of the document this chunk belongs to.
31
- :param source: The source of the content, such as a URL, file path, or other identifier.
32
- :param created_timestamp: An optional timestamp indicating when the chunk was created.
33
- :param updated_timestamp: An optional timestamp indicating when the chunk was last updated.
34
- :param chunk_window: The window of the chunk, which can be used to group related chunks together.
35
- :param chunk_tokenizer: The tokenizer used to create the chunk. Default is Tiktoken.
36
- :param content_token_count: The number of tokens in the content of the chunk.
37
- :param metadata_token_count: The number of tokens in the metadata of the chunk.
38
- """
39
-
40
- chunk_id: str | None = None
41
- document_id: str | None = None
42
- source: str | None = None
43
- created_timestamp: int | None = None
44
- updated_timestamp: int | None = None
45
- chunk_window: str | None = None
46
- chunk_tokenizer: str | None = None
47
- content_token_count: int | None = None
48
- metadata_token_count: int | None = None
49
-
50
-
51
- @json_schema_type
52
- class Chunk(BaseModel):
53
- """
54
- A chunk of content from file processing.
55
- :param content: The content of the chunk, which can be interleaved text, images, or other types.
56
- :param chunk_id: Unique identifier for the chunk. Must be provided explicitly.
57
- :param metadata: Metadata associated with the chunk that will be used in the model context during inference.
58
- :param chunk_metadata: Metadata for the chunk that will NOT be used in the context during inference.
59
- The `chunk_metadata` is required backend functionality.
60
- """
61
-
62
- content: InterleavedContent
63
- chunk_id: str
64
- metadata: dict[str, Any] = Field(default_factory=dict)
65
- chunk_metadata: ChunkMetadata
66
-
67
- @property
68
- def document_id(self) -> str | None:
69
- """Returns the document_id from either metadata or chunk_metadata, with metadata taking precedence."""
70
- # Check metadata first (takes precedence)
71
- doc_id = self.metadata.get("document_id")
72
- if doc_id is not None:
73
- if not isinstance(doc_id, str):
74
- raise TypeError(f"metadata['document_id'] must be a string, got {type(doc_id).__name__}: {doc_id!r}")
75
- return doc_id
76
-
77
- # Fall back to chunk_metadata if available (Pydantic ensures type safety)
78
- if self.chunk_metadata is not None:
79
- return self.chunk_metadata.document_id
80
-
81
- return None
82
-
83
-
84
- @json_schema_type
85
- class EmbeddedChunk(Chunk):
86
- """
87
- A chunk of content with its embedding vector for vector database operations.
88
- Inherits all fields from Chunk and adds embedding-related fields.
89
- :param embedding: The embedding vector for the chunk content.
90
- :param embedding_model: The model used to generate the embedding (e.g., 'openai/text-embedding-3-small').
91
- :param embedding_dimension: The dimension of the embedding vector.
92
- """
93
-
94
- embedding: list[float]
95
- embedding_model: str
96
- embedding_dimension: int
97
-
98
-
99
- @json_schema_type
100
- class QueryChunksResponse(BaseModel):
101
- """Response from querying chunks in a vector database.
102
-
103
- :param chunks: List of embedded chunks returned from the query
104
- :param scores: Relevance scores corresponding to each returned chunk
105
- """
106
-
107
- chunks: list[EmbeddedChunk]
108
- scores: list[float]
109
-
110
-
111
- @json_schema_type
112
- class VectorStoreFileCounts(BaseModel):
113
- """File processing status counts for a vector store.
114
-
115
- :param completed: Number of files that have been successfully processed
116
- :param cancelled: Number of files that had their processing cancelled
117
- :param failed: Number of files that failed to process
118
- :param in_progress: Number of files currently being processed
119
- :param total: Total number of files in the vector store
120
- """
121
-
122
- completed: int
123
- cancelled: int
124
- failed: int
125
- in_progress: int
126
- total: int
127
-
128
-
129
- # TODO: rename this as OpenAIVectorStore
130
- @json_schema_type
131
- class VectorStoreObject(BaseModel):
132
- """OpenAI Vector Store object.
133
-
134
- :param id: Unique identifier for the vector store
135
- :param object: Object type identifier, always "vector_store"
136
- :param created_at: Timestamp when the vector store was created
137
- :param name: (Optional) Name of the vector store
138
- :param usage_bytes: Storage space used by the vector store in bytes
139
- :param file_counts: File processing status counts for the vector store
140
- :param status: Current status of the vector store
141
- :param expires_after: (Optional) Expiration policy for the vector store
142
- :param expires_at: (Optional) Timestamp when the vector store will expire
143
- :param last_active_at: (Optional) Timestamp of last activity on the vector store
144
- :param metadata: Set of key-value pairs that can be attached to the vector store
145
- """
146
-
147
- id: str
148
- object: str = "vector_store"
149
- created_at: int
150
- name: str | None = None
151
- usage_bytes: int = 0
152
- file_counts: VectorStoreFileCounts
153
- status: str = "completed"
154
- expires_after: dict[str, Any] | None = None
155
- expires_at: int | None = None
156
- last_active_at: int | None = None
157
- metadata: dict[str, Any] = Field(default_factory=dict)
158
-
159
-
160
- @json_schema_type
161
- class VectorStoreCreateRequest(BaseModel):
162
- """Request to create a vector store.
163
-
164
- :param name: (Optional) Name for the vector store
165
- :param file_ids: List of file IDs to include in the vector store
166
- :param expires_after: (Optional) Expiration policy for the vector store
167
- :param chunking_strategy: (Optional) Strategy for splitting files into chunks
168
- :param metadata: Set of key-value pairs that can be attached to the vector store
169
- """
170
-
171
- name: str | None = None
172
- file_ids: list[str] = Field(default_factory=list)
173
- expires_after: dict[str, Any] | None = None
174
- chunking_strategy: dict[str, Any] | None = None
175
- metadata: dict[str, Any] = Field(default_factory=dict)
176
-
177
-
178
- @json_schema_type
179
- class VectorStoreModifyRequest(BaseModel):
180
- """Request to modify a vector store.
181
-
182
- :param name: (Optional) Updated name for the vector store
183
- :param expires_after: (Optional) Updated expiration policy for the vector store
184
- :param metadata: (Optional) Updated set of key-value pairs for the vector store
185
- """
186
-
187
- name: str | None = None
188
- expires_after: dict[str, Any] | None = None
189
- metadata: dict[str, Any] | None = None
190
-
191
-
192
- @json_schema_type
193
- class VectorStoreListResponse(BaseModel):
194
- """Response from listing vector stores.
195
-
196
- :param object: Object type identifier, always "list"
197
- :param data: List of vector store objects
198
- :param first_id: (Optional) ID of the first vector store in the list for pagination
199
- :param last_id: (Optional) ID of the last vector store in the list for pagination
200
- :param has_more: Whether there are more vector stores available beyond this page
201
- """
202
-
203
- object: str = "list"
204
- data: list[VectorStoreObject]
205
- first_id: str | None = None
206
- last_id: str | None = None
207
- has_more: bool = False
208
-
209
-
210
- @json_schema_type
211
- class VectorStoreSearchRequest(BaseModel):
212
- """Request to search a vector store.
213
-
214
- :param query: Search query as a string or list of strings
215
- :param filters: (Optional) Filters based on file attributes to narrow search results
216
- :param max_num_results: Maximum number of results to return, defaults to 10
217
- :param ranking_options: (Optional) Options for ranking and filtering search results
218
- :param rewrite_query: Whether to rewrite the query for better vector search performance
219
- """
220
-
221
- query: str | list[str]
222
- filters: dict[str, Any] | None = None
223
- max_num_results: int = 10
224
- ranking_options: dict[str, Any] | None = None
225
- rewrite_query: bool = False
226
-
227
-
228
- @json_schema_type
229
- class VectorStoreContent(BaseModel):
230
- """Content item from a vector store file or search result.
231
-
232
- :param type: Content type, currently only "text" is supported
233
- :param text: The actual text content
234
- :param embedding: Optional embedding vector for this content chunk
235
- :param chunk_metadata: Optional chunk metadata
236
- :param metadata: Optional user-defined metadata
237
- """
238
-
239
- type: Literal["text"]
240
- text: str
241
- embedding: list[float] | None = None
242
- chunk_metadata: ChunkMetadata | None = None
243
- metadata: dict[str, Any] | None = None
244
-
245
-
246
- @json_schema_type
247
- class VectorStoreSearchResponse(BaseModel):
248
- """Response from searching a vector store.
249
-
250
- :param file_id: Unique identifier of the file containing the result
251
- :param filename: Name of the file containing the result
252
- :param score: Relevance score for this search result
253
- :param attributes: (Optional) Key-value attributes associated with the file
254
- :param content: List of content items matching the search query
255
- """
256
-
257
- file_id: str
258
- filename: str
259
- score: float
260
- attributes: dict[str, str | float | bool] | None = None
261
- content: list[VectorStoreContent]
262
-
263
-
264
- @json_schema_type
265
- class VectorStoreSearchResponsePage(BaseModel):
266
- """Paginated response from searching a vector store.
267
-
268
- :param object: Object type identifier for the search results page
269
- :param search_query: The original search query that was executed
270
- :param data: List of search result objects
271
- :param has_more: Whether there are more results available beyond this page
272
- :param next_page: (Optional) Token for retrieving the next page of results
273
- """
274
-
275
- object: str = "vector_store.search_results.page"
276
- search_query: list[str]
277
- data: list[VectorStoreSearchResponse]
278
- has_more: bool = False
279
- next_page: str | None = None
280
-
281
-
282
- @json_schema_type
283
- class VectorStoreDeleteResponse(BaseModel):
284
- """Response from deleting a vector store.
285
-
286
- :param id: Unique identifier of the deleted vector store
287
- :param object: Object type identifier for the deletion response
288
- :param deleted: Whether the deletion operation was successful
289
- """
290
-
291
- id: str
292
- object: str = "vector_store.deleted"
293
- deleted: bool = True
294
-
295
-
296
- @json_schema_type
297
- class VectorStoreFileContentResponse(BaseModel):
298
- """Represents the parsed content of a vector store file.
299
-
300
- :param object: The object type, which is always `vector_store.file_content.page`
301
- :param data: Parsed content of the file
302
- :param has_more: Indicates if there are more content pages to fetch
303
- :param next_page: The token for the next page, if any
304
- """
305
-
306
- object: Literal["vector_store.file_content.page"] = "vector_store.file_content.page"
307
- data: list[VectorStoreContent]
308
- has_more: bool = False
309
- next_page: str | None = None
310
-
311
-
312
- @json_schema_type
313
- class VectorStoreChunkingStrategyAuto(BaseModel):
314
- """Automatic chunking strategy for vector store files.
315
-
316
- :param type: Strategy type, always "auto" for automatic chunking
317
- """
318
-
319
- type: Literal["auto"] = "auto"
320
-
321
-
322
- @json_schema_type
323
- class VectorStoreChunkingStrategyStaticConfig(BaseModel):
324
- """Configuration for static chunking strategy.
325
-
326
- :param chunk_overlap_tokens: Number of tokens to overlap between adjacent chunks
327
- :param max_chunk_size_tokens: Maximum number of tokens per chunk, must be between 100 and 4096
328
- """
329
-
330
- chunk_overlap_tokens: int = 400
331
- max_chunk_size_tokens: int = Field(800, ge=100, le=4096)
332
-
333
-
334
- @json_schema_type
335
- class VectorStoreChunkingStrategyStatic(BaseModel):
336
- """Static chunking strategy with configurable parameters.
337
-
338
- :param type: Strategy type, always "static" for static chunking
339
- :param static: Configuration parameters for the static chunking strategy
340
- """
341
-
342
- type: Literal["static"] = "static"
343
- static: VectorStoreChunkingStrategyStaticConfig
344
-
345
-
346
- VectorStoreChunkingStrategy = Annotated[
347
- VectorStoreChunkingStrategyAuto | VectorStoreChunkingStrategyStatic,
348
- Field(discriminator="type"),
349
- ]
350
- register_schema(VectorStoreChunkingStrategy, name="VectorStoreChunkingStrategy")
351
-
352
-
353
- class SearchRankingOptions(BaseModel):
354
- """Options for ranking and filtering search results.
355
-
356
- :param ranker: (Optional) Name of the ranking algorithm to use
357
- :param score_threshold: (Optional) Minimum relevance score threshold for results
358
- """
359
-
360
- ranker: str | None = None
361
- # NOTE: OpenAI File Search Tool requires threshold to be between 0 and 1, however
362
- # we don't guarantee that the score is between 0 and 1, so will leave this unconstrained
363
- # and let the provider handle it
364
- score_threshold: float | None = Field(default=0.0)
365
-
366
-
367
- @json_schema_type
368
- class VectorStoreFileLastError(BaseModel):
369
- """Error information for failed vector store file processing.
370
-
371
- :param code: Error code indicating the type of failure
372
- :param message: Human-readable error message describing the failure
373
- """
374
-
375
- code: Literal["server_error"] | Literal["rate_limit_exceeded"]
376
- message: str
377
-
378
-
379
- VectorStoreFileStatus = Literal["completed"] | Literal["in_progress"] | Literal["cancelled"] | Literal["failed"]
380
- register_schema(VectorStoreFileStatus, name="VectorStoreFileStatus")
381
-
382
-
383
- # VectorStoreFileAttributes type with OpenAPI constraints
384
- VectorStoreFileAttributes = Annotated[
385
- dict[str, Annotated[str, Field(max_length=512)] | float | bool],
386
- Field(
387
- max_length=16,
388
- json_schema_extra={
389
- "propertyNames": {"type": "string", "maxLength": 64},
390
- "x-oaiTypeLabel": "map",
391
- },
392
- description=(
393
- "Set of 16 key-value pairs that can be attached to an object. This can be "
394
- "useful for storing additional information about the object in a structured "
395
- "format, and querying for objects via API or the dashboard. Keys are strings "
396
- "with a maximum length of 64 characters. Values are strings with a maximum "
397
- "length of 512 characters, booleans, or numbers."
398
- ),
399
- ),
400
- ]
401
-
402
-
403
- def _sanitize_vector_store_attributes(metadata: dict[str, Any] | None) -> dict[str, str | float | bool]:
404
- """
405
- Sanitize metadata to VectorStoreFileAttributes spec (max 16 properties, primitives only).
406
-
407
- Converts dict[str, Any] to dict[str, str | float | bool]:
408
- - Preserves: str (truncated to 512 chars), bool, int/float (as float)
409
- - Converts: list -> comma-separated string
410
- - Filters: dict, None, other types
411
- - Enforces: max 16 properties, max 64 char keys, max 512 char string values
412
- """
413
- if not metadata:
414
- return {}
415
-
416
- sanitized: dict[str, str | float | bool] = {}
417
- for key, value in metadata.items():
418
- # Enforce max 16 properties
419
- if len(sanitized) >= 16:
420
- break
421
-
422
- # Enforce max 64 char keys
423
- if len(key) > 64:
424
- continue
425
-
426
- # Convert to supported primitive types
427
- if isinstance(value, bool):
428
- sanitized[key] = value
429
- elif isinstance(value, int | float):
430
- sanitized[key] = float(value)
431
- elif isinstance(value, str):
432
- # Enforce max 512 char string values
433
- sanitized[key] = value[:512] if len(value) > 512 else value
434
- elif isinstance(value, list):
435
- # Convert lists to comma-separated strings (max 512 chars)
436
- list_str = ", ".join(str(item) for item in value)
437
- sanitized[key] = list_str[:512] if len(list_str) > 512 else list_str
438
-
439
- return sanitized
440
-
441
-
442
- @json_schema_type
443
- class VectorStoreFileObject(BaseModel):
444
- """OpenAI Vector Store File object.
445
-
446
- :param id: Unique identifier for the file
447
- :param object: Object type identifier, always "vector_store.file"
448
- :param attributes: Key-value attributes associated with the file
449
- :param chunking_strategy: Strategy used for splitting the file into chunks
450
- :param created_at: Timestamp when the file was added to the vector store
451
- :param last_error: (Optional) Error information if file processing failed
452
- :param status: Current processing status of the file
453
- :param usage_bytes: Storage space used by this file in bytes
454
- :param vector_store_id: ID of the vector store containing this file
455
- """
456
-
457
- id: str
458
- object: str = "vector_store.file"
459
- attributes: VectorStoreFileAttributes = Field(default_factory=dict)
460
- chunking_strategy: VectorStoreChunkingStrategy
461
- created_at: int
462
- last_error: VectorStoreFileLastError | None = None
463
- status: VectorStoreFileStatus
464
- usage_bytes: int = 0
465
- vector_store_id: str
466
-
467
- @field_validator("attributes", mode="before")
468
- @classmethod
469
- def _validate_attributes(cls, v: dict[str, Any] | None) -> dict[str, str | float | bool]:
470
- """Sanitize attributes to match VectorStoreFileAttributes OpenAPI spec."""
471
- return _sanitize_vector_store_attributes(v)
472
-
473
-
474
- @json_schema_type
475
- class VectorStoreListFilesResponse(BaseModel):
476
- """Response from listing files in a vector store.
477
-
478
- :param object: Object type identifier, always "list"
479
- :param data: List of vector store file objects
480
- :param first_id: (Optional) ID of the first file in the list for pagination
481
- :param last_id: (Optional) ID of the last file in the list for pagination
482
- :param has_more: Whether there are more files available beyond this page
483
- """
484
-
485
- object: str = "list"
486
- data: list[VectorStoreFileObject]
487
- first_id: str | None = None
488
- last_id: str | None = None
489
- has_more: bool = False
490
-
491
-
492
- @json_schema_type
493
- class VectorStoreFileDeleteResponse(BaseModel):
494
- """Response from deleting a vector store file.
495
-
496
- :param id: Unique identifier of the deleted file
497
- :param object: Object type identifier for the deletion response
498
- :param deleted: Whether the deletion operation was successful
499
- """
500
-
501
- id: str
502
- object: str = "vector_store.file.deleted"
503
- deleted: bool = True
504
-
505
-
506
- @json_schema_type
507
- class VectorStoreFileBatchObject(BaseModel):
508
- """OpenAI Vector Store File Batch object.
509
-
510
- :param id: Unique identifier for the file batch
511
- :param object: Object type identifier, always "vector_store.file_batch"
512
- :param created_at: Timestamp when the file batch was created
513
- :param vector_store_id: ID of the vector store containing the file batch
514
- :param status: Current processing status of the file batch
515
- :param file_counts: File processing status counts for the batch
516
- """
517
-
518
- id: str
519
- object: str = "vector_store.file_batch"
520
- created_at: int
521
- vector_store_id: str
522
- status: VectorStoreFileStatus
523
- file_counts: VectorStoreFileCounts
524
-
525
-
526
@json_schema_type
class VectorStoreFilesListInBatchResponse(BaseModel):
    """Response from listing files in a vector store file batch.

    :param object: Object type identifier, always "list"
    :param data: List of vector store file objects in the batch
    :param first_id: (Optional) ID of the first file in the list for pagination
    :param last_id: (Optional) ID of the last file in the list for pagination
    :param has_more: Whether there are more files available beyond this page
    """

    object: str = "list"
    data: list[VectorStoreFileObject]
    first_id: str | None = None
    last_id: str | None = None
    has_more: bool = False
542
-
543
-
544
# extra_body can be accessed via .model_extra
@json_schema_type
class OpenAICreateVectorStoreRequestWithExtraBody(BaseModel, extra="allow"):
    """Request to create a vector store with extra_body support.

    Unknown fields are retained (``extra="allow"``) and reachable via ``model_extra``.

    :param name: (Optional) A name for the vector store
    :param file_ids: List of file IDs to include in the vector store
    :param expires_after: (Optional) Expiration policy for the vector store
    :param chunking_strategy: (Optional) Strategy for splitting files into chunks
    :param metadata: Set of key-value pairs that can be attached to the vector store
    """

    name: str | None = None
    file_ids: list[str] | None = None
    expires_after: dict[str, Any] | None = None
    chunking_strategy: VectorStoreChunkingStrategy | None = None
    metadata: dict[str, Any] | None = None
561
-
562
-
563
# extra_body can be accessed via .model_extra
@json_schema_type
class OpenAICreateVectorStoreFileBatchRequestWithExtraBody(BaseModel, extra="allow"):
    """Request to create a vector store file batch with extra_body support.

    Unknown fields are retained (``extra="allow"``) and reachable via ``model_extra``.

    :param file_ids: A list of File IDs that the vector store should use
    :param attributes: (Optional) Key-value attributes to store with the files
    :param chunking_strategy: (Optional) The chunking strategy used to chunk the file(s). Defaults to auto
    """

    file_ids: list[str]
    attributes: dict[str, Any] | None = None
    chunking_strategy: VectorStoreChunkingStrategy | None = None
576
-
577
-
578
class VectorStoreTable(Protocol):
    """Structural interface for looking up a registered vector store by its ID."""

    def get_vector_store(self, vector_store_id: str) -> VectorStore | None: ...
580
-
581
-
582
- @runtime_checkable
583
- class VectorIO(Protocol):
584
- vector_store_table: VectorStoreTable | None = None
585
-
586
    # this will just block now until chunks are inserted, but it should
    # probably return a Job instance which can be polled for completion
    # NOTE(review): stale TODO — it read "rename vector_store_id to
    # vector_store_id"; the parameter already uses the new name (presumably
    # renamed from vector_db_id). Confirm and drop once Stainless is working.
    @webmethod(route="/vector-io/insert", method="POST", level=LLAMA_STACK_API_V1)
    async def insert_chunks(
        self,
        vector_store_id: str,
        chunks: list[EmbeddedChunk],
        ttl_seconds: int | None = None,
    ) -> None:
        """Insert embedded chunks into a vector database.

        :param vector_store_id: The identifier of the vector database to insert the chunks into.
        :param chunks: The embedded chunks to insert. Each `EmbeddedChunk` contains the content, metadata,
            and embedding vector ready for storage.
        :param ttl_seconds: The time to live of the chunks.
        """
        ...
604
-
605
    # NOTE(review): stale TODO — it read "rename vector_store_id to
    # vector_store_id"; the parameter already uses the new name (presumably
    # renamed from vector_db_id). Confirm and drop once Stainless is working.
    @webmethod(route="/vector-io/query", method="POST", level=LLAMA_STACK_API_V1)
    async def query_chunks(
        self,
        vector_store_id: str,
        query: InterleavedContent,
        params: dict[str, Any] | None = None,
    ) -> QueryChunksResponse:
        """Query chunks from a vector database.

        :param vector_store_id: The identifier of the vector database to query.
        :param query: The query to search for.
        :param params: The parameters of the query.
        :returns: A QueryChunksResponse.
        """
        ...
621
-
622
    # OpenAI Vector Stores API endpoints
    @webmethod(route="/vector_stores", method="POST", level=LLAMA_STACK_API_V1)
    async def openai_create_vector_store(
        self,
        params: Annotated[OpenAICreateVectorStoreRequestWithExtraBody, Body(...)],
    ) -> VectorStoreObject:
        """Creates a vector store.

        Generate an OpenAI-compatible vector store with the given parameters.

        :param params: The request body (name, file_ids, expires_after, chunking_strategy,
            metadata); unknown extra fields are preserved on the model via ``model_extra``.
        :returns: A VectorStoreObject representing the created vector store.
        """
        ...
634
-
635
    @webmethod(route="/vector_stores", method="GET", level=LLAMA_STACK_API_V1)
    async def openai_list_vector_stores(
        self,
        limit: int | None = 20,
        order: str | None = "desc",
        after: str | None = None,
        before: str | None = None,
    ) -> VectorStoreListResponse:
        """Returns a list of vector stores.

        :param limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
        :param order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.
        :param after: A cursor for use in pagination. `after` is an object ID that defines your place in the list.
        :param before: A cursor for use in pagination. `before` is an object ID that defines your place in the list.
        :returns: A VectorStoreListResponse containing the list of vector stores.
        """
        ...
652
-
653
    @webmethod(route="/vector_stores/{vector_store_id}", method="GET", level=LLAMA_STACK_API_V1)
    async def openai_retrieve_vector_store(
        self,
        vector_store_id: str,
    ) -> VectorStoreObject:
        """Retrieves a vector store.

        :param vector_store_id: The ID of the vector store to retrieve.
        :returns: A VectorStoreObject representing the vector store.
        """
        ...
664
-
665
    @webmethod(
        route="/vector_stores/{vector_store_id}",
        method="POST",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_update_vector_store(
        self,
        vector_store_id: str,
        name: str | None = None,
        expires_after: dict[str, Any] | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> VectorStoreObject:
        """Updates a vector store.

        :param vector_store_id: The ID of the vector store to update.
        :param name: The name of the vector store.
        :param expires_after: The expiration policy for a vector store.
        :param metadata: Set of 16 key-value pairs that can be attached to an object.
        :returns: A VectorStoreObject representing the updated vector store.
        """
        ...
686
-
687
    @webmethod(
        route="/vector_stores/{vector_store_id}",
        method="DELETE",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_delete_vector_store(
        self,
        vector_store_id: str,
    ) -> VectorStoreDeleteResponse:
        """Delete a vector store.

        :param vector_store_id: The ID of the vector store to delete.
        :returns: A VectorStoreDeleteResponse indicating the deletion status.
        """
        ...
702
-
703
    @webmethod(
        route="/vector_stores/{vector_store_id}/search",
        method="POST",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_search_vector_store(
        self,
        vector_store_id: str,
        query: str | list[str],
        filters: dict[str, Any] | None = None,
        max_num_results: int | None = 10,
        ranking_options: SearchRankingOptions | None = None,
        rewrite_query: bool | None = False,
        search_mode: (
            str | None
        ) = "vector",  # Using str instead of Literal due to OpenAPI schema generator limitations
    ) -> VectorStoreSearchResponsePage:
        """Search for chunks in a vector store.

        Searches a vector store for relevant chunks based on a query and optional file attribute filters.

        :param vector_store_id: The ID of the vector store to search.
        :param query: The query string or array for performing the search.
        :param filters: Filters based on file attributes to narrow the search results.
        :param max_num_results: Maximum number of results to return (1 to 50 inclusive, default 10).
        :param ranking_options: Ranking options for fine-tuning the search results.
        :param rewrite_query: Whether to rewrite the natural language query for vector search (default false)
        :param search_mode: The search mode to use - "keyword", "vector", or "hybrid" (default "vector")
        :returns: A VectorStoreSearchResponse containing the search results.
        """
        ...
734
-
735
    @webmethod(
        route="/vector_stores/{vector_store_id}/files",
        method="POST",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_attach_file_to_vector_store(
        self,
        vector_store_id: str,
        file_id: str,
        attributes: dict[str, Any] | None = None,
        chunking_strategy: VectorStoreChunkingStrategy | None = None,
    ) -> VectorStoreFileObject:
        """Attach a file to a vector store.

        :param vector_store_id: The ID of the vector store to attach the file to.
        :param file_id: The ID of the file to attach to the vector store.
        :param attributes: The key-value attributes stored with the file, which can be used for filtering.
        :param chunking_strategy: The chunking strategy to use for the file.
        :returns: A VectorStoreFileObject representing the attached file.
        """
        ...
756
-
757
    @webmethod(
        route="/vector_stores/{vector_store_id}/files",
        method="GET",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_list_files_in_vector_store(
        self,
        vector_store_id: str,
        limit: int | None = 20,
        order: str | None = "desc",
        after: str | None = None,
        before: str | None = None,
        filter: VectorStoreFileStatus | None = None,
    ) -> VectorStoreListFilesResponse:
        """List files in a vector store.

        :param vector_store_id: The ID of the vector store to list files from.
        :param limit: (Optional) A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
        :param order: (Optional) Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.
        :param after: (Optional) A cursor for use in pagination. `after` is an object ID that defines your place in the list.
        :param before: (Optional) A cursor for use in pagination. `before` is an object ID that defines your place in the list.
        :param filter: (Optional) Filter by file status to only return files with the specified status.
        :returns: A VectorStoreListFilesResponse containing the list of files.
        """
        ...
782
-
783
    @webmethod(
        route="/vector_stores/{vector_store_id}/files/{file_id}",
        method="GET",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_retrieve_vector_store_file(
        self,
        vector_store_id: str,
        file_id: str,
    ) -> VectorStoreFileObject:
        """Retrieves a vector store file.

        :param vector_store_id: The ID of the vector store containing the file to retrieve.
        :param file_id: The ID of the file to retrieve.
        :returns: A VectorStoreFileObject representing the file.
        """
        ...
800
-
801
    @webmethod(
        route="/vector_stores/{vector_store_id}/files/{file_id}/content",
        method="GET",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_retrieve_vector_store_file_contents(
        self,
        vector_store_id: str,
        file_id: str,
        include_embeddings: Annotated[bool | None, Query()] = False,
        include_metadata: Annotated[bool | None, Query()] = False,
    ) -> VectorStoreFileContentResponse:
        """Retrieves the contents of a vector store file.

        :param vector_store_id: The ID of the vector store containing the file to retrieve.
        :param file_id: The ID of the file to retrieve.
        :param include_embeddings: Whether to include embedding vectors in the response.
        :param include_metadata: Whether to include chunk metadata in the response.
        :returns: File contents, optionally with embeddings and metadata based on query parameters.
        """
        ...
822
-
823
    @webmethod(
        route="/vector_stores/{vector_store_id}/files/{file_id}",
        method="POST",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_update_vector_store_file(
        self,
        vector_store_id: str,
        file_id: str,
        attributes: dict[str, Any],
    ) -> VectorStoreFileObject:
        """Updates a vector store file.

        :param vector_store_id: The ID of the vector store containing the file to update.
        :param file_id: The ID of the file to update.
        :param attributes: The updated key-value attributes to store with the file.
        :returns: A VectorStoreFileObject representing the updated file.
        """
        ...
842
-
843
    @webmethod(
        route="/vector_stores/{vector_store_id}/files/{file_id}",
        method="DELETE",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_delete_vector_store_file(
        self,
        vector_store_id: str,
        file_id: str,
    ) -> VectorStoreFileDeleteResponse:
        """Delete a vector store file.

        :param vector_store_id: The ID of the vector store containing the file to delete.
        :param file_id: The ID of the file to delete.
        :returns: A VectorStoreFileDeleteResponse indicating the deletion status.
        """
        ...
860
-
861
    @webmethod(
        route="/vector_stores/{vector_store_id}/file_batches",
        method="POST",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_create_vector_store_file_batch(
        self,
        vector_store_id: str,
        params: Annotated[OpenAICreateVectorStoreFileBatchRequestWithExtraBody, Body(...)],
    ) -> VectorStoreFileBatchObject:
        """Create a vector store file batch.

        Generate an OpenAI-compatible vector store file batch for the given vector store.

        :param vector_store_id: The ID of the vector store to create the file batch for.
        :param params: The request body (file_ids, attributes, chunking_strategy); unknown
            extra fields are preserved on the model via ``model_extra``.
        :returns: A VectorStoreFileBatchObject representing the created file batch.
        """
        ...
878
-
879
    @webmethod(
        route="/vector_stores/{vector_store_id}/file_batches/{batch_id}",
        method="GET",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_retrieve_vector_store_file_batch(
        self,
        batch_id: str,
        vector_store_id: str,
    ) -> VectorStoreFileBatchObject:
        """Retrieve a vector store file batch.

        :param batch_id: The ID of the file batch to retrieve.
        :param vector_store_id: The ID of the vector store containing the file batch.
        :returns: A VectorStoreFileBatchObject representing the file batch.
        """
        ...
896
-
897
    @webmethod(
        route="/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
        method="GET",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_list_files_in_vector_store_file_batch(
        self,
        batch_id: str,
        vector_store_id: str,
        after: str | None = None,
        before: str | None = None,
        filter: str | None = None,
        limit: int | None = 20,
        order: str | None = "desc",
    ) -> VectorStoreFilesListInBatchResponse:
        """Returns a list of vector store files in a batch.

        :param batch_id: The ID of the file batch to list files from.
        :param vector_store_id: The ID of the vector store containing the file batch.
        :param after: A cursor for use in pagination. `after` is an object ID that defines your place in the list.
        :param before: A cursor for use in pagination. `before` is an object ID that defines your place in the list.
        :param filter: Filter by file status. One of in_progress, completed, failed, cancelled.
        :param limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
        :param order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.
        :returns: A VectorStoreFilesListInBatchResponse containing the list of files in the batch.
        """
        ...
924
-
925
    @webmethod(
        route="/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
        method="POST",
        level=LLAMA_STACK_API_V1,
    )
    async def openai_cancel_vector_store_file_batch(
        self,
        batch_id: str,
        vector_store_id: str,
    ) -> VectorStoreFileBatchObject:
        """Cancels a vector store file batch.

        :param batch_id: The ID of the file batch to cancel.
        :param vector_store_id: The ID of the vector store containing the file batch.
        :returns: A VectorStoreFileBatchObject representing the cancelled file batch.
        """
        ...