llama-stack 0.0.42__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (738)
  1. llama_stack/__init__.py +5 -0
  2. llama_stack/apis/agents/__init__.py +1 -1
  3. llama_stack/apis/agents/agents.py +700 -281
  4. llama_stack/apis/agents/openai_responses.py +1311 -0
  5. llama_stack/{providers/adapters/memory/sample/config.py → apis/batches/__init__.py} +2 -5
  6. llama_stack/apis/batches/batches.py +100 -0
  7. llama_stack/apis/benchmarks/__init__.py +7 -0
  8. llama_stack/apis/benchmarks/benchmarks.py +108 -0
  9. llama_stack/apis/common/content_types.py +143 -0
  10. llama_stack/apis/common/errors.py +103 -0
  11. llama_stack/apis/common/job_types.py +38 -0
  12. llama_stack/apis/common/responses.py +36 -0
  13. llama_stack/apis/common/training_types.py +36 -5
  14. llama_stack/apis/common/type_system.py +158 -0
  15. llama_stack/apis/conversations/__init__.py +31 -0
  16. llama_stack/apis/conversations/conversations.py +286 -0
  17. llama_stack/apis/datasetio/__init__.py +7 -0
  18. llama_stack/apis/datasetio/datasetio.py +59 -0
  19. llama_stack/apis/datasets/__init__.py +7 -0
  20. llama_stack/apis/datasets/datasets.py +251 -0
  21. llama_stack/apis/datatypes.py +160 -0
  22. llama_stack/apis/eval/__init__.py +7 -0
  23. llama_stack/apis/eval/eval.py +169 -0
  24. llama_stack/apis/files/__init__.py +7 -0
  25. llama_stack/apis/files/files.py +199 -0
  26. llama_stack/apis/inference/__init__.py +1 -1
  27. llama_stack/apis/inference/inference.py +1169 -113
  28. llama_stack/apis/inspect/__init__.py +1 -1
  29. llama_stack/apis/inspect/inspect.py +69 -16
  30. llama_stack/apis/models/__init__.py +1 -1
  31. llama_stack/apis/models/models.py +148 -21
  32. llama_stack/apis/post_training/__init__.py +1 -1
  33. llama_stack/apis/post_training/post_training.py +265 -120
  34. llama_stack/{providers/adapters/agents/sample/config.py → apis/prompts/__init__.py} +2 -5
  35. llama_stack/apis/prompts/prompts.py +204 -0
  36. llama_stack/apis/providers/__init__.py +7 -0
  37. llama_stack/apis/providers/providers.py +69 -0
  38. llama_stack/apis/resource.py +37 -0
  39. llama_stack/apis/safety/__init__.py +1 -1
  40. llama_stack/apis/safety/safety.py +95 -12
  41. llama_stack/apis/scoring/__init__.py +7 -0
  42. llama_stack/apis/scoring/scoring.py +93 -0
  43. llama_stack/apis/scoring_functions/__init__.py +7 -0
  44. llama_stack/apis/scoring_functions/scoring_functions.py +208 -0
  45. llama_stack/apis/shields/__init__.py +1 -1
  46. llama_stack/apis/shields/shields.py +76 -33
  47. llama_stack/apis/synthetic_data_generation/__init__.py +1 -1
  48. llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +40 -17
  49. llama_stack/apis/telemetry/__init__.py +1 -1
  50. llama_stack/apis/telemetry/telemetry.py +322 -31
  51. llama_stack/apis/{dataset → tools}/__init__.py +2 -1
  52. llama_stack/apis/tools/rag_tool.py +218 -0
  53. llama_stack/apis/tools/tools.py +221 -0
  54. llama_stack/apis/vector_io/__init__.py +7 -0
  55. llama_stack/apis/vector_io/vector_io.py +960 -0
  56. llama_stack/apis/vector_stores/__init__.py +7 -0
  57. llama_stack/apis/vector_stores/vector_stores.py +51 -0
  58. llama_stack/apis/version.py +9 -0
  59. llama_stack/cli/llama.py +13 -5
  60. llama_stack/cli/stack/_list_deps.py +182 -0
  61. llama_stack/cli/stack/list_apis.py +1 -1
  62. llama_stack/cli/stack/list_deps.py +55 -0
  63. llama_stack/cli/stack/list_providers.py +24 -10
  64. llama_stack/cli/stack/list_stacks.py +56 -0
  65. llama_stack/cli/stack/remove.py +115 -0
  66. llama_stack/cli/stack/run.py +169 -56
  67. llama_stack/cli/stack/stack.py +18 -4
  68. llama_stack/cli/stack/utils.py +151 -0
  69. llama_stack/cli/table.py +23 -61
  70. llama_stack/cli/utils.py +29 -0
  71. llama_stack/core/access_control/access_control.py +131 -0
  72. llama_stack/core/access_control/conditions.py +129 -0
  73. llama_stack/core/access_control/datatypes.py +107 -0
  74. llama_stack/core/build.py +164 -0
  75. llama_stack/core/client.py +205 -0
  76. llama_stack/core/common.sh +37 -0
  77. llama_stack/{distribution → core}/configure.py +74 -55
  78. llama_stack/core/conversations/conversations.py +309 -0
  79. llama_stack/core/datatypes.py +625 -0
  80. llama_stack/core/distribution.py +276 -0
  81. llama_stack/core/external.py +54 -0
  82. llama_stack/core/id_generation.py +42 -0
  83. llama_stack/core/inspect.py +86 -0
  84. llama_stack/core/library_client.py +539 -0
  85. llama_stack/core/prompts/prompts.py +234 -0
  86. llama_stack/core/providers.py +137 -0
  87. llama_stack/core/request_headers.py +115 -0
  88. llama_stack/core/resolver.py +506 -0
  89. llama_stack/core/routers/__init__.py +101 -0
  90. llama_stack/core/routers/datasets.py +73 -0
  91. llama_stack/core/routers/eval_scoring.py +155 -0
  92. llama_stack/core/routers/inference.py +645 -0
  93. llama_stack/core/routers/safety.py +85 -0
  94. llama_stack/core/routers/tool_runtime.py +91 -0
  95. llama_stack/core/routers/vector_io.py +442 -0
  96. llama_stack/core/routing_tables/benchmarks.py +62 -0
  97. llama_stack/core/routing_tables/common.py +254 -0
  98. llama_stack/core/routing_tables/datasets.py +91 -0
  99. llama_stack/core/routing_tables/models.py +163 -0
  100. llama_stack/core/routing_tables/scoring_functions.py +66 -0
  101. llama_stack/core/routing_tables/shields.py +61 -0
  102. llama_stack/core/routing_tables/toolgroups.py +129 -0
  103. llama_stack/core/routing_tables/vector_stores.py +292 -0
  104. llama_stack/core/server/auth.py +187 -0
  105. llama_stack/core/server/auth_providers.py +494 -0
  106. llama_stack/core/server/quota.py +110 -0
  107. llama_stack/core/server/routes.py +141 -0
  108. llama_stack/core/server/server.py +542 -0
  109. llama_stack/core/server/tracing.py +80 -0
  110. llama_stack/core/stack.py +546 -0
  111. llama_stack/core/start_stack.sh +117 -0
  112. llama_stack/core/storage/datatypes.py +283 -0
  113. llama_stack/{cli/model → core/store}/__init__.py +1 -1
  114. llama_stack/core/store/registry.py +199 -0
  115. llama_stack/core/testing_context.py +49 -0
  116. llama_stack/core/ui/app.py +55 -0
  117. llama_stack/core/ui/modules/api.py +32 -0
  118. llama_stack/core/ui/modules/utils.py +42 -0
  119. llama_stack/core/ui/page/distribution/datasets.py +18 -0
  120. llama_stack/core/ui/page/distribution/eval_tasks.py +20 -0
  121. llama_stack/core/ui/page/distribution/models.py +18 -0
  122. llama_stack/core/ui/page/distribution/providers.py +27 -0
  123. llama_stack/core/ui/page/distribution/resources.py +48 -0
  124. llama_stack/core/ui/page/distribution/scoring_functions.py +18 -0
  125. llama_stack/core/ui/page/distribution/shields.py +19 -0
  126. llama_stack/core/ui/page/evaluations/app_eval.py +143 -0
  127. llama_stack/core/ui/page/evaluations/native_eval.py +253 -0
  128. llama_stack/core/ui/page/playground/chat.py +130 -0
  129. llama_stack/core/ui/page/playground/tools.py +352 -0
  130. llama_stack/core/utils/config.py +30 -0
  131. llama_stack/{distribution → core}/utils/config_dirs.py +3 -6
  132. llama_stack/core/utils/config_resolution.py +125 -0
  133. llama_stack/core/utils/context.py +84 -0
  134. llama_stack/core/utils/exec.py +96 -0
  135. llama_stack/{providers/impls/meta_reference/codeshield/config.py → core/utils/image_types.py} +4 -3
  136. llama_stack/{distribution → core}/utils/model_utils.py +2 -2
  137. llama_stack/{distribution → core}/utils/prompt_for_config.py +30 -63
  138. llama_stack/{apis/batch_inference → distributions/dell}/__init__.py +1 -1
  139. llama_stack/distributions/dell/build.yaml +33 -0
  140. llama_stack/distributions/dell/dell.py +158 -0
  141. llama_stack/distributions/dell/run-with-safety.yaml +141 -0
  142. llama_stack/distributions/dell/run.yaml +132 -0
  143. llama_stack/distributions/meta-reference-gpu/__init__.py +7 -0
  144. llama_stack/distributions/meta-reference-gpu/build.yaml +32 -0
  145. llama_stack/distributions/meta-reference-gpu/meta_reference.py +163 -0
  146. llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +154 -0
  147. llama_stack/distributions/meta-reference-gpu/run.yaml +139 -0
  148. llama_stack/{apis/evals → distributions/nvidia}/__init__.py +1 -1
  149. llama_stack/distributions/nvidia/build.yaml +29 -0
  150. llama_stack/distributions/nvidia/nvidia.py +154 -0
  151. llama_stack/distributions/nvidia/run-with-safety.yaml +137 -0
  152. llama_stack/distributions/nvidia/run.yaml +116 -0
  153. llama_stack/distributions/open-benchmark/__init__.py +7 -0
  154. llama_stack/distributions/open-benchmark/build.yaml +36 -0
  155. llama_stack/distributions/open-benchmark/open_benchmark.py +303 -0
  156. llama_stack/distributions/open-benchmark/run.yaml +252 -0
  157. llama_stack/distributions/postgres-demo/__init__.py +7 -0
  158. llama_stack/distributions/postgres-demo/build.yaml +23 -0
  159. llama_stack/distributions/postgres-demo/postgres_demo.py +125 -0
  160. llama_stack/distributions/postgres-demo/run.yaml +115 -0
  161. llama_stack/{apis/memory → distributions/starter}/__init__.py +1 -1
  162. llama_stack/distributions/starter/build.yaml +61 -0
  163. llama_stack/distributions/starter/run-with-postgres-store.yaml +285 -0
  164. llama_stack/distributions/starter/run.yaml +276 -0
  165. llama_stack/distributions/starter/starter.py +345 -0
  166. llama_stack/distributions/starter-gpu/__init__.py +7 -0
  167. llama_stack/distributions/starter-gpu/build.yaml +61 -0
  168. llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +288 -0
  169. llama_stack/distributions/starter-gpu/run.yaml +279 -0
  170. llama_stack/distributions/starter-gpu/starter_gpu.py +20 -0
  171. llama_stack/distributions/template.py +456 -0
  172. llama_stack/distributions/watsonx/__init__.py +7 -0
  173. llama_stack/distributions/watsonx/build.yaml +33 -0
  174. llama_stack/distributions/watsonx/run.yaml +133 -0
  175. llama_stack/distributions/watsonx/watsonx.py +95 -0
  176. llama_stack/env.py +24 -0
  177. llama_stack/log.py +314 -0
  178. llama_stack/models/llama/checkpoint.py +164 -0
  179. llama_stack/models/llama/datatypes.py +164 -0
  180. llama_stack/models/llama/hadamard_utils.py +86 -0
  181. llama_stack/models/llama/llama3/args.py +74 -0
  182. llama_stack/models/llama/llama3/chat_format.py +286 -0
  183. llama_stack/models/llama/llama3/generation.py +376 -0
  184. llama_stack/models/llama/llama3/interface.py +255 -0
  185. llama_stack/models/llama/llama3/model.py +304 -0
  186. llama_stack/models/llama/llama3/multimodal/__init__.py +12 -0
  187. llama_stack/models/llama/llama3/multimodal/encoder_utils.py +180 -0
  188. llama_stack/models/llama/llama3/multimodal/image_transform.py +409 -0
  189. llama_stack/models/llama/llama3/multimodal/model.py +1430 -0
  190. llama_stack/models/llama/llama3/multimodal/utils.py +26 -0
  191. llama_stack/models/llama/llama3/prompt_templates/__init__.py +22 -0
  192. llama_stack/models/llama/llama3/prompt_templates/base.py +39 -0
  193. llama_stack/models/llama/llama3/prompt_templates/system_prompts.py +319 -0
  194. llama_stack/models/llama/llama3/prompt_templates/tool_response.py +62 -0
  195. llama_stack/models/llama/llama3/quantization/loader.py +316 -0
  196. llama_stack/models/llama/llama3/template_data.py +116 -0
  197. llama_stack/models/llama/llama3/tokenizer.model +128000 -0
  198. llama_stack/models/llama/llama3/tokenizer.py +198 -0
  199. llama_stack/models/llama/llama3/tool_utils.py +266 -0
  200. llama_stack/models/llama/llama3_1/__init__.py +12 -0
  201. llama_stack/models/llama/llama3_1/prompt_format.md +358 -0
  202. llama_stack/models/llama/llama3_1/prompts.py +258 -0
  203. llama_stack/models/llama/llama3_2/prompts_text.py +229 -0
  204. llama_stack/models/llama/llama3_2/prompts_vision.py +126 -0
  205. llama_stack/models/llama/llama3_2/text_prompt_format.md +286 -0
  206. llama_stack/models/llama/llama3_2/vision_prompt_format.md +141 -0
  207. llama_stack/models/llama/llama3_3/prompts.py +259 -0
  208. llama_stack/models/llama/llama4/args.py +107 -0
  209. llama_stack/models/llama/llama4/chat_format.py +317 -0
  210. llama_stack/models/llama/llama4/datatypes.py +56 -0
  211. llama_stack/models/llama/llama4/ffn.py +58 -0
  212. llama_stack/models/llama/llama4/generation.py +313 -0
  213. llama_stack/models/llama/llama4/model.py +437 -0
  214. llama_stack/models/llama/llama4/moe.py +214 -0
  215. llama_stack/models/llama/llama4/preprocess.py +435 -0
  216. llama_stack/models/llama/llama4/prompt_format.md +304 -0
  217. llama_stack/models/llama/llama4/prompt_templates/system_prompts.py +136 -0
  218. llama_stack/models/llama/llama4/prompts.py +279 -0
  219. llama_stack/models/llama/llama4/quantization/__init__.py +5 -0
  220. llama_stack/models/llama/llama4/quantization/loader.py +226 -0
  221. llama_stack/models/llama/llama4/tokenizer.model +200000 -0
  222. llama_stack/models/llama/llama4/tokenizer.py +263 -0
  223. llama_stack/models/llama/llama4/vision/__init__.py +5 -0
  224. llama_stack/models/llama/llama4/vision/embedding.py +210 -0
  225. llama_stack/models/llama/llama4/vision/encoder.py +412 -0
  226. llama_stack/models/llama/prompt_format.py +191 -0
  227. llama_stack/models/llama/quantize_impls.py +316 -0
  228. llama_stack/models/llama/sku_list.py +1029 -0
  229. llama_stack/models/llama/sku_types.py +233 -0
  230. llama_stack/models/llama/tokenizer_utils.py +40 -0
  231. llama_stack/providers/datatypes.py +136 -107
  232. llama_stack/providers/inline/__init__.py +5 -0
  233. llama_stack/providers/inline/agents/__init__.py +5 -0
  234. llama_stack/providers/{impls/meta_reference/agents → inline/agents/meta_reference}/__init__.py +12 -5
  235. llama_stack/providers/inline/agents/meta_reference/agent_instance.py +1024 -0
  236. llama_stack/providers/inline/agents/meta_reference/agents.py +383 -0
  237. llama_stack/providers/inline/agents/meta_reference/config.py +37 -0
  238. llama_stack/providers/inline/agents/meta_reference/persistence.py +228 -0
  239. llama_stack/providers/inline/agents/meta_reference/responses/__init__.py +5 -0
  240. llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +423 -0
  241. llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +1226 -0
  242. llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py +449 -0
  243. llama_stack/providers/inline/agents/meta_reference/responses/types.py +194 -0
  244. llama_stack/providers/inline/agents/meta_reference/responses/utils.py +365 -0
  245. llama_stack/providers/inline/agents/meta_reference/safety.py +52 -0
  246. llama_stack/providers/inline/batches/__init__.py +5 -0
  247. llama_stack/providers/inline/batches/reference/__init__.py +36 -0
  248. llama_stack/providers/inline/batches/reference/batches.py +679 -0
  249. llama_stack/providers/inline/batches/reference/config.py +40 -0
  250. llama_stack/providers/inline/datasetio/__init__.py +5 -0
  251. llama_stack/providers/inline/datasetio/localfs/__init__.py +20 -0
  252. llama_stack/providers/inline/datasetio/localfs/config.py +23 -0
  253. llama_stack/providers/inline/datasetio/localfs/datasetio.py +113 -0
  254. llama_stack/providers/inline/eval/__init__.py +5 -0
  255. llama_stack/providers/inline/eval/meta_reference/__init__.py +28 -0
  256. llama_stack/providers/inline/eval/meta_reference/config.py +23 -0
  257. llama_stack/providers/inline/eval/meta_reference/eval.py +259 -0
  258. llama_stack/providers/inline/files/localfs/__init__.py +20 -0
  259. llama_stack/providers/inline/files/localfs/config.py +31 -0
  260. llama_stack/providers/inline/files/localfs/files.py +219 -0
  261. llama_stack/providers/inline/inference/__init__.py +5 -0
  262. llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/__init__.py +4 -4
  263. llama_stack/providers/inline/inference/meta_reference/common.py +24 -0
  264. llama_stack/providers/inline/inference/meta_reference/config.py +68 -0
  265. llama_stack/providers/inline/inference/meta_reference/generators.py +211 -0
  266. llama_stack/providers/inline/inference/meta_reference/inference.py +158 -0
  267. llama_stack/providers/inline/inference/meta_reference/model_parallel.py +96 -0
  268. llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/parallel_utils.py +56 -73
  269. llama_stack/providers/inline/inference/sentence_transformers/__init__.py +22 -0
  270. llama_stack/providers/{impls/meta_reference/agents → inline/inference/sentence_transformers}/config.py +6 -4
  271. llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +83 -0
  272. llama_stack/providers/inline/post_training/__init__.py +5 -0
  273. llama_stack/providers/inline/post_training/common/__init__.py +5 -0
  274. llama_stack/providers/inline/post_training/common/utils.py +35 -0
  275. llama_stack/providers/inline/post_training/common/validator.py +36 -0
  276. llama_stack/providers/inline/post_training/huggingface/__init__.py +27 -0
  277. llama_stack/providers/inline/post_training/huggingface/config.py +83 -0
  278. llama_stack/providers/inline/post_training/huggingface/post_training.py +208 -0
  279. llama_stack/providers/inline/post_training/huggingface/recipes/__init__.py +5 -0
  280. llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py +519 -0
  281. llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py +485 -0
  282. llama_stack/providers/inline/post_training/huggingface/utils.py +269 -0
  283. llama_stack/providers/inline/post_training/torchtune/__init__.py +27 -0
  284. llama_stack/providers/inline/post_training/torchtune/common/__init__.py +5 -0
  285. llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +240 -0
  286. llama_stack/providers/inline/post_training/torchtune/common/utils.py +99 -0
  287. llama_stack/providers/inline/post_training/torchtune/config.py +20 -0
  288. llama_stack/providers/inline/post_training/torchtune/datasets/__init__.py +5 -0
  289. llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +57 -0
  290. llama_stack/providers/inline/post_training/torchtune/datasets/sft.py +78 -0
  291. llama_stack/providers/inline/post_training/torchtune/post_training.py +178 -0
  292. llama_stack/providers/inline/post_training/torchtune/recipes/__init__.py +5 -0
  293. llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +588 -0
  294. llama_stack/providers/inline/safety/__init__.py +5 -0
  295. llama_stack/providers/{impls/meta_reference/codeshield → inline/safety/code_scanner}/__init__.py +4 -2
  296. llama_stack/providers/inline/safety/code_scanner/code_scanner.py +128 -0
  297. llama_stack/providers/{impls/meta_reference/memory → inline/safety/code_scanner}/config.py +5 -3
  298. llama_stack/providers/inline/safety/llama_guard/__init__.py +19 -0
  299. llama_stack/providers/inline/safety/llama_guard/config.py +19 -0
  300. llama_stack/providers/inline/safety/llama_guard/llama_guard.py +489 -0
  301. llama_stack/providers/{adapters/memory/sample → inline/safety/prompt_guard}/__init__.py +4 -4
  302. llama_stack/providers/inline/safety/prompt_guard/config.py +32 -0
  303. llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py +131 -0
  304. llama_stack/providers/inline/scoring/__init__.py +5 -0
  305. llama_stack/providers/inline/scoring/basic/__init__.py +25 -0
  306. llama_stack/providers/{adapters/memory/weaviate → inline/scoring/basic}/config.py +5 -7
  307. llama_stack/providers/inline/scoring/basic/scoring.py +126 -0
  308. llama_stack/providers/inline/scoring/basic/scoring_fn/__init__.py +5 -0
  309. llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py +240 -0
  310. llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py +41 -0
  311. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/__init__.py +5 -0
  312. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/docvqa.py +21 -0
  313. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/equality.py +21 -0
  314. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/ifeval.py +23 -0
  315. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_math_response.py +27 -0
  316. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py +71 -0
  317. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/subset_of.py +21 -0
  318. llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py +80 -0
  319. llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py +66 -0
  320. llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py +58 -0
  321. llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py +38 -0
  322. llama_stack/providers/inline/scoring/basic/utils/__init__.py +5 -0
  323. llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py +3319 -0
  324. llama_stack/providers/inline/scoring/basic/utils/math_utils.py +330 -0
  325. llama_stack/providers/inline/scoring/braintrust/__init__.py +27 -0
  326. llama_stack/providers/inline/scoring/braintrust/braintrust.py +230 -0
  327. llama_stack/providers/inline/scoring/braintrust/config.py +21 -0
  328. llama_stack/providers/inline/scoring/braintrust/scoring_fn/__init__.py +5 -0
  329. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/__init__.py +5 -0
  330. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_correctness.py +24 -0
  331. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_relevancy.py +24 -0
  332. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_similarity.py +24 -0
  333. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_entity_recall.py +24 -0
  334. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_precision.py +24 -0
  335. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_recall.py +24 -0
  336. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_relevancy.py +23 -0
  337. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/factuality.py +24 -0
  338. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/faithfulness.py +24 -0
  339. llama_stack/providers/inline/scoring/llm_as_judge/__init__.py +21 -0
  340. llama_stack/providers/inline/scoring/llm_as_judge/config.py +14 -0
  341. llama_stack/providers/inline/scoring/llm_as_judge/scoring.py +113 -0
  342. llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/__init__.py +5 -0
  343. llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/__init__.py +5 -0
  344. llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +96 -0
  345. llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +20 -0
  346. llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +81 -0
  347. llama_stack/providers/inline/telemetry/__init__.py +5 -0
  348. llama_stack/providers/inline/telemetry/meta_reference/__init__.py +21 -0
  349. llama_stack/providers/inline/telemetry/meta_reference/config.py +47 -0
  350. llama_stack/providers/inline/telemetry/meta_reference/telemetry.py +252 -0
  351. llama_stack/providers/inline/tool_runtime/__init__.py +5 -0
  352. llama_stack/providers/inline/tool_runtime/rag/__init__.py +19 -0
  353. llama_stack/providers/{impls/meta_reference/telemetry → inline/tool_runtime/rag}/config.py +5 -3
  354. llama_stack/providers/inline/tool_runtime/rag/context_retriever.py +77 -0
  355. llama_stack/providers/inline/tool_runtime/rag/memory.py +332 -0
  356. llama_stack/providers/inline/vector_io/__init__.py +5 -0
  357. llama_stack/providers/inline/vector_io/chroma/__init__.py +19 -0
  358. llama_stack/providers/inline/vector_io/chroma/config.py +30 -0
  359. llama_stack/providers/inline/vector_io/faiss/__init__.py +21 -0
  360. llama_stack/providers/inline/vector_io/faiss/config.py +26 -0
  361. llama_stack/providers/inline/vector_io/faiss/faiss.py +293 -0
  362. llama_stack/providers/inline/vector_io/milvus/__init__.py +19 -0
  363. llama_stack/providers/inline/vector_io/milvus/config.py +29 -0
  364. llama_stack/providers/inline/vector_io/qdrant/__init__.py +20 -0
  365. llama_stack/providers/inline/vector_io/qdrant/config.py +29 -0
  366. llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +20 -0
  367. llama_stack/providers/inline/vector_io/sqlite_vec/config.py +26 -0
  368. llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +483 -0
  369. llama_stack/providers/registry/agents.py +16 -18
  370. llama_stack/providers/registry/batches.py +26 -0
  371. llama_stack/providers/registry/datasetio.py +49 -0
  372. llama_stack/providers/registry/eval.py +46 -0
  373. llama_stack/providers/registry/files.py +31 -0
  374. llama_stack/providers/registry/inference.py +273 -118
  375. llama_stack/providers/registry/post_training.py +69 -0
  376. llama_stack/providers/registry/safety.py +46 -41
  377. llama_stack/providers/registry/scoring.py +51 -0
  378. llama_stack/providers/registry/tool_runtime.py +87 -0
  379. llama_stack/providers/registry/vector_io.py +828 -0
  380. llama_stack/providers/remote/__init__.py +5 -0
  381. llama_stack/providers/remote/agents/__init__.py +5 -0
  382. llama_stack/providers/remote/datasetio/__init__.py +5 -0
  383. llama_stack/providers/{adapters/memory/chroma → remote/datasetio/huggingface}/__init__.py +7 -4
  384. llama_stack/providers/remote/datasetio/huggingface/config.py +23 -0
  385. llama_stack/providers/remote/datasetio/huggingface/huggingface.py +99 -0
  386. llama_stack/providers/remote/datasetio/nvidia/__init__.py +23 -0
  387. llama_stack/providers/remote/datasetio/nvidia/config.py +61 -0
  388. llama_stack/providers/remote/datasetio/nvidia/datasetio.py +116 -0
  389. llama_stack/providers/remote/eval/__init__.py +5 -0
  390. llama_stack/providers/remote/eval/nvidia/__init__.py +31 -0
  391. llama_stack/providers/remote/eval/nvidia/config.py +29 -0
  392. llama_stack/providers/remote/eval/nvidia/eval.py +162 -0
  393. llama_stack/providers/remote/files/s3/__init__.py +19 -0
  394. llama_stack/providers/remote/files/s3/config.py +42 -0
  395. llama_stack/providers/remote/files/s3/files.py +313 -0
  396. llama_stack/providers/remote/inference/__init__.py +5 -0
  397. llama_stack/providers/{adapters/safety/sample → remote/inference/anthropic}/__init__.py +4 -6
  398. llama_stack/providers/remote/inference/anthropic/anthropic.py +36 -0
  399. llama_stack/providers/remote/inference/anthropic/config.py +28 -0
  400. llama_stack/providers/{impls/meta_reference/telemetry → remote/inference/azure}/__init__.py +4 -4
  401. llama_stack/providers/remote/inference/azure/azure.py +25 -0
  402. llama_stack/providers/remote/inference/azure/config.py +61 -0
  403. llama_stack/providers/{adapters → remote}/inference/bedrock/__init__.py +18 -17
  404. llama_stack/providers/remote/inference/bedrock/bedrock.py +142 -0
  405. llama_stack/providers/{adapters/inference/sample → remote/inference/bedrock}/config.py +3 -4
  406. llama_stack/providers/remote/inference/bedrock/models.py +29 -0
  407. llama_stack/providers/remote/inference/cerebras/__init__.py +19 -0
  408. llama_stack/providers/remote/inference/cerebras/cerebras.py +28 -0
  409. llama_stack/providers/remote/inference/cerebras/config.py +30 -0
  410. llama_stack/providers/{adapters → remote}/inference/databricks/__init__.py +4 -5
  411. llama_stack/providers/remote/inference/databricks/config.py +37 -0
  412. llama_stack/providers/remote/inference/databricks/databricks.py +44 -0
  413. llama_stack/providers/{adapters → remote}/inference/fireworks/__init__.py +8 -4
  414. llama_stack/providers/remote/inference/fireworks/config.py +27 -0
  415. llama_stack/providers/remote/inference/fireworks/fireworks.py +27 -0
  416. llama_stack/providers/{adapters/memory/pgvector → remote/inference/gemini}/__init__.py +4 -4
  417. llama_stack/providers/remote/inference/gemini/config.py +28 -0
  418. llama_stack/providers/remote/inference/gemini/gemini.py +82 -0
  419. llama_stack/providers/remote/inference/groq/__init__.py +15 -0
  420. llama_stack/providers/remote/inference/groq/config.py +34 -0
  421. llama_stack/providers/remote/inference/groq/groq.py +18 -0
  422. llama_stack/providers/remote/inference/llama_openai_compat/__init__.py +15 -0
  423. llama_stack/providers/remote/inference/llama_openai_compat/config.py +34 -0
  424. llama_stack/providers/remote/inference/llama_openai_compat/llama.py +46 -0
  425. llama_stack/providers/remote/inference/nvidia/__init__.py +23 -0
  426. llama_stack/providers/remote/inference/nvidia/config.py +64 -0
  427. llama_stack/providers/remote/inference/nvidia/nvidia.py +61 -0
  428. llama_stack/providers/{adapters/safety/sample/config.py → remote/inference/nvidia/utils.py} +3 -4
  429. llama_stack/providers/{impls/vllm → remote/inference/ollama}/__init__.py +4 -6
  430. llama_stack/providers/remote/inference/ollama/config.py +25 -0
  431. llama_stack/providers/remote/inference/ollama/ollama.py +102 -0
  432. llama_stack/providers/{adapters/telemetry/opentelemetry → remote/inference/openai}/__init__.py +4 -4
  433. llama_stack/providers/remote/inference/openai/config.py +39 -0
  434. llama_stack/providers/remote/inference/openai/openai.py +38 -0
  435. llama_stack/providers/remote/inference/passthrough/__init__.py +23 -0
  436. llama_stack/providers/remote/inference/passthrough/config.py +34 -0
  437. llama_stack/providers/remote/inference/passthrough/passthrough.py +122 -0
  438. llama_stack/providers/remote/inference/runpod/__init__.py +16 -0
  439. llama_stack/providers/remote/inference/runpod/config.py +32 -0
  440. llama_stack/providers/remote/inference/runpod/runpod.py +42 -0
  441. llama_stack/providers/remote/inference/sambanova/__init__.py +16 -0
  442. llama_stack/providers/remote/inference/sambanova/config.py +34 -0
  443. llama_stack/providers/remote/inference/sambanova/sambanova.py +28 -0
  444. llama_stack/providers/{adapters → remote}/inference/tgi/__init__.py +3 -4
  445. llama_stack/providers/remote/inference/tgi/config.py +76 -0
  446. llama_stack/providers/remote/inference/tgi/tgi.py +85 -0
  447. llama_stack/providers/{adapters → remote}/inference/together/__init__.py +8 -4
  448. llama_stack/providers/remote/inference/together/config.py +27 -0
  449. llama_stack/providers/remote/inference/together/together.py +102 -0
  450. llama_stack/providers/remote/inference/vertexai/__init__.py +15 -0
  451. llama_stack/providers/remote/inference/vertexai/config.py +48 -0
  452. llama_stack/providers/remote/inference/vertexai/vertexai.py +54 -0
  453. llama_stack/providers/remote/inference/vllm/__init__.py +22 -0
  454. llama_stack/providers/remote/inference/vllm/config.py +59 -0
  455. llama_stack/providers/remote/inference/vllm/vllm.py +111 -0
  456. llama_stack/providers/remote/inference/watsonx/__init__.py +15 -0
  457. llama_stack/providers/remote/inference/watsonx/config.py +45 -0
  458. llama_stack/providers/remote/inference/watsonx/watsonx.py +336 -0
  459. llama_stack/providers/remote/post_training/__init__.py +5 -0
  460. llama_stack/providers/remote/post_training/nvidia/__init__.py +23 -0
  461. llama_stack/providers/remote/post_training/nvidia/config.py +113 -0
  462. llama_stack/providers/remote/post_training/nvidia/models.py +27 -0
  463. llama_stack/providers/remote/post_training/nvidia/post_training.py +430 -0
  464. llama_stack/providers/remote/post_training/nvidia/utils.py +63 -0
  465. llama_stack/providers/remote/safety/__init__.py +5 -0
  466. llama_stack/providers/remote/safety/bedrock/bedrock.py +111 -0
  467. llama_stack/providers/remote/safety/bedrock/config.py +14 -0
  468. llama_stack/providers/{adapters/inference/sample → remote/safety/nvidia}/__init__.py +5 -4
  469. llama_stack/providers/remote/safety/nvidia/config.py +40 -0
  470. llama_stack/providers/remote/safety/nvidia/nvidia.py +161 -0
  471. llama_stack/providers/{adapters/agents/sample → remote/safety/sambanova}/__init__.py +5 -4
  472. llama_stack/providers/remote/safety/sambanova/config.py +37 -0
  473. llama_stack/providers/remote/safety/sambanova/sambanova.py +98 -0
  474. llama_stack/providers/remote/tool_runtime/__init__.py +5 -0
  475. llama_stack/providers/remote/tool_runtime/bing_search/__init__.py +21 -0
  476. llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +112 -0
  477. llama_stack/providers/remote/tool_runtime/bing_search/config.py +22 -0
  478. llama_stack/providers/remote/tool_runtime/brave_search/__init__.py +20 -0
  479. llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +148 -0
  480. llama_stack/providers/remote/tool_runtime/brave_search/config.py +27 -0
  481. llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py +15 -0
  482. llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py +20 -0
  483. llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +73 -0
  484. llama_stack/providers/remote/tool_runtime/tavily_search/__init__.py +20 -0
  485. llama_stack/providers/remote/tool_runtime/tavily_search/config.py +27 -0
  486. llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +84 -0
  487. llama_stack/providers/remote/tool_runtime/wolfram_alpha/__init__.py +22 -0
  488. llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py +21 -0
  489. llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +140 -0
  490. llama_stack/providers/remote/vector_io/__init__.py +5 -0
  491. llama_stack/providers/remote/vector_io/chroma/__init__.py +17 -0
  492. llama_stack/providers/remote/vector_io/chroma/chroma.py +215 -0
  493. llama_stack/providers/remote/vector_io/chroma/config.py +28 -0
  494. llama_stack/providers/remote/vector_io/milvus/__init__.py +18 -0
  495. llama_stack/providers/remote/vector_io/milvus/config.py +35 -0
  496. llama_stack/providers/remote/vector_io/milvus/milvus.py +375 -0
  497. llama_stack/providers/remote/vector_io/pgvector/__init__.py +17 -0
  498. llama_stack/providers/remote/vector_io/pgvector/config.py +47 -0
  499. llama_stack/providers/remote/vector_io/pgvector/pgvector.py +460 -0
  500. llama_stack/providers/remote/vector_io/qdrant/__init__.py +17 -0
  501. llama_stack/providers/remote/vector_io/qdrant/config.py +37 -0
  502. llama_stack/providers/remote/vector_io/qdrant/qdrant.py +265 -0
  503. llama_stack/providers/remote/vector_io/weaviate/__init__.py +17 -0
  504. llama_stack/providers/remote/vector_io/weaviate/config.py +32 -0
  505. llama_stack/providers/remote/vector_io/weaviate/weaviate.py +393 -0
  506. llama_stack/providers/utils/bedrock/__init__.py +5 -0
  507. llama_stack/providers/utils/bedrock/client.py +74 -0
  508. llama_stack/providers/utils/bedrock/config.py +64 -0
  509. llama_stack/providers/utils/bedrock/refreshable_boto_session.py +112 -0
  510. llama_stack/providers/utils/common/__init__.py +5 -0
  511. llama_stack/providers/utils/common/data_schema_validator.py +103 -0
  512. llama_stack/providers/utils/datasetio/__init__.py +5 -0
  513. llama_stack/providers/utils/datasetio/url_utils.py +47 -0
  514. llama_stack/providers/utils/files/__init__.py +5 -0
  515. llama_stack/providers/utils/files/form_data.py +69 -0
  516. llama_stack/providers/utils/inference/__init__.py +8 -7
  517. llama_stack/providers/utils/inference/embedding_mixin.py +101 -0
  518. llama_stack/providers/utils/inference/inference_store.py +264 -0
  519. llama_stack/providers/utils/inference/litellm_openai_mixin.py +336 -0
  520. llama_stack/providers/utils/inference/model_registry.py +173 -23
  521. llama_stack/providers/utils/inference/openai_compat.py +1261 -49
  522. llama_stack/providers/utils/inference/openai_mixin.py +506 -0
  523. llama_stack/providers/utils/inference/prompt_adapter.py +365 -67
  524. llama_stack/providers/utils/kvstore/api.py +6 -6
  525. llama_stack/providers/utils/kvstore/config.py +28 -48
  526. llama_stack/providers/utils/kvstore/kvstore.py +61 -15
  527. llama_stack/providers/utils/kvstore/mongodb/__init__.py +9 -0
  528. llama_stack/providers/utils/kvstore/mongodb/mongodb.py +82 -0
  529. llama_stack/providers/utils/kvstore/postgres/__init__.py +7 -0
  530. llama_stack/providers/utils/kvstore/postgres/postgres.py +114 -0
  531. llama_stack/providers/utils/kvstore/redis/redis.py +33 -9
  532. llama_stack/providers/utils/kvstore/sqlite/config.py +2 -1
  533. llama_stack/providers/utils/kvstore/sqlite/sqlite.py +123 -22
  534. llama_stack/providers/utils/memory/file_utils.py +1 -1
  535. llama_stack/providers/utils/memory/openai_vector_store_mixin.py +1304 -0
  536. llama_stack/providers/utils/memory/vector_store.py +220 -82
  537. llama_stack/providers/utils/pagination.py +43 -0
  538. llama_stack/providers/utils/responses/__init__.py +5 -0
  539. llama_stack/providers/utils/responses/responses_store.py +292 -0
  540. llama_stack/providers/utils/scheduler.py +270 -0
  541. llama_stack/providers/utils/scoring/__init__.py +5 -0
  542. llama_stack/providers/utils/scoring/aggregation_utils.py +75 -0
  543. llama_stack/providers/utils/scoring/base_scoring_fn.py +114 -0
  544. llama_stack/providers/utils/scoring/basic_scoring_utils.py +26 -0
  545. llama_stack/providers/utils/sqlstore/__init__.py +5 -0
  546. llama_stack/providers/utils/sqlstore/api.py +128 -0
  547. llama_stack/providers/utils/sqlstore/authorized_sqlstore.py +319 -0
  548. llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py +343 -0
  549. llama_stack/providers/utils/sqlstore/sqlstore.py +70 -0
  550. llama_stack/providers/utils/telemetry/trace_protocol.py +142 -0
  551. llama_stack/providers/utils/telemetry/tracing.py +192 -53
  552. llama_stack/providers/utils/tools/__init__.py +5 -0
  553. llama_stack/providers/utils/tools/mcp.py +148 -0
  554. llama_stack/providers/utils/tools/ttl_dict.py +70 -0
  555. llama_stack/providers/utils/vector_io/__init__.py +5 -0
  556. llama_stack/providers/utils/vector_io/vector_utils.py +156 -0
  557. llama_stack/schema_utils.py +118 -0
  558. llama_stack/strong_typing/__init__.py +19 -0
  559. llama_stack/strong_typing/auxiliary.py +228 -0
  560. llama_stack/strong_typing/classdef.py +440 -0
  561. llama_stack/strong_typing/core.py +46 -0
  562. llama_stack/strong_typing/deserializer.py +877 -0
  563. llama_stack/strong_typing/docstring.py +409 -0
  564. llama_stack/strong_typing/exception.py +23 -0
  565. llama_stack/strong_typing/inspection.py +1085 -0
  566. llama_stack/strong_typing/mapping.py +40 -0
  567. llama_stack/strong_typing/name.py +182 -0
  568. llama_stack/strong_typing/py.typed +0 -0
  569. llama_stack/strong_typing/schema.py +792 -0
  570. llama_stack/strong_typing/serialization.py +97 -0
  571. llama_stack/strong_typing/serializer.py +500 -0
  572. llama_stack/strong_typing/slots.py +27 -0
  573. llama_stack/strong_typing/topological.py +89 -0
  574. llama_stack/testing/__init__.py +5 -0
  575. llama_stack/testing/api_recorder.py +956 -0
  576. llama_stack/ui/node_modules/flatted/python/flatted.py +149 -0
  577. llama_stack-0.3.4.dist-info/METADATA +261 -0
  578. llama_stack-0.3.4.dist-info/RECORD +625 -0
  579. {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/WHEEL +1 -1
  580. llama_stack/apis/agents/client.py +0 -292
  581. llama_stack/apis/agents/event_logger.py +0 -184
  582. llama_stack/apis/batch_inference/batch_inference.py +0 -72
  583. llama_stack/apis/common/deployment_types.py +0 -31
  584. llama_stack/apis/dataset/dataset.py +0 -63
  585. llama_stack/apis/evals/evals.py +0 -122
  586. llama_stack/apis/inference/client.py +0 -197
  587. llama_stack/apis/inspect/client.py +0 -82
  588. llama_stack/apis/memory/client.py +0 -155
  589. llama_stack/apis/memory/memory.py +0 -65
  590. llama_stack/apis/memory_banks/__init__.py +0 -7
  591. llama_stack/apis/memory_banks/client.py +0 -101
  592. llama_stack/apis/memory_banks/memory_banks.py +0 -78
  593. llama_stack/apis/models/client.py +0 -83
  594. llama_stack/apis/reward_scoring/__init__.py +0 -7
  595. llama_stack/apis/reward_scoring/reward_scoring.py +0 -55
  596. llama_stack/apis/safety/client.py +0 -105
  597. llama_stack/apis/shields/client.py +0 -79
  598. llama_stack/cli/download.py +0 -340
  599. llama_stack/cli/model/describe.py +0 -82
  600. llama_stack/cli/model/download.py +0 -24
  601. llama_stack/cli/model/list.py +0 -62
  602. llama_stack/cli/model/model.py +0 -34
  603. llama_stack/cli/model/prompt_format.py +0 -112
  604. llama_stack/cli/model/safety_models.py +0 -52
  605. llama_stack/cli/stack/build.py +0 -299
  606. llama_stack/cli/stack/configure.py +0 -178
  607. llama_stack/distribution/build.py +0 -123
  608. llama_stack/distribution/build_conda_env.sh +0 -136
  609. llama_stack/distribution/build_container.sh +0 -142
  610. llama_stack/distribution/common.sh +0 -40
  611. llama_stack/distribution/configure_container.sh +0 -47
  612. llama_stack/distribution/datatypes.py +0 -139
  613. llama_stack/distribution/distribution.py +0 -58
  614. llama_stack/distribution/inspect.py +0 -67
  615. llama_stack/distribution/request_headers.py +0 -57
  616. llama_stack/distribution/resolver.py +0 -323
  617. llama_stack/distribution/routers/__init__.py +0 -48
  618. llama_stack/distribution/routers/routers.py +0 -158
  619. llama_stack/distribution/routers/routing_tables.py +0 -173
  620. llama_stack/distribution/server/endpoints.py +0 -48
  621. llama_stack/distribution/server/server.py +0 -343
  622. llama_stack/distribution/start_conda_env.sh +0 -42
  623. llama_stack/distribution/start_container.sh +0 -64
  624. llama_stack/distribution/templates/local-bedrock-conda-example-build.yaml +0 -10
  625. llama_stack/distribution/templates/local-build.yaml +0 -10
  626. llama_stack/distribution/templates/local-databricks-build.yaml +0 -10
  627. llama_stack/distribution/templates/local-fireworks-build.yaml +0 -10
  628. llama_stack/distribution/templates/local-hf-endpoint-build.yaml +0 -10
  629. llama_stack/distribution/templates/local-hf-serverless-build.yaml +0 -10
  630. llama_stack/distribution/templates/local-ollama-build.yaml +0 -10
  631. llama_stack/distribution/templates/local-tgi-build.yaml +0 -10
  632. llama_stack/distribution/templates/local-together-build.yaml +0 -10
  633. llama_stack/distribution/templates/local-vllm-build.yaml +0 -10
  634. llama_stack/distribution/utils/exec.py +0 -105
  635. llama_stack/providers/adapters/agents/sample/sample.py +0 -18
  636. llama_stack/providers/adapters/inference/bedrock/bedrock.py +0 -451
  637. llama_stack/providers/adapters/inference/bedrock/config.py +0 -55
  638. llama_stack/providers/adapters/inference/databricks/config.py +0 -21
  639. llama_stack/providers/adapters/inference/databricks/databricks.py +0 -125
  640. llama_stack/providers/adapters/inference/fireworks/config.py +0 -20
  641. llama_stack/providers/adapters/inference/fireworks/fireworks.py +0 -130
  642. llama_stack/providers/adapters/inference/ollama/__init__.py +0 -19
  643. llama_stack/providers/adapters/inference/ollama/ollama.py +0 -175
  644. llama_stack/providers/adapters/inference/sample/sample.py +0 -23
  645. llama_stack/providers/adapters/inference/tgi/config.py +0 -43
  646. llama_stack/providers/adapters/inference/tgi/tgi.py +0 -200
  647. llama_stack/providers/adapters/inference/together/config.py +0 -22
  648. llama_stack/providers/adapters/inference/together/together.py +0 -143
  649. llama_stack/providers/adapters/memory/chroma/chroma.py +0 -157
  650. llama_stack/providers/adapters/memory/pgvector/config.py +0 -17
  651. llama_stack/providers/adapters/memory/pgvector/pgvector.py +0 -211
  652. llama_stack/providers/adapters/memory/sample/sample.py +0 -23
  653. llama_stack/providers/adapters/memory/weaviate/__init__.py +0 -15
  654. llama_stack/providers/adapters/memory/weaviate/weaviate.py +0 -190
  655. llama_stack/providers/adapters/safety/bedrock/bedrock.py +0 -113
  656. llama_stack/providers/adapters/safety/bedrock/config.py +0 -16
  657. llama_stack/providers/adapters/safety/sample/sample.py +0 -23
  658. llama_stack/providers/adapters/safety/together/__init__.py +0 -18
  659. llama_stack/providers/adapters/safety/together/config.py +0 -26
  660. llama_stack/providers/adapters/safety/together/together.py +0 -101
  661. llama_stack/providers/adapters/telemetry/opentelemetry/config.py +0 -12
  662. llama_stack/providers/adapters/telemetry/opentelemetry/opentelemetry.py +0 -201
  663. llama_stack/providers/adapters/telemetry/sample/__init__.py +0 -17
  664. llama_stack/providers/adapters/telemetry/sample/config.py +0 -12
  665. llama_stack/providers/adapters/telemetry/sample/sample.py +0 -18
  666. llama_stack/providers/impls/meta_reference/agents/agent_instance.py +0 -844
  667. llama_stack/providers/impls/meta_reference/agents/agents.py +0 -161
  668. llama_stack/providers/impls/meta_reference/agents/persistence.py +0 -84
  669. llama_stack/providers/impls/meta_reference/agents/rag/context_retriever.py +0 -74
  670. llama_stack/providers/impls/meta_reference/agents/safety.py +0 -57
  671. llama_stack/providers/impls/meta_reference/agents/tests/code_execution.py +0 -93
  672. llama_stack/providers/impls/meta_reference/agents/tests/test_chat_agent.py +0 -305
  673. llama_stack/providers/impls/meta_reference/agents/tools/base.py +0 -20
  674. llama_stack/providers/impls/meta_reference/agents/tools/builtin.py +0 -375
  675. llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_env_prefix.py +0 -133
  676. llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_execution.py +0 -256
  677. llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/matplotlib_custom_backend.py +0 -87
  678. llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/utils.py +0 -21
  679. llama_stack/providers/impls/meta_reference/agents/tools/safety.py +0 -43
  680. llama_stack/providers/impls/meta_reference/codeshield/code_scanner.py +0 -58
  681. llama_stack/providers/impls/meta_reference/inference/config.py +0 -45
  682. llama_stack/providers/impls/meta_reference/inference/generation.py +0 -376
  683. llama_stack/providers/impls/meta_reference/inference/inference.py +0 -280
  684. llama_stack/providers/impls/meta_reference/inference/model_parallel.py +0 -99
  685. llama_stack/providers/impls/meta_reference/inference/quantization/fp8_impls.py +0 -184
  686. llama_stack/providers/impls/meta_reference/inference/quantization/fp8_txest_disabled.py +0 -76
  687. llama_stack/providers/impls/meta_reference/inference/quantization/loader.py +0 -97
  688. llama_stack/providers/impls/meta_reference/inference/quantization/scripts/quantize_checkpoint.py +0 -161
  689. llama_stack/providers/impls/meta_reference/memory/__init__.py +0 -19
  690. llama_stack/providers/impls/meta_reference/memory/faiss.py +0 -113
  691. llama_stack/providers/impls/meta_reference/safety/__init__.py +0 -17
  692. llama_stack/providers/impls/meta_reference/safety/base.py +0 -57
  693. llama_stack/providers/impls/meta_reference/safety/config.py +0 -48
  694. llama_stack/providers/impls/meta_reference/safety/llama_guard.py +0 -268
  695. llama_stack/providers/impls/meta_reference/safety/prompt_guard.py +0 -145
  696. llama_stack/providers/impls/meta_reference/safety/safety.py +0 -112
  697. llama_stack/providers/impls/meta_reference/telemetry/console.py +0 -89
  698. llama_stack/providers/impls/vllm/config.py +0 -35
  699. llama_stack/providers/impls/vllm/vllm.py +0 -241
  700. llama_stack/providers/registry/memory.py +0 -78
  701. llama_stack/providers/registry/telemetry.py +0 -44
  702. llama_stack/providers/tests/agents/test_agents.py +0 -210
  703. llama_stack/providers/tests/inference/test_inference.py +0 -257
  704. llama_stack/providers/tests/inference/test_prompt_adapter.py +0 -126
  705. llama_stack/providers/tests/memory/test_memory.py +0 -136
  706. llama_stack/providers/tests/resolver.py +0 -100
  707. llama_stack/providers/tests/safety/test_safety.py +0 -77
  708. llama_stack-0.0.42.dist-info/METADATA +0 -137
  709. llama_stack-0.0.42.dist-info/RECORD +0 -256
  710. /llama_stack/{distribution → core}/__init__.py +0 -0
  711. /llama_stack/{distribution/server → core/access_control}/__init__.py +0 -0
  712. /llama_stack/{distribution/utils → core/conversations}/__init__.py +0 -0
  713. /llama_stack/{providers/adapters → core/prompts}/__init__.py +0 -0
  714. /llama_stack/{providers/adapters/agents → core/routing_tables}/__init__.py +0 -0
  715. /llama_stack/{providers/adapters/inference → core/server}/__init__.py +0 -0
  716. /llama_stack/{providers/adapters/memory → core/storage}/__init__.py +0 -0
  717. /llama_stack/{providers/adapters/safety → core/ui}/__init__.py +0 -0
  718. /llama_stack/{providers/adapters/telemetry → core/ui/modules}/__init__.py +0 -0
  719. /llama_stack/{providers/impls → core/ui/page}/__init__.py +0 -0
  720. /llama_stack/{providers/impls/meta_reference → core/ui/page/distribution}/__init__.py +0 -0
  721. /llama_stack/{providers/impls/meta_reference/agents/rag → core/ui/page/evaluations}/__init__.py +0 -0
  722. /llama_stack/{providers/impls/meta_reference/agents/tests → core/ui/page/playground}/__init__.py +0 -0
  723. /llama_stack/{providers/impls/meta_reference/agents/tools → core/utils}/__init__.py +0 -0
  724. /llama_stack/{distribution → core}/utils/dynamic.py +0 -0
  725. /llama_stack/{distribution → core}/utils/serialize.py +0 -0
  726. /llama_stack/{providers/impls/meta_reference/agents/tools/ipython_tool → distributions}/__init__.py +0 -0
  727. /llama_stack/{providers/impls/meta_reference/inference/quantization → models}/__init__.py +0 -0
  728. /llama_stack/{providers/impls/meta_reference/inference/quantization/scripts → models/llama}/__init__.py +0 -0
  729. /llama_stack/{providers/tests → models/llama/llama3}/__init__.py +0 -0
  730. /llama_stack/{providers/tests/agents → models/llama/llama3/quantization}/__init__.py +0 -0
  731. /llama_stack/{providers/tests/inference → models/llama/llama3_2}/__init__.py +0 -0
  732. /llama_stack/{providers/tests/memory → models/llama/llama3_3}/__init__.py +0 -0
  733. /llama_stack/{providers/tests/safety → models/llama/llama4}/__init__.py +0 -0
  734. /llama_stack/{scripts → models/llama/llama4/prompt_templates}/__init__.py +0 -0
  735. /llama_stack/providers/{adapters → remote}/safety/bedrock/__init__.py +0 -0
  736. {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/entry_points.txt +0 -0
  737. {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info/licenses}/LICENSE +0 -0
  738. {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,141 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ import inspect
8
+ import re
9
+ from collections.abc import Callable
10
+ from typing import Any
11
+
12
+ from aiohttp import hdrs
13
+ from starlette.routing import Route
14
+
15
+ from llama_stack.apis.datatypes import Api, ExternalApiSpec
16
+ from llama_stack.apis.tools import RAGToolRuntime, SpecialToolGroup
17
+ from llama_stack.core.resolver import api_protocol_map
18
+ from llama_stack.schema_utils import WebMethod
19
+
20
# Type shorthands for the route-dispatch tables built below.
EndpointFunc = Callable[..., Any]  # handler implementation bound to a route
PathParams = dict[str, str]  # values captured from {param} path segments
RouteInfo = tuple[EndpointFunc, str, WebMethod]  # (handler, path template, webmethod metadata)
PathImpl = dict[str, RouteInfo]  # anchored path-regex -> RouteInfo
RouteImpls = dict[str, PathImpl]  # lower-cased HTTP method -> PathImpl
RouteMatch = tuple[EndpointFunc, PathParams, str, WebMethod]  # result of find_matching_route()
26
+
27
+
28
def toolgroup_protocol_map():
    """Map each special tool group to the protocol class declaring its methods."""
    return {
        SpecialToolGroup.rag_tool: RAGToolRuntime,
    }
32
+
33
+
34
def get_all_api_routes(
    external_apis: dict[Api, ExternalApiSpec] | None = None,
) -> dict[Api, list[tuple[Route, WebMethod]]]:
    """Collect every (Route, WebMethod) pair declared by the API protocols.

    Walks each protocol from api_protocol_map(), inspecting its functions for
    ``__webmethods__`` metadata, and builds one Starlette Route per decorator.

    Args:
        external_apis: optional external API specs merged into the protocol map.

    Returns:
        Mapping of Api -> list of (Route, WebMethod) tuples.
    """
    apis: dict[Api, list[tuple[Route, WebMethod]]] = {}
    toolgroup_protocols = toolgroup_protocol_map()

    for api, protocol in api_protocol_map(external_apis).items():
        members = inspect.getmembers(protocol, predicate=inspect.isfunction)

        # HACK ALERT: tool-runtime sub-protocols (e.g. rag_tool) declare their
        # methods on separate protocol classes; fold those methods in under a
        # dotted "<group>.<method>" name.
        if api == Api.tool_runtime:
            for tool_group in SpecialToolGroup:
                sub_protocol = toolgroup_protocols[tool_group]
                for sub_name, sub_method in inspect.getmembers(sub_protocol, predicate=inspect.isfunction):
                    if hasattr(sub_method, "__webmethod__"):
                        members.append((f"{tool_group.value}.{sub_name}", sub_method))

        routes: list[tuple[Route, WebMethod]] = []
        for name, method in members:
            # A method may carry several @webmethod decorators; emit one route each.
            for webmethod in getattr(method, "__webmethods__", []):
                path = f"/{webmethod.level}/{webmethod.route.lstrip('/')}"
                if webmethod.method in (hdrs.METH_GET, hdrs.METH_DELETE):
                    http_method = webmethod.method
                else:
                    http_method = hdrs.METH_POST
                # endpoint=None: these Routes are metadata only, never mounted on a Router.
                routes.append((Route(path=path, methods=[http_method], name=name, endpoint=None), webmethod))

        apis[api] = routes

    return apis
77
+
78
+
79
def initialize_route_impls(impls, external_apis: dict[Api, ExternalApiSpec] | None = None) -> RouteImpls:
    """Build the method -> {path-regex -> (handler, path, webmethod)} dispatch table.

    Args:
        impls: mapping of Api -> implementation object providing the route handlers.
        external_apis: optional external API specs forwarded to get_all_api_routes().

    Returns:
        RouteImpls keyed by lower-cased HTTP method, then by an anchored regex
        compiled from the route's path template.
    """
    api_to_routes = get_all_api_routes(external_apis)
    route_impls: RouteImpls = {}

    def _convert_path_to_regex(path: str) -> str:
        # Convert {param} to a named capture group; {param:path} must also match
        # forward slashes in the captured value.
        # FIX: detect the optional ":path" suffix via its own capture group.
        # The previous check `m.group(0).endswith(':path')` could never be true
        # because group(0) includes the trailing "}", so {param:path} wrongly
        # rejected values containing "/".
        pattern = re.sub(
            r"{(\w+)(:path)?}",
            lambda m: f"(?P<{m.group(1)}>{'.+' if m.group(2) else '[^/]+'})",
            path,
        )
        return f"^{pattern}$"

    for api, api_routes in api_to_routes.items():
        if api not in impls:
            continue
        impl = impls[api]
        for route, webmethod in api_routes:
            func = getattr(impl, route.name)
            # Starlette adds HEAD alongside GET automatically; dispatch only on
            # the real method. Get the first (and typically only) remaining one.
            available_methods = [m for m in route.methods if m != "HEAD"]
            if not available_methods:
                continue  # Skip if only HEAD method is available
            method = available_methods[0].lower()
            route_impls.setdefault(method, {})[_convert_path_to_regex(route.path)] = (
                func,
                route.path,
                webmethod,
            )

    return route_impls
114
+
115
+
116
def find_matching_route(method: str, path: str, route_impls: RouteImpls) -> RouteMatch:
    """Find the matching endpoint implementation for a given method and path.

    Args:
        method: HTTP method (GET, POST, etc.)
        path: URL path to match against
        route_impls: A dictionary of endpoint implementations

    Returns:
        A tuple of (endpoint_function, path_params, route_path, webmethod_metadata)

    Raises:
        ValueError: If no matching endpoint is found
    """
    candidates = route_impls.get(method.lower())
    if not candidates:
        raise ValueError(f"No endpoint found for {path}")

    for pattern, (endpoint, template, webmethod) in candidates.items():
        matched = re.match(pattern, path)
        if matched is None:
            continue
        # Named capture groups in the pattern become the path parameters.
        return endpoint, matched.groupdict(), template, webmethod

    raise ValueError(f"No endpoint found for {path}")
@@ -0,0 +1,542 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ import asyncio
8
+ import concurrent.futures
9
+ import functools
10
+ import inspect
11
+ import json
12
+ import logging # allow-direct-logging
13
+ import os
14
+ import sys
15
+ import traceback
16
+ import warnings
17
+ from collections.abc import Callable
18
+ from contextlib import asynccontextmanager
19
+ from importlib.metadata import version as parse_version
20
+ from pathlib import Path
21
+ from typing import Annotated, Any, get_origin
22
+
23
+ import httpx
24
+ import rich.pretty
25
+ import yaml
26
+ from fastapi import Body, FastAPI, HTTPException, Request, Response
27
+ from fastapi import Path as FastapiPath
28
+ from fastapi.exceptions import RequestValidationError
29
+ from fastapi.middleware.cors import CORSMiddleware
30
+ from fastapi.responses import JSONResponse, StreamingResponse
31
+ from openai import BadRequestError
32
+ from pydantic import BaseModel, ValidationError
33
+
34
+ from llama_stack.apis.common.errors import ConflictError, ResourceNotFoundError
35
+ from llama_stack.apis.common.responses import PaginatedResponse
36
+ from llama_stack.core.access_control.access_control import AccessDeniedError
37
+ from llama_stack.core.datatypes import (
38
+ AuthenticationRequiredError,
39
+ LoggingConfig,
40
+ StackRunConfig,
41
+ process_cors_config,
42
+ )
43
+ from llama_stack.core.distribution import builtin_automatically_routed_apis
44
+ from llama_stack.core.external import load_external_apis
45
+ from llama_stack.core.request_headers import (
46
+ PROVIDER_DATA_VAR,
47
+ request_provider_data_context,
48
+ user_from_scope,
49
+ )
50
+ from llama_stack.core.server.routes import get_all_api_routes
51
+ from llama_stack.core.stack import (
52
+ Stack,
53
+ cast_image_name_to_string,
54
+ replace_env_vars,
55
+ )
56
+ from llama_stack.core.utils.config import redact_sensitive_fields
57
+ from llama_stack.core.utils.config_resolution import Mode, resolve_config_or_distro
58
+ from llama_stack.core.utils.context import preserve_contexts_async_generator
59
+ from llama_stack.log import get_logger, setup_logging
60
+ from llama_stack.providers.datatypes import Api
61
+ from llama_stack.providers.inline.telemetry.meta_reference.config import TelemetryConfig
62
+ from llama_stack.providers.inline.telemetry.meta_reference.telemetry import (
63
+ TelemetryAdapter,
64
+ )
65
+ from llama_stack.providers.utils.telemetry.tracing import (
66
+ CURRENT_TRACE_CONTEXT,
67
+ setup_logger,
68
+ )
69
+
70
+ from .auth import AuthenticationMiddleware
71
+ from .quota import QuotaMiddleware
72
+ from .tracing import TracingMiddleware
73
+
74
# Repository root, four directory levels above this module.
REPO_ROOT = Path(__file__).parent.parent.parent.parent

logger = get_logger(name=__name__, category="core::server")
77
+
78
+
79
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning hook that prefixes each warning with the call stack."""
    dest = file if hasattr(file, "write") else sys.stderr
    traceback.print_stack(file=dest)
    dest.write(warnings.formatwarning(message, category, filename, lineno, line))


# Opt-in debugging aid: show where every warning originates.
if os.environ.get("LLAMA_STACK_TRACE_WARNINGS"):
    warnings.showwarning = warn_with_traceback
87
+
88
+
89
def create_sse_event(data: Any) -> str:
    """Serialize *data* as a single Server-Sent Events frame."""
    payload = data.model_dump_json() if isinstance(data, BaseModel) else json.dumps(data)
    return f"data: {payload}\n\n"
96
+
97
+
98
async def global_exception_handler(request: Request, exc: Exception):
    """Top-level handler: log the traceback, then map the exception to a JSON error body."""
    traceback.print_exception(exc)
    mapped = translate_exception(exc)
    return JSONResponse(
        status_code=mapped.status_code,
        content={"error": {"detail": mapped.detail}},
    )
103
+
104
+
105
def translate_exception(exc: Exception) -> HTTPException | RequestValidationError:
    """Map an arbitrary exception to the HTTPException returned to the client.

    NOTE(review): branch order matters here — more specific exception classes
    must be tested before broader ones (e.g. the project errors before
    ValueError). Do not reorder without checking the exception hierarchy.
    """
    # Normalize pydantic validation failures into FastAPI's request-validation shape.
    if isinstance(exc, ValidationError):
        exc = RequestValidationError(exc.errors())

    if isinstance(exc, RequestValidationError):
        return HTTPException(
            status_code=httpx.codes.BAD_REQUEST,
            detail={
                "errors": [
                    {
                        "loc": list(error["loc"]),
                        "msg": error["msg"],
                        "type": error["type"],
                    }
                    for error in exc.errors()
                ]
            },
        )
    elif isinstance(exc, ConflictError):
        return HTTPException(status_code=httpx.codes.CONFLICT, detail=str(exc))
    elif isinstance(exc, ResourceNotFoundError):
        return HTTPException(status_code=httpx.codes.NOT_FOUND, detail=str(exc))
    elif isinstance(exc, ValueError):
        return HTTPException(status_code=httpx.codes.BAD_REQUEST, detail=f"Invalid value: {str(exc)}")
    elif isinstance(exc, BadRequestError):
        # openai.BadRequestError surfaced from provider SDK calls.
        return HTTPException(status_code=httpx.codes.BAD_REQUEST, detail=str(exc))
    elif isinstance(exc, PermissionError | AccessDeniedError):
        return HTTPException(status_code=httpx.codes.FORBIDDEN, detail=f"Permission denied: {str(exc)}")
    elif isinstance(exc, ConnectionError | httpx.ConnectError):
        return HTTPException(status_code=httpx.codes.BAD_GATEWAY, detail=str(exc))
    elif isinstance(exc, asyncio.TimeoutError | TimeoutError):
        return HTTPException(status_code=httpx.codes.GATEWAY_TIMEOUT, detail=f"Operation timed out: {str(exc)}")
    elif isinstance(exc, NotImplementedError):
        return HTTPException(status_code=httpx.codes.NOT_IMPLEMENTED, detail=f"Not implemented: {str(exc)}")
    elif isinstance(exc, AuthenticationRequiredError):
        return HTTPException(status_code=httpx.codes.UNAUTHORIZED, detail=f"Authentication required: {str(exc)}")
    elif hasattr(exc, "status_code") and isinstance(getattr(exc, "status_code", None), int):
        # Handle provider SDK exceptions (e.g., OpenAI's APIStatusError and subclasses)
        # These include AuthenticationError (401), PermissionDeniedError (403), etc.
        # This preserves the actual HTTP status code from the provider
        status_code = exc.status_code
        detail = str(exc)
        return HTTPException(status_code=status_code, detail=detail)
    else:
        # Fall-through: never leak internal details for unexpected errors.
        return HTTPException(
            status_code=httpx.codes.INTERNAL_SERVER_ERROR,
            detail="Internal server error: An unexpected error occurred.",
        )
153
+
154
+
155
class StackApp(FastAPI):
    """
    FastAPI subclass holding a reference to the Stack instance so that background
    tasks (e.g. periodic model-registry refresh) can be started from the lifespan
    context manager.
    """

    def __init__(self, config: StackRunConfig, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.stack: Stack = Stack(config)

        # uvicorn already runs an event loop here, so neither `await` nor a
        # direct asyncio.run() is usable in this synchronous __init__.
        # As a workaround, run initialize() to completion on a worker thread.
        with concurrent.futures.ThreadPoolExecutor() as pool:
            pool.submit(asyncio.run, self.stack.initialize()).result()
173
+
174
+
175
@asynccontextmanager
async def lifespan(app: StackApp):
    """FastAPI lifespan hook: start stack background tasks; shut the stack down on exit."""
    logger.info("Starting up")
    assert app.stack is not None  # StackApp.__init__ always constructs the stack
    app.stack.create_registry_refresh_task()
    yield
    logger.info("Shutting down")
    await app.stack.shutdown()
183
+
184
+
185
def is_streaming_request(func_name: str, request: Request, **kwargs):
    """Best-effort check of whether the caller requested a streaming response.

    Looks for a top-level ``stream`` kwarg first, then for a ``stream``
    attribute on a ``params`` object (e.g. openai_chat_completion()).
    """
    # TODO: pass the api method and punt it to the Protocol definition directly
    if "stream" in kwargs:
        return kwargs["stream"]

    params = kwargs.get("params")
    if params is not None and hasattr(params, "stream"):
        return params.stream

    return False
198
+
199
+
200
async def maybe_await(value):
    """Await *value* if it is a coroutine; otherwise return it unchanged."""
    return await value if inspect.iscoroutine(value) else value
204
+
205
+
206
async def sse_generator(event_gen_coroutine):
    """Drive an endpoint's async event generator and yield SSE-formatted strings.

    Args:
        event_gen_coroutine: A coroutine that resolves to an async generator of events.

    Yields:
        Each event formatted via ``create_sse_event``. On an unexpected error a
        single SSE error payload (message from ``translate_exception``) is emitted
        instead of raising, so the client sees a well-formed stream.

    Fix: the underlying generator is now closed in a ``finally`` block. Previously
    it was only closed on CancelledError, so a non-cancellation exception raised
    mid-stream leaked the provider's generator (and whatever resources it held).
    ``aclose()`` on an already-finished generator is a no-op, so this is safe on
    the normal-completion path too.
    """
    event_gen = None
    try:
        event_gen = await event_gen_coroutine
        async for item in event_gen:
            yield create_sse_event(item)
    except asyncio.CancelledError:
        # Client disconnected; nothing to emit.
        logger.info("Generator cancelled")
    except Exception as e:
        logger.exception("Error in sse_generator")
        yield create_sse_event(
            {
                "error": {
                    "message": str(translate_exception(e)),
                },
            }
        )
    finally:
        if event_gen is not None:
            await event_gen.aclose()
225
+
226
+
227
async def log_request_pre_validation(request: Request):
    """Debug-log the raw body of mutating requests before FastAPI validation runs.

    JSON bodies are pretty-printed; anything undecodable falls back to repr().
    Failures to read or log the body are downgraded to a warning — logging must
    never break request handling.
    """
    if request.method not in ("POST", "PUT", "PATCH"):
        return

    try:
        raw_body = await request.body()
        if not raw_body:
            logger.debug(f"Incoming {request.method} {request.url.path} request with empty body.")
            return
        try:
            rendered = rich.pretty.pretty_repr(json.loads(raw_body.decode()))
        except (json.JSONDecodeError, UnicodeDecodeError):
            rendered = repr(raw_body)
        logger.debug(f"Incoming raw request body for {request.method} {request.url.path}:\n{rendered}")
    except Exception as e:
        logger.warning(f"Could not read or log request body for {request.method} {request.url.path}: {e}")
242
+
243
+
244
def create_dynamic_typed_route(func: Any, method: str, route: str) -> Callable:
    """Wrap an implementation method *func* as a FastAPI route handler.

    The returned handler:
      * resolves the authenticated user from the ASGI scope,
      * runs inside the provider-data/auth context manager,
      * dispatches to an SSE StreamingResponse or a plain awaited result
        depending on ``is_streaming_request``,
      * translates exceptions via ``translate_exception`` before re-raising.

    It also rewrites the handler's signature so FastAPI binds path params via
    Path(...) and (for POST) body params via Body(..., embed=True).

    Args:
        func: The implementation coroutine/function to expose.
        method: Lowercased HTTP method (e.g. "post"), used for DELETE/POST special-casing.
        route: The route template, used for path-param extraction and logging.
    """

    @functools.wraps(func)
    async def route_handler(request: Request, **kwargs):
        # Get auth attributes from the request scope
        user = user_from_scope(request.scope)

        await log_request_pre_validation(request)

        # Test-mode context bookkeeping; populated only when the env var below is set.
        test_context_token = None
        test_context_var = None
        reset_test_context_fn = None

        # Use context manager with both provider data and auth attributes
        with request_provider_data_context(request.headers, user):
            if os.environ.get("LLAMA_STACK_TEST_INFERENCE_MODE"):
                # Imported lazily so test-only machinery is not loaded in production.
                from llama_stack.core.testing_context import (
                    TEST_CONTEXT,
                    reset_test_context,
                    sync_test_context_from_provider_data,
                )

                test_context_token = sync_test_context_from_provider_data()
                test_context_var = TEST_CONTEXT
                reset_test_context_fn = reset_test_context

            is_streaming = is_streaming_request(func.__name__, request, **kwargs)

            try:
                if is_streaming:
                    # Context vars must be propagated into the generator, since it is
                    # consumed after this handler (and its context managers) return.
                    context_vars = [CURRENT_TRACE_CONTEXT, PROVIDER_DATA_VAR]
                    if test_context_var is not None:
                        context_vars.append(test_context_var)
                    gen = preserve_contexts_async_generator(sse_generator(func(**kwargs)), context_vars)
                    return StreamingResponse(gen, media_type="text/event-stream")
                else:
                    value = func(**kwargs)
                    result = await maybe_await(value)
                    # Backfill the request route on paginated payloads that did not set it.
                    if isinstance(result, PaginatedResponse) and result.url is None:
                        result.url = route

                    # DELETE with no payload maps to 204 No Content.
                    if method.upper() == "DELETE" and result is None:
                        return Response(status_code=httpx.codes.NO_CONTENT)

                    return result
            except Exception as e:
                # Full traceback only at INFO or finer; terse message otherwise.
                if logger.isEnabledFor(logging.INFO):
                    logger.exception(f"Error executing endpoint {route=} {method=}")
                else:
                    logger.error(f"Error executing endpoint {route=} {method=}: {str(e)}")
                raise translate_exception(e) from e
            finally:
                # Always restore the test context, success or failure.
                if test_context_token is not None and reset_test_context_fn is not None:
                    reset_test_context_fn(test_context_token)

    sig = inspect.signature(func)

    # Prepend the Request parameter the wrapper needs, then the impl's own params.
    new_params = [inspect.Parameter("request", inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=Request)]
    new_params.extend(sig.parameters.values())

    path_params = extract_path_params(route)
    if method == "post":
        # Annotate parameters that are in the path with Path(...) and others with Body(...),
        # but preserve existing File() and Form() annotations for multipart form data
        new_params = (
            [new_params[0]]
            + [
                (
                    param.replace(annotation=Annotated[param.annotation, FastapiPath(..., title=param.name)])
                    if param.name in path_params
                    else (
                        param  # Keep original annotation if it's already an Annotated type
                        if get_origin(param.annotation) is Annotated
                        else param.replace(annotation=Annotated[param.annotation, Body(..., embed=True)])
                    )
                )
                for param in new_params[1:]
            ]
        )

    # FastAPI reads __signature__ to build validation and OpenAPI docs.
    route_handler.__signature__ = sig.replace(parameters=new_params)

    return route_handler
326
+
327
+
328
class ClientVersionMiddleware:
    """ASGI middleware that rejects HTTP requests from clients whose major.minor
    version differs from the server's installed llama-stack version.

    Requests without an ``X-LlamaStack-Client-Version`` header, non-HTTP scopes,
    and unparseable version strings are passed through untouched.
    """

    def __init__(self, app):
        self.app = app
        self.server_version = parse_version("llama-stack")

    @staticmethod
    def _major_minor(version: str) -> tuple[int, ...]:
        # Only the first two dotted components participate in the check.
        return tuple(map(int, version.split(".")[:2]))

    async def _send_version_error(self, send, client_version):
        # 426 Upgrade Required with a JSON error body; the response ends here
        # and the wrapped app is never invoked.
        await send(
            {
                "type": "http.response.start",
                "status": httpx.codes.UPGRADE_REQUIRED,
                "headers": [[b"content-type", b"application/json"]],
            }
        )
        body = json.dumps(
            {
                "error": {
                    "message": f"Client version {client_version} is not compatible with server version {self.server_version}. Please update your client."
                }
            }
        ).encode()
        await send({"type": "http.response.body", "body": body})

    async def __call__(self, scope, receive, send):
        if scope["type"] == "http":
            headers = dict(scope.get("headers", []))
            client_version = headers.get(b"x-llamastack-client-version", b"").decode()
            if client_version:
                try:
                    if self._major_minor(client_version) != self._major_minor(self.server_version):
                        return await self._send_version_error(send, client_version)
                except (ValueError, IndexError):
                    # If version parsing fails, let the request through
                    pass

        return await self.app(scope, receive, send)
366
+
367
+
368
def create_app() -> StackApp:
    """Create and configure the FastAPI application.

    This factory function reads configuration from environment variables:
    - LLAMA_STACK_CONFIG: Path to config file (required)
    - LLAMA_STACK_DISABLE_VERSION_CHECK: if set, skips the client-version middleware

    Returns:
        Configured StackApp instance.

    Raises:
        ValueError: if LLAMA_STACK_CONFIG is unset, or an API to be served has no
            provider implementation, or a route has no implementing method.
    """
    # Initialize logging from environment variables first
    setup_logging()

    config_file = os.getenv("LLAMA_STACK_CONFIG")
    if config_file is None:
        raise ValueError("LLAMA_STACK_CONFIG environment variable is required")

    config_file = resolve_config_or_distro(config_file, Mode.RUN)

    # Load and process configuration
    logger_config = None
    with open(config_file) as fp:
        config_contents = yaml.safe_load(fp)
        if isinstance(config_contents, dict) and (cfg := config_contents.get("logging_config")):
            logger_config = LoggingConfig(**cfg)
        # NOTE(review): rebinds the name `logger` locally with config-driven settings;
        # the module-level logger is untouched outside this function.
        logger = get_logger(name=__name__, category="core::server", config=logger_config)

        config = replace_env_vars(config_contents)
        config = StackRunConfig(**cast_image_name_to_string(config))

    _log_run_config(run_config=config)

    # StackApp.__init__ synchronously initializes the Stack before the server starts.
    app = StackApp(
        lifespan=lifespan,
        docs_url="/docs",
        redoc_url="/redoc",
        openapi_url="/openapi.json",
        config=config,
    )

    if not os.environ.get("LLAMA_STACK_DISABLE_VERSION_CHECK"):
        app.add_middleware(ClientVersionMiddleware)

    impls = app.stack.impls

    if config.server.auth:
        logger.info(f"Enabling authentication with provider: {config.server.auth.provider_config.type.value}")
        app.add_middleware(AuthenticationMiddleware, auth_config=config.server.auth, impls=impls)
    else:
        if config.server.quota:
            quota = config.server.quota
            logger.warning(
                "Configured authenticated_max_requests (%d) but no auth is enabled; "
                "falling back to anonymous_max_requests (%d) for all the requests",
                quota.authenticated_max_requests,
                quota.anonymous_max_requests,
            )

    if config.server.quota:
        logger.info("Enabling quota middleware for authenticated and anonymous clients")

        quota = config.server.quota
        anonymous_max_requests = quota.anonymous_max_requests
        # if auth is disabled, use the anonymous max requests
        authenticated_max_requests = quota.authenticated_max_requests if config.server.auth else anonymous_max_requests

        kv_config = quota.kvstore
        # NOTE(review): only "day" windows are supported here — any other quota
        # period value raises KeyError at startup.
        window_map = {"day": 86400}
        window_seconds = window_map[quota.period.value]

        app.add_middleware(
            QuotaMiddleware,
            kv_config=kv_config,
            anonymous_max_requests=anonymous_max_requests,
            authenticated_max_requests=authenticated_max_requests,
            window_seconds=window_seconds,
        )

    if config.server.cors:
        logger.info("Enabling CORS")
        cors_config = process_cors_config(config.server.cors)
        if cors_config:
            app.add_middleware(CORSMiddleware, **cors_config.model_dump())

    # Telemetry sink: real adapter when enabled, a no-op default config otherwise.
    if config.telemetry.enabled:
        setup_logger(impls[Api.telemetry])
    else:
        setup_logger(TelemetryAdapter(TelemetryConfig(), {}))

    # Load external APIs if configured
    external_apis = load_external_apis(config)
    all_routes = get_all_api_routes(external_apis)

    if config.apis:
        apis_to_serve = set(config.apis)
    else:
        apis_to_serve = set(impls.keys())

    for inf in builtin_automatically_routed_apis():
        # if we do not serve the corresponding router API, we should not serve the routing table API
        if inf.router_api.value not in apis_to_serve:
            continue
        apis_to_serve.add(inf.routing_table_api.value)

    # These APIs are always served regardless of the configured list.
    apis_to_serve.add("inspect")
    apis_to_serve.add("providers")
    apis_to_serve.add("prompts")
    apis_to_serve.add("conversations")
    for api_str in apis_to_serve:
        api = Api(api_str)

        routes = all_routes[api]
        try:
            impl = impls[api]
        except KeyError as e:
            raise ValueError(f"Could not find provider implementation for {api} API") from e

        for route, _ in routes:
            if not hasattr(impl, route.name):
                # ideally this should be a typing violation already
                raise ValueError(f"Could not find method {route.name} on {impl}!")

            impl_method = getattr(impl, route.name)
            # Filter out HEAD method since it's automatically handled by FastAPI for GET routes
            available_methods = [m for m in route.methods if m != "HEAD"]
            if not available_methods:
                raise ValueError(f"No methods found for {route.name} on {impl}")
            # NOTE(review): only the first remaining method is registered — assumes
            # one method per route after HEAD filtering.
            method = available_methods[0]
            logger.debug(f"{method} {route.path}")

            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=UserWarning, module="pydantic._internal._fields")
                getattr(app, method.lower())(route.path, response_model=None)(
                    create_dynamic_typed_route(
                        impl_method,
                        method.lower(),
                        route.path,
                    )
                )

    logger.debug(f"serving APIs: {apis_to_serve}")

    # One handler covers both validation errors and uncaught exceptions.
    app.exception_handler(RequestValidationError)(global_exception_handler)
    app.exception_handler(Exception)(global_exception_handler)

    app.add_middleware(TracingMiddleware, impls=impls, external_apis=external_apis)

    return app
515
+
516
+
517
def _log_run_config(run_config: StackRunConfig):
    """Logs the run config with redacted fields and disabled providers removed."""
    logger.info("Run configuration:")
    redacted = redact_sensitive_fields(run_config.model_dump(mode="json"))
    logger.info(yaml.dump(remove_disabled_providers(redacted), indent=2))
523
+
524
+
525
def extract_path_params(route: str) -> list[str]:
    """Return the parameter names embedded in a route template.

    ``"/models/{model_id}"`` yields ``["model_id"]``; converter suffixes are
    stripped, so ``"{name:path}"`` yields ``["name"]``.
    """
    return [
        segment[1:-1].split(":")[0]
        for segment in route.split("/")
        if segment.startswith("{") and segment.endswith("}")
    ]
531
+
532
+
533
def remove_disabled_providers(obj):
    """Recursively prune disabled entries from a config structure.

    Any dict whose provider/shield/model identifier is ``"__disabled__"``, empty,
    or None is dropped (replaced by None and filtered out by the parent).
    Lists and dicts are walked recursively; all other values pass through.
    """
    if isinstance(obj, dict):
        for marker in ("provider_id", "shield_id", "provider_model_id", "model_id"):
            if marker in obj and obj[marker] in ("__disabled__", "", None):
                return None
        pruned = {}
        for key, value in obj.items():
            cleaned = remove_disabled_providers(value)
            if cleaned is not None:
                pruned[key] = cleaned
        return pruned
    elif isinstance(obj, list):
        kept = []
        for entry in obj:
            cleaned = remove_disabled_providers(entry)
            if cleaned is not None:
                kept.append(cleaned)
        return kept
    else:
        return obj