llama-stack 0.3.4__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (458)
  1. llama_stack/__init__.py +0 -5
  2. llama_stack/cli/llama.py +3 -3
  3. llama_stack/cli/stack/_list_deps.py +12 -23
  4. llama_stack/cli/stack/list_stacks.py +37 -18
  5. llama_stack/cli/stack/run.py +121 -11
  6. llama_stack/cli/stack/utils.py +0 -127
  7. llama_stack/core/access_control/access_control.py +69 -28
  8. llama_stack/core/access_control/conditions.py +15 -5
  9. llama_stack/core/admin.py +267 -0
  10. llama_stack/core/build.py +6 -74
  11. llama_stack/core/client.py +1 -1
  12. llama_stack/core/configure.py +6 -6
  13. llama_stack/core/conversations/conversations.py +28 -25
  14. llama_stack/core/datatypes.py +271 -79
  15. llama_stack/core/distribution.py +15 -16
  16. llama_stack/core/external.py +3 -3
  17. llama_stack/core/inspect.py +98 -15
  18. llama_stack/core/library_client.py +73 -61
  19. llama_stack/core/prompts/prompts.py +12 -11
  20. llama_stack/core/providers.py +17 -11
  21. llama_stack/core/resolver.py +65 -56
  22. llama_stack/core/routers/__init__.py +8 -12
  23. llama_stack/core/routers/datasets.py +1 -4
  24. llama_stack/core/routers/eval_scoring.py +7 -4
  25. llama_stack/core/routers/inference.py +55 -271
  26. llama_stack/core/routers/safety.py +52 -24
  27. llama_stack/core/routers/tool_runtime.py +6 -48
  28. llama_stack/core/routers/vector_io.py +130 -51
  29. llama_stack/core/routing_tables/benchmarks.py +24 -20
  30. llama_stack/core/routing_tables/common.py +1 -4
  31. llama_stack/core/routing_tables/datasets.py +22 -22
  32. llama_stack/core/routing_tables/models.py +119 -6
  33. llama_stack/core/routing_tables/scoring_functions.py +7 -7
  34. llama_stack/core/routing_tables/shields.py +1 -2
  35. llama_stack/core/routing_tables/toolgroups.py +17 -7
  36. llama_stack/core/routing_tables/vector_stores.py +51 -16
  37. llama_stack/core/server/auth.py +5 -3
  38. llama_stack/core/server/auth_providers.py +36 -20
  39. llama_stack/core/server/fastapi_router_registry.py +84 -0
  40. llama_stack/core/server/quota.py +2 -2
  41. llama_stack/core/server/routes.py +79 -27
  42. llama_stack/core/server/server.py +102 -87
  43. llama_stack/core/stack.py +201 -58
  44. llama_stack/core/storage/datatypes.py +26 -3
  45. llama_stack/{providers/utils → core/storage}/kvstore/__init__.py +2 -0
  46. llama_stack/{providers/utils → core/storage}/kvstore/kvstore.py +55 -24
  47. llama_stack/{providers/utils → core/storage}/kvstore/mongodb/mongodb.py +13 -10
  48. llama_stack/{providers/utils → core/storage}/kvstore/postgres/postgres.py +28 -17
  49. llama_stack/{providers/utils → core/storage}/kvstore/redis/redis.py +41 -16
  50. llama_stack/{providers/utils → core/storage}/kvstore/sqlite/sqlite.py +1 -1
  51. llama_stack/core/storage/sqlstore/__init__.py +17 -0
  52. llama_stack/{providers/utils → core/storage}/sqlstore/authorized_sqlstore.py +69 -49
  53. llama_stack/{providers/utils → core/storage}/sqlstore/sqlalchemy_sqlstore.py +47 -17
  54. llama_stack/{providers/utils → core/storage}/sqlstore/sqlstore.py +25 -8
  55. llama_stack/core/store/registry.py +1 -1
  56. llama_stack/core/utils/config.py +8 -2
  57. llama_stack/core/utils/config_resolution.py +32 -29
  58. llama_stack/core/utils/context.py +4 -10
  59. llama_stack/core/utils/exec.py +9 -0
  60. llama_stack/core/utils/type_inspection.py +45 -0
  61. llama_stack/distributions/dell/{run.yaml → config.yaml} +3 -2
  62. llama_stack/distributions/dell/dell.py +2 -2
  63. llama_stack/distributions/dell/run-with-safety.yaml +3 -2
  64. llama_stack/distributions/meta-reference-gpu/{run.yaml → config.yaml} +3 -2
  65. llama_stack/distributions/meta-reference-gpu/meta_reference.py +2 -2
  66. llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +3 -2
  67. llama_stack/distributions/nvidia/{run.yaml → config.yaml} +4 -4
  68. llama_stack/distributions/nvidia/nvidia.py +1 -1
  69. llama_stack/distributions/nvidia/run-with-safety.yaml +4 -4
  70. llama_stack/{apis/datasetio → distributions/oci}/__init__.py +1 -1
  71. llama_stack/distributions/oci/config.yaml +134 -0
  72. llama_stack/distributions/oci/oci.py +108 -0
  73. llama_stack/distributions/open-benchmark/{run.yaml → config.yaml} +5 -4
  74. llama_stack/distributions/open-benchmark/open_benchmark.py +2 -3
  75. llama_stack/distributions/postgres-demo/{run.yaml → config.yaml} +4 -3
  76. llama_stack/distributions/starter/{run.yaml → config.yaml} +64 -13
  77. llama_stack/distributions/starter/run-with-postgres-store.yaml +64 -13
  78. llama_stack/distributions/starter/starter.py +8 -5
  79. llama_stack/distributions/starter-gpu/{run.yaml → config.yaml} +64 -13
  80. llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +64 -13
  81. llama_stack/distributions/template.py +13 -69
  82. llama_stack/distributions/watsonx/{run.yaml → config.yaml} +4 -3
  83. llama_stack/distributions/watsonx/watsonx.py +1 -1
  84. llama_stack/log.py +28 -11
  85. llama_stack/models/llama/checkpoint.py +6 -6
  86. llama_stack/models/llama/hadamard_utils.py +2 -0
  87. llama_stack/models/llama/llama3/generation.py +3 -1
  88. llama_stack/models/llama/llama3/interface.py +2 -5
  89. llama_stack/models/llama/llama3/multimodal/encoder_utils.py +3 -3
  90. llama_stack/models/llama/llama3/multimodal/image_transform.py +6 -6
  91. llama_stack/models/llama/llama3/prompt_templates/system_prompts.py +1 -1
  92. llama_stack/models/llama/llama3/tool_utils.py +2 -1
  93. llama_stack/models/llama/llama4/prompt_templates/system_prompts.py +1 -1
  94. llama_stack/providers/inline/agents/meta_reference/__init__.py +3 -3
  95. llama_stack/providers/inline/agents/meta_reference/agents.py +44 -261
  96. llama_stack/providers/inline/agents/meta_reference/config.py +6 -1
  97. llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +207 -57
  98. llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +308 -47
  99. llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py +162 -96
  100. llama_stack/providers/inline/agents/meta_reference/responses/types.py +23 -8
  101. llama_stack/providers/inline/agents/meta_reference/responses/utils.py +201 -33
  102. llama_stack/providers/inline/agents/meta_reference/safety.py +8 -13
  103. llama_stack/providers/inline/batches/reference/__init__.py +2 -4
  104. llama_stack/providers/inline/batches/reference/batches.py +78 -60
  105. llama_stack/providers/inline/datasetio/localfs/datasetio.py +2 -5
  106. llama_stack/providers/inline/eval/meta_reference/eval.py +16 -61
  107. llama_stack/providers/inline/files/localfs/files.py +37 -28
  108. llama_stack/providers/inline/inference/meta_reference/config.py +2 -2
  109. llama_stack/providers/inline/inference/meta_reference/generators.py +50 -60
  110. llama_stack/providers/inline/inference/meta_reference/inference.py +403 -19
  111. llama_stack/providers/inline/inference/meta_reference/model_parallel.py +7 -26
  112. llama_stack/providers/inline/inference/meta_reference/parallel_utils.py +2 -12
  113. llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +10 -15
  114. llama_stack/providers/inline/post_training/common/validator.py +1 -5
  115. llama_stack/providers/inline/post_training/huggingface/post_training.py +8 -8
  116. llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py +18 -10
  117. llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py +12 -9
  118. llama_stack/providers/inline/post_training/huggingface/utils.py +27 -6
  119. llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +1 -1
  120. llama_stack/providers/inline/post_training/torchtune/common/utils.py +1 -1
  121. llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +1 -1
  122. llama_stack/providers/inline/post_training/torchtune/post_training.py +8 -8
  123. llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +16 -16
  124. llama_stack/providers/inline/safety/code_scanner/code_scanner.py +13 -9
  125. llama_stack/providers/inline/safety/llama_guard/llama_guard.py +18 -15
  126. llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py +9 -9
  127. llama_stack/providers/inline/scoring/basic/scoring.py +6 -13
  128. llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py +1 -2
  129. llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py +1 -2
  130. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/docvqa.py +2 -2
  131. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/equality.py +2 -2
  132. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/ifeval.py +2 -2
  133. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_math_response.py +2 -2
  134. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py +2 -2
  135. llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/subset_of.py +2 -2
  136. llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py +1 -2
  137. llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py +1 -2
  138. llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py +1 -2
  139. llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py +1 -2
  140. llama_stack/providers/inline/scoring/braintrust/braintrust.py +12 -15
  141. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_correctness.py +2 -2
  142. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_relevancy.py +2 -2
  143. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_similarity.py +2 -2
  144. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_entity_recall.py +2 -2
  145. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_precision.py +2 -2
  146. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_recall.py +2 -2
  147. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_relevancy.py +2 -2
  148. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/factuality.py +2 -2
  149. llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/faithfulness.py +2 -2
  150. llama_stack/providers/inline/scoring/llm_as_judge/scoring.py +7 -14
  151. llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +2 -2
  152. llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +1 -2
  153. llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +1 -3
  154. llama_stack/providers/inline/tool_runtime/rag/__init__.py +1 -1
  155. llama_stack/providers/inline/tool_runtime/rag/config.py +8 -1
  156. llama_stack/providers/inline/tool_runtime/rag/context_retriever.py +7 -6
  157. llama_stack/providers/inline/tool_runtime/rag/memory.py +64 -48
  158. llama_stack/providers/inline/vector_io/chroma/__init__.py +1 -1
  159. llama_stack/providers/inline/vector_io/chroma/config.py +1 -1
  160. llama_stack/providers/inline/vector_io/faiss/__init__.py +1 -1
  161. llama_stack/providers/inline/vector_io/faiss/config.py +1 -1
  162. llama_stack/providers/inline/vector_io/faiss/faiss.py +43 -28
  163. llama_stack/providers/inline/vector_io/milvus/__init__.py +1 -1
  164. llama_stack/providers/inline/vector_io/milvus/config.py +1 -1
  165. llama_stack/providers/inline/vector_io/qdrant/__init__.py +1 -1
  166. llama_stack/providers/inline/vector_io/qdrant/config.py +1 -1
  167. llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +1 -1
  168. llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +40 -33
  169. llama_stack/providers/registry/agents.py +7 -3
  170. llama_stack/providers/registry/batches.py +1 -1
  171. llama_stack/providers/registry/datasetio.py +1 -1
  172. llama_stack/providers/registry/eval.py +1 -1
  173. llama_stack/{apis/datasets/__init__.py → providers/registry/file_processors.py} +5 -1
  174. llama_stack/providers/registry/files.py +11 -2
  175. llama_stack/providers/registry/inference.py +22 -3
  176. llama_stack/providers/registry/post_training.py +1 -1
  177. llama_stack/providers/registry/safety.py +1 -1
  178. llama_stack/providers/registry/scoring.py +1 -1
  179. llama_stack/providers/registry/tool_runtime.py +2 -2
  180. llama_stack/providers/registry/vector_io.py +7 -7
  181. llama_stack/providers/remote/datasetio/huggingface/huggingface.py +2 -5
  182. llama_stack/providers/remote/datasetio/nvidia/datasetio.py +1 -4
  183. llama_stack/providers/remote/eval/nvidia/eval.py +15 -9
  184. llama_stack/providers/remote/files/openai/__init__.py +19 -0
  185. llama_stack/providers/remote/files/openai/config.py +28 -0
  186. llama_stack/providers/remote/files/openai/files.py +253 -0
  187. llama_stack/providers/remote/files/s3/files.py +52 -30
  188. llama_stack/providers/remote/inference/anthropic/anthropic.py +2 -1
  189. llama_stack/providers/remote/inference/anthropic/config.py +1 -1
  190. llama_stack/providers/remote/inference/azure/azure.py +1 -3
  191. llama_stack/providers/remote/inference/azure/config.py +8 -7
  192. llama_stack/providers/remote/inference/bedrock/__init__.py +1 -1
  193. llama_stack/providers/remote/inference/bedrock/bedrock.py +82 -105
  194. llama_stack/providers/remote/inference/bedrock/config.py +24 -3
  195. llama_stack/providers/remote/inference/cerebras/cerebras.py +5 -5
  196. llama_stack/providers/remote/inference/cerebras/config.py +12 -5
  197. llama_stack/providers/remote/inference/databricks/config.py +13 -6
  198. llama_stack/providers/remote/inference/databricks/databricks.py +16 -6
  199. llama_stack/providers/remote/inference/fireworks/config.py +5 -5
  200. llama_stack/providers/remote/inference/fireworks/fireworks.py +1 -1
  201. llama_stack/providers/remote/inference/gemini/config.py +1 -1
  202. llama_stack/providers/remote/inference/gemini/gemini.py +13 -14
  203. llama_stack/providers/remote/inference/groq/config.py +5 -5
  204. llama_stack/providers/remote/inference/groq/groq.py +1 -1
  205. llama_stack/providers/remote/inference/llama_openai_compat/config.py +5 -5
  206. llama_stack/providers/remote/inference/llama_openai_compat/llama.py +8 -6
  207. llama_stack/providers/remote/inference/nvidia/__init__.py +1 -1
  208. llama_stack/providers/remote/inference/nvidia/config.py +21 -11
  209. llama_stack/providers/remote/inference/nvidia/nvidia.py +115 -3
  210. llama_stack/providers/remote/inference/nvidia/utils.py +1 -1
  211. llama_stack/providers/remote/inference/oci/__init__.py +17 -0
  212. llama_stack/providers/remote/inference/oci/auth.py +79 -0
  213. llama_stack/providers/remote/inference/oci/config.py +75 -0
  214. llama_stack/providers/remote/inference/oci/oci.py +162 -0
  215. llama_stack/providers/remote/inference/ollama/config.py +7 -5
  216. llama_stack/providers/remote/inference/ollama/ollama.py +17 -8
  217. llama_stack/providers/remote/inference/openai/config.py +4 -4
  218. llama_stack/providers/remote/inference/openai/openai.py +1 -1
  219. llama_stack/providers/remote/inference/passthrough/__init__.py +2 -2
  220. llama_stack/providers/remote/inference/passthrough/config.py +5 -10
  221. llama_stack/providers/remote/inference/passthrough/passthrough.py +97 -75
  222. llama_stack/providers/remote/inference/runpod/config.py +12 -5
  223. llama_stack/providers/remote/inference/runpod/runpod.py +2 -20
  224. llama_stack/providers/remote/inference/sambanova/config.py +5 -5
  225. llama_stack/providers/remote/inference/sambanova/sambanova.py +1 -1
  226. llama_stack/providers/remote/inference/tgi/config.py +7 -6
  227. llama_stack/providers/remote/inference/tgi/tgi.py +19 -11
  228. llama_stack/providers/remote/inference/together/config.py +5 -5
  229. llama_stack/providers/remote/inference/together/together.py +15 -12
  230. llama_stack/providers/remote/inference/vertexai/config.py +1 -1
  231. llama_stack/providers/remote/inference/vllm/config.py +5 -5
  232. llama_stack/providers/remote/inference/vllm/vllm.py +13 -14
  233. llama_stack/providers/remote/inference/watsonx/config.py +4 -4
  234. llama_stack/providers/remote/inference/watsonx/watsonx.py +21 -94
  235. llama_stack/providers/remote/post_training/nvidia/post_training.py +4 -4
  236. llama_stack/providers/remote/post_training/nvidia/utils.py +1 -1
  237. llama_stack/providers/remote/safety/bedrock/bedrock.py +6 -6
  238. llama_stack/providers/remote/safety/bedrock/config.py +1 -1
  239. llama_stack/providers/remote/safety/nvidia/config.py +1 -1
  240. llama_stack/providers/remote/safety/nvidia/nvidia.py +11 -5
  241. llama_stack/providers/remote/safety/sambanova/config.py +1 -1
  242. llama_stack/providers/remote/safety/sambanova/sambanova.py +6 -6
  243. llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +11 -6
  244. llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +12 -7
  245. llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py +8 -2
  246. llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +57 -15
  247. llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +11 -6
  248. llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +11 -6
  249. llama_stack/providers/remote/vector_io/chroma/__init__.py +1 -1
  250. llama_stack/providers/remote/vector_io/chroma/chroma.py +125 -20
  251. llama_stack/providers/remote/vector_io/chroma/config.py +1 -1
  252. llama_stack/providers/remote/vector_io/milvus/__init__.py +1 -1
  253. llama_stack/providers/remote/vector_io/milvus/config.py +1 -1
  254. llama_stack/providers/remote/vector_io/milvus/milvus.py +27 -21
  255. llama_stack/providers/remote/vector_io/pgvector/__init__.py +1 -1
  256. llama_stack/providers/remote/vector_io/pgvector/config.py +1 -1
  257. llama_stack/providers/remote/vector_io/pgvector/pgvector.py +26 -18
  258. llama_stack/providers/remote/vector_io/qdrant/__init__.py +1 -1
  259. llama_stack/providers/remote/vector_io/qdrant/config.py +1 -1
  260. llama_stack/providers/remote/vector_io/qdrant/qdrant.py +141 -24
  261. llama_stack/providers/remote/vector_io/weaviate/__init__.py +1 -1
  262. llama_stack/providers/remote/vector_io/weaviate/config.py +1 -1
  263. llama_stack/providers/remote/vector_io/weaviate/weaviate.py +26 -21
  264. llama_stack/providers/utils/common/data_schema_validator.py +1 -5
  265. llama_stack/providers/utils/files/form_data.py +1 -1
  266. llama_stack/providers/utils/inference/embedding_mixin.py +1 -1
  267. llama_stack/providers/utils/inference/inference_store.py +12 -21
  268. llama_stack/providers/utils/inference/litellm_openai_mixin.py +79 -79
  269. llama_stack/providers/utils/inference/model_registry.py +1 -3
  270. llama_stack/providers/utils/inference/openai_compat.py +44 -1171
  271. llama_stack/providers/utils/inference/openai_mixin.py +68 -42
  272. llama_stack/providers/utils/inference/prompt_adapter.py +50 -265
  273. llama_stack/providers/utils/inference/stream_utils.py +23 -0
  274. llama_stack/providers/utils/memory/__init__.py +2 -0
  275. llama_stack/providers/utils/memory/file_utils.py +1 -1
  276. llama_stack/providers/utils/memory/openai_vector_store_mixin.py +181 -84
  277. llama_stack/providers/utils/memory/vector_store.py +39 -38
  278. llama_stack/providers/utils/pagination.py +1 -1
  279. llama_stack/providers/utils/responses/responses_store.py +15 -25
  280. llama_stack/providers/utils/scoring/aggregation_utils.py +1 -2
  281. llama_stack/providers/utils/scoring/base_scoring_fn.py +1 -2
  282. llama_stack/providers/utils/tools/mcp.py +93 -11
  283. llama_stack/telemetry/constants.py +27 -0
  284. llama_stack/telemetry/helpers.py +43 -0
  285. llama_stack/testing/api_recorder.py +25 -16
  286. {llama_stack-0.3.4.dist-info → llama_stack-0.4.0.dist-info}/METADATA +56 -131
  287. llama_stack-0.4.0.dist-info/RECORD +588 -0
  288. llama_stack-0.4.0.dist-info/top_level.txt +2 -0
  289. llama_stack_api/__init__.py +945 -0
  290. llama_stack_api/admin/__init__.py +45 -0
  291. llama_stack_api/admin/api.py +72 -0
  292. llama_stack_api/admin/fastapi_routes.py +117 -0
  293. llama_stack_api/admin/models.py +113 -0
  294. llama_stack_api/agents.py +173 -0
  295. llama_stack_api/batches/__init__.py +40 -0
  296. llama_stack_api/batches/api.py +53 -0
  297. llama_stack_api/batches/fastapi_routes.py +113 -0
  298. llama_stack_api/batches/models.py +78 -0
  299. llama_stack_api/benchmarks/__init__.py +43 -0
  300. llama_stack_api/benchmarks/api.py +39 -0
  301. llama_stack_api/benchmarks/fastapi_routes.py +109 -0
  302. llama_stack_api/benchmarks/models.py +109 -0
  303. {llama_stack/apis → llama_stack_api}/common/content_types.py +1 -43
  304. {llama_stack/apis → llama_stack_api}/common/errors.py +0 -8
  305. {llama_stack/apis → llama_stack_api}/common/job_types.py +1 -1
  306. llama_stack_api/common/responses.py +77 -0
  307. {llama_stack/apis → llama_stack_api}/common/training_types.py +1 -1
  308. {llama_stack/apis → llama_stack_api}/common/type_system.py +2 -14
  309. llama_stack_api/connectors.py +146 -0
  310. {llama_stack/apis/conversations → llama_stack_api}/conversations.py +23 -39
  311. {llama_stack/apis/datasetio → llama_stack_api}/datasetio.py +4 -8
  312. llama_stack_api/datasets/__init__.py +61 -0
  313. llama_stack_api/datasets/api.py +35 -0
  314. llama_stack_api/datasets/fastapi_routes.py +104 -0
  315. llama_stack_api/datasets/models.py +152 -0
  316. {llama_stack/providers → llama_stack_api}/datatypes.py +166 -10
  317. {llama_stack/apis/eval → llama_stack_api}/eval.py +8 -40
  318. llama_stack_api/file_processors/__init__.py +27 -0
  319. llama_stack_api/file_processors/api.py +64 -0
  320. llama_stack_api/file_processors/fastapi_routes.py +78 -0
  321. llama_stack_api/file_processors/models.py +42 -0
  322. llama_stack_api/files/__init__.py +35 -0
  323. llama_stack_api/files/api.py +51 -0
  324. llama_stack_api/files/fastapi_routes.py +124 -0
  325. llama_stack_api/files/models.py +107 -0
  326. {llama_stack/apis/inference → llama_stack_api}/inference.py +90 -194
  327. llama_stack_api/inspect_api/__init__.py +37 -0
  328. llama_stack_api/inspect_api/api.py +25 -0
  329. llama_stack_api/inspect_api/fastapi_routes.py +76 -0
  330. llama_stack_api/inspect_api/models.py +28 -0
  331. {llama_stack/apis/agents → llama_stack_api/internal}/__init__.py +3 -1
  332. llama_stack/providers/utils/kvstore/api.py → llama_stack_api/internal/kvstore.py +5 -0
  333. llama_stack_api/internal/sqlstore.py +79 -0
  334. {llama_stack/apis/models → llama_stack_api}/models.py +11 -9
  335. {llama_stack/apis/agents → llama_stack_api}/openai_responses.py +184 -27
  336. {llama_stack/apis/post_training → llama_stack_api}/post_training.py +7 -11
  337. {llama_stack/apis/prompts → llama_stack_api}/prompts.py +3 -4
  338. llama_stack_api/providers/__init__.py +33 -0
  339. llama_stack_api/providers/api.py +16 -0
  340. llama_stack_api/providers/fastapi_routes.py +57 -0
  341. llama_stack_api/providers/models.py +24 -0
  342. {llama_stack/apis/tools → llama_stack_api}/rag_tool.py +2 -52
  343. {llama_stack/apis → llama_stack_api}/resource.py +1 -1
  344. llama_stack_api/router_utils.py +160 -0
  345. {llama_stack/apis/safety → llama_stack_api}/safety.py +6 -9
  346. {llama_stack → llama_stack_api}/schema_utils.py +94 -4
  347. {llama_stack/apis/scoring → llama_stack_api}/scoring.py +3 -3
  348. {llama_stack/apis/scoring_functions → llama_stack_api}/scoring_functions.py +9 -6
  349. {llama_stack/apis/shields → llama_stack_api}/shields.py +6 -7
  350. {llama_stack/apis/tools → llama_stack_api}/tools.py +26 -21
  351. {llama_stack/apis/vector_io → llama_stack_api}/vector_io.py +133 -152
  352. {llama_stack/apis/vector_stores → llama_stack_api}/vector_stores.py +1 -1
  353. llama_stack/apis/agents/agents.py +0 -894
  354. llama_stack/apis/batches/__init__.py +0 -9
  355. llama_stack/apis/batches/batches.py +0 -100
  356. llama_stack/apis/benchmarks/__init__.py +0 -7
  357. llama_stack/apis/benchmarks/benchmarks.py +0 -108
  358. llama_stack/apis/common/responses.py +0 -36
  359. llama_stack/apis/conversations/__init__.py +0 -31
  360. llama_stack/apis/datasets/datasets.py +0 -251
  361. llama_stack/apis/datatypes.py +0 -160
  362. llama_stack/apis/eval/__init__.py +0 -7
  363. llama_stack/apis/files/__init__.py +0 -7
  364. llama_stack/apis/files/files.py +0 -199
  365. llama_stack/apis/inference/__init__.py +0 -7
  366. llama_stack/apis/inference/event_logger.py +0 -43
  367. llama_stack/apis/inspect/__init__.py +0 -7
  368. llama_stack/apis/inspect/inspect.py +0 -94
  369. llama_stack/apis/models/__init__.py +0 -7
  370. llama_stack/apis/post_training/__init__.py +0 -7
  371. llama_stack/apis/prompts/__init__.py +0 -9
  372. llama_stack/apis/providers/__init__.py +0 -7
  373. llama_stack/apis/providers/providers.py +0 -69
  374. llama_stack/apis/safety/__init__.py +0 -7
  375. llama_stack/apis/scoring/__init__.py +0 -7
  376. llama_stack/apis/scoring_functions/__init__.py +0 -7
  377. llama_stack/apis/shields/__init__.py +0 -7
  378. llama_stack/apis/synthetic_data_generation/__init__.py +0 -7
  379. llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +0 -77
  380. llama_stack/apis/telemetry/__init__.py +0 -7
  381. llama_stack/apis/telemetry/telemetry.py +0 -423
  382. llama_stack/apis/tools/__init__.py +0 -8
  383. llama_stack/apis/vector_io/__init__.py +0 -7
  384. llama_stack/apis/vector_stores/__init__.py +0 -7
  385. llama_stack/core/server/tracing.py +0 -80
  386. llama_stack/core/ui/app.py +0 -55
  387. llama_stack/core/ui/modules/__init__.py +0 -5
  388. llama_stack/core/ui/modules/api.py +0 -32
  389. llama_stack/core/ui/modules/utils.py +0 -42
  390. llama_stack/core/ui/page/__init__.py +0 -5
  391. llama_stack/core/ui/page/distribution/__init__.py +0 -5
  392. llama_stack/core/ui/page/distribution/datasets.py +0 -18
  393. llama_stack/core/ui/page/distribution/eval_tasks.py +0 -20
  394. llama_stack/core/ui/page/distribution/models.py +0 -18
  395. llama_stack/core/ui/page/distribution/providers.py +0 -27
  396. llama_stack/core/ui/page/distribution/resources.py +0 -48
  397. llama_stack/core/ui/page/distribution/scoring_functions.py +0 -18
  398. llama_stack/core/ui/page/distribution/shields.py +0 -19
  399. llama_stack/core/ui/page/evaluations/__init__.py +0 -5
  400. llama_stack/core/ui/page/evaluations/app_eval.py +0 -143
  401. llama_stack/core/ui/page/evaluations/native_eval.py +0 -253
  402. llama_stack/core/ui/page/playground/__init__.py +0 -5
  403. llama_stack/core/ui/page/playground/chat.py +0 -130
  404. llama_stack/core/ui/page/playground/tools.py +0 -352
  405. llama_stack/distributions/dell/build.yaml +0 -33
  406. llama_stack/distributions/meta-reference-gpu/build.yaml +0 -32
  407. llama_stack/distributions/nvidia/build.yaml +0 -29
  408. llama_stack/distributions/open-benchmark/build.yaml +0 -36
  409. llama_stack/distributions/postgres-demo/__init__.py +0 -7
  410. llama_stack/distributions/postgres-demo/build.yaml +0 -23
  411. llama_stack/distributions/postgres-demo/postgres_demo.py +0 -125
  412. llama_stack/distributions/starter/build.yaml +0 -61
  413. llama_stack/distributions/starter-gpu/build.yaml +0 -61
  414. llama_stack/distributions/watsonx/build.yaml +0 -33
  415. llama_stack/providers/inline/agents/meta_reference/agent_instance.py +0 -1024
  416. llama_stack/providers/inline/agents/meta_reference/persistence.py +0 -228
  417. llama_stack/providers/inline/telemetry/__init__.py +0 -5
  418. llama_stack/providers/inline/telemetry/meta_reference/__init__.py +0 -21
  419. llama_stack/providers/inline/telemetry/meta_reference/config.py +0 -47
  420. llama_stack/providers/inline/telemetry/meta_reference/telemetry.py +0 -252
  421. llama_stack/providers/remote/inference/bedrock/models.py +0 -29
  422. llama_stack/providers/utils/kvstore/sqlite/config.py +0 -20
  423. llama_stack/providers/utils/sqlstore/__init__.py +0 -5
  424. llama_stack/providers/utils/sqlstore/api.py +0 -128
  425. llama_stack/providers/utils/telemetry/__init__.py +0 -5
  426. llama_stack/providers/utils/telemetry/trace_protocol.py +0 -142
  427. llama_stack/providers/utils/telemetry/tracing.py +0 -384
  428. llama_stack/strong_typing/__init__.py +0 -19
  429. llama_stack/strong_typing/auxiliary.py +0 -228
  430. llama_stack/strong_typing/classdef.py +0 -440
  431. llama_stack/strong_typing/core.py +0 -46
  432. llama_stack/strong_typing/deserializer.py +0 -877
  433. llama_stack/strong_typing/docstring.py +0 -409
  434. llama_stack/strong_typing/exception.py +0 -23
  435. llama_stack/strong_typing/inspection.py +0 -1085
  436. llama_stack/strong_typing/mapping.py +0 -40
  437. llama_stack/strong_typing/name.py +0 -182
  438. llama_stack/strong_typing/schema.py +0 -792
  439. llama_stack/strong_typing/serialization.py +0 -97
  440. llama_stack/strong_typing/serializer.py +0 -500
  441. llama_stack/strong_typing/slots.py +0 -27
  442. llama_stack/strong_typing/topological.py +0 -89
  443. llama_stack/ui/node_modules/flatted/python/flatted.py +0 -149
  444. llama_stack-0.3.4.dist-info/RECORD +0 -625
  445. llama_stack-0.3.4.dist-info/top_level.txt +0 -1
  446. /llama_stack/{providers/utils → core/storage}/kvstore/config.py +0 -0
  447. /llama_stack/{providers/utils → core/storage}/kvstore/mongodb/__init__.py +0 -0
  448. /llama_stack/{providers/utils → core/storage}/kvstore/postgres/__init__.py +0 -0
  449. /llama_stack/{providers/utils → core/storage}/kvstore/redis/__init__.py +0 -0
  450. /llama_stack/{providers/utils → core/storage}/kvstore/sqlite/__init__.py +0 -0
  451. /llama_stack/{apis → providers/inline/file_processor}/__init__.py +0 -0
  452. /llama_stack/{apis/common → telemetry}/__init__.py +0 -0
  453. {llama_stack-0.3.4.dist-info → llama_stack-0.4.0.dist-info}/WHEEL +0 -0
  454. {llama_stack-0.3.4.dist-info → llama_stack-0.4.0.dist-info}/entry_points.txt +0 -0
  455. {llama_stack-0.3.4.dist-info → llama_stack-0.4.0.dist-info}/licenses/LICENSE +0 -0
  456. {llama_stack/core/ui → llama_stack_api/common}/__init__.py +0 -0
  457. {llama_stack/strong_typing → llama_stack_api}/py.typed +0 -0
  458. {llama_stack/apis → llama_stack_api}/version.py +0 -0
@@ -1,894 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the terms described in the LICENSE file in
5
- # the root directory of this source tree.
6
-
7
- from collections.abc import AsyncIterator
8
- from datetime import datetime
9
- from enum import StrEnum
10
- from typing import Annotated, Any, Literal, Protocol, runtime_checkable
11
-
12
- from pydantic import BaseModel, ConfigDict, Field
13
-
14
- from llama_stack.apis.common.content_types import URL, ContentDelta, InterleavedContent
15
- from llama_stack.apis.common.responses import Order, PaginatedResponse
16
- from llama_stack.apis.inference import (
17
- CompletionMessage,
18
- ResponseFormat,
19
- SamplingParams,
20
- ToolCall,
21
- ToolChoice,
22
- ToolConfig,
23
- ToolPromptFormat,
24
- ToolResponse,
25
- ToolResponseMessage,
26
- UserMessage,
27
- )
28
- from llama_stack.apis.safety import SafetyViolation
29
- from llama_stack.apis.tools import ToolDef
30
- from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
31
- from llama_stack.schema_utils import ExtraBodyField, json_schema_type, register_schema, webmethod
32
-
33
- from .openai_responses import (
34
- ListOpenAIResponseInputItem,
35
- ListOpenAIResponseObject,
36
- OpenAIDeleteResponseObject,
37
- OpenAIResponseInput,
38
- OpenAIResponseInputTool,
39
- OpenAIResponseObject,
40
- OpenAIResponseObjectStream,
41
- OpenAIResponseText,
42
- )
43
-
44
-
45
@json_schema_type
class ResponseGuardrailSpec(BaseModel):
    """Specification for a guardrail to apply during response generation.

    :param type: The type/identifier of the guardrail.
    """

    type: str
    # TODO: more fields to be added for guardrail configuration


# A guardrail may be referenced either by its bare identifier (str) or by a
# full specification object.
ResponseGuardrail = str | ResponseGuardrailSpec
57
-
58
-
59
class Attachment(BaseModel):
    """An attachment to an agent turn.

    :param content: The content of the attachment.
    :param mime_type: The MIME type of the attachment.
    """

    # Either inline interleaved content or a URL pointing at the data.
    content: InterleavedContent | URL
    mime_type: str
68
-
69
-
70
class Document(BaseModel):
    """A document to be used by an agent.

    :param content: The content of the document.
    :param mime_type: The MIME type of the document.
    """

    # Either inline interleaved content or a URL pointing at the data.
    content: InterleavedContent | URL
    mime_type: str
79
-
80
-
81
class StepCommon(BaseModel):
    """Fields shared by every kind of step in an agent turn.

    :param turn_id: The ID of the turn.
    :param step_id: The ID of the step.
    :param started_at: The time the step started.
    :param completed_at: The time the step completed.
    """

    turn_id: str
    step_id: str
    # Timestamps are optional; a step that has not finished has no completed_at.
    started_at: datetime | None = None
    completed_at: datetime | None = None
94
-
95
-
96
class StepType(StrEnum):
    """Type of the step in an agent turn.

    :cvar inference: The step is an inference step that calls an LLM.
    :cvar tool_execution: The step is a tool execution step that executes a tool call.
    :cvar shield_call: The step is a shield call step that checks for safety violations.
    :cvar memory_retrieval: The step is a memory retrieval step that retrieves context from vector dbs.
    """

    inference = "inference"
    tool_execution = "tool_execution"
    shield_call = "shield_call"
    memory_retrieval = "memory_retrieval"
109
-
110
-
111
@json_schema_type
class InferenceStep(StepCommon):
    """An inference step in an agent turn.

    :param model_response: The response from the LLM.
    """

    # Allow the "model_" field-name prefix, which pydantic otherwise reserves.
    model_config = ConfigDict(protected_namespaces=())

    step_type: Literal[StepType.inference] = StepType.inference
    model_response: CompletionMessage
122
-
123
-
124
@json_schema_type
class ToolExecutionStep(StepCommon):
    """A tool execution step in an agent turn.

    :param tool_calls: The tool calls to execute.
    :param tool_responses: The tool responses from the tool calls.
    """

    step_type: Literal[StepType.tool_execution] = StepType.tool_execution
    tool_calls: list[ToolCall]
    tool_responses: list[ToolResponse]
135
-
136
-
137
@json_schema_type
class ShieldCallStep(StepCommon):
    """A shield call step in an agent turn.

    :param violation: The violation from the shield call, or None if the check passed.
    """

    step_type: Literal[StepType.shield_call] = StepType.shield_call
    # Required field (no default) but may be None when no violation occurred.
    violation: SafetyViolation | None
146
-
147
-
148
@json_schema_type
class MemoryRetrievalStep(StepCommon):
    """A memory retrieval step in an agent turn.

    :param vector_db_ids: The IDs of the vector databases to retrieve context from.
    :param inserted_context: The context retrieved from the vector databases.
    """

    step_type: Literal[StepType.memory_retrieval] = StepType.memory_retrieval
    # TODO: should this be List[str]?
    vector_db_ids: str
    inserted_context: InterleavedContent
160
-
161
-
162
# Discriminated union of all concrete step types, keyed on `step_type`.
Step = Annotated[
    InferenceStep | ToolExecutionStep | ShieldCallStep | MemoryRetrievalStep,
    Field(discriminator="step_type"),
]
166
-
167
-
168
@json_schema_type
class Turn(BaseModel):
    """A single turn in an interaction with an Agentic System.

    :param turn_id: Unique identifier for the turn within a session
    :param session_id: Unique identifier for the conversation session
    :param input_messages: List of messages that initiated this turn
    :param steps: Ordered list of processing steps executed during this turn
    :param output_message: The model's generated response containing content and metadata
    :param output_attachments: (Optional) Files or media attached to the agent's response
    :param started_at: Timestamp when the turn began
    :param completed_at: (Optional) Timestamp when the turn finished, if completed
    """

    turn_id: str
    session_id: str
    input_messages: list[UserMessage | ToolResponseMessage]
    steps: list[Step]
    output_message: CompletionMessage
    # `list` is the idiomatic default_factory (equivalent to `lambda: []`).
    output_attachments: list[Attachment] | None = Field(default_factory=list)

    started_at: datetime
    completed_at: datetime | None = None
191
-
192
-
193
@json_schema_type
class Session(BaseModel):
    """A single session of an interaction with an Agentic System.

    :param session_id: Unique identifier for the conversation session
    :param session_name: Human-readable name for the session
    :param turns: List of all turns that have occurred in this session
    :param started_at: Timestamp when the session was created
    """

    session_id: str
    session_name: str
    turns: list[Turn]
    started_at: datetime
207
-
208
-
209
class AgentToolGroupWithArgs(BaseModel):
    """A tool group reference carrying per-group arguments."""

    name: str
    args: dict[str, Any]


# A tool group may be referenced by bare name (str) or with arguments.
AgentToolGroup = str | AgentToolGroupWithArgs
register_schema(AgentToolGroup, name="AgentTool")
216
-
217
-
218
class AgentConfigCommon(BaseModel):
    """Configuration options shared by agent configs and per-turn overrides.

    :param sampling_params: Sampling parameters used for inference.
    :param input_shields: Shield identifiers applied to user input.
    :param output_shields: Shield identifiers applied to agent output.
    :param toolgroups: Tool groups available to the agent.
    :param client_tools: Tools executed on the client side.
    :param tool_choice: (Deprecated) Use tool_config instead.
    :param tool_prompt_format: (Deprecated) Use tool_config instead.
    :param tool_config: Tool usage configuration.
    :param max_infer_iters: Maximum number of inference iterations per turn.
    """

    sampling_params: SamplingParams | None = Field(default_factory=SamplingParams)

    # `list` is the idiomatic default_factory (equivalent to `lambda: []`).
    input_shields: list[str] | None = Field(default_factory=list)
    output_shields: list[str] | None = Field(default_factory=list)
    toolgroups: list[AgentToolGroup] | None = Field(default_factory=list)
    client_tools: list[ToolDef] | None = Field(default_factory=list)
    tool_choice: ToolChoice | None = Field(default=None, deprecated="use tool_config instead")
    tool_prompt_format: ToolPromptFormat | None = Field(default=None, deprecated="use tool_config instead")
    tool_config: ToolConfig | None = Field(default=None)

    max_infer_iters: int | None = 10

    def model_post_init(self, __context):
        """Reconcile the deprecated tool_choice/tool_prompt_format fields with tool_config.

        If tool_config is set, the deprecated fields (when also set) must agree
        with it; otherwise a tool_config is synthesized from the deprecated fields.
        """
        if self.tool_config:
            if self.tool_choice and self.tool_config.tool_choice != self.tool_choice:
                raise ValueError("tool_choice is deprecated. Use tool_choice in tool_config instead.")
            if self.tool_prompt_format and self.tool_config.tool_prompt_format != self.tool_prompt_format:
                raise ValueError("tool_prompt_format is deprecated. Use tool_prompt_format in tool_config instead.")
        else:
            # Build a tool_config from whichever deprecated fields were provided.
            params = {}
            if self.tool_choice:
                params["tool_choice"] = self.tool_choice
            if self.tool_prompt_format:
                params["tool_prompt_format"] = self.tool_prompt_format
            self.tool_config = ToolConfig(**params)
244
-
245
-
246
@json_schema_type
class AgentConfig(AgentConfigCommon):
    """Configuration for an agent.

    :param model: The model identifier to use for the agent
    :param instructions: The system instructions for the agent
    :param name: Optional name for the agent, used in telemetry and identification
    :param enable_session_persistence: Optional flag indicating whether session data has to be persisted
    :param response_format: Optional response format configuration
    """

    model: str
    instructions: str
    name: str | None = None
    enable_session_persistence: bool | None = False
    response_format: ResponseFormat | None = None
262
-
263
-
264
@json_schema_type
class Agent(BaseModel):
    """An agent instance with configuration and metadata.

    :param agent_id: Unique identifier for the agent
    :param agent_config: Configuration settings for the agent
    :param created_at: Timestamp when the agent was created
    """

    agent_id: str
    agent_config: AgentConfig
    created_at: datetime
276
-
277
-
278
class AgentConfigOverridablePerTurn(AgentConfigCommon):
    """Agent configuration whose instructions may be overridden on a per-turn basis."""

    instructions: str | None = None
280
-
281
-
282
class AgentTurnResponseEventType(StrEnum):
    """Kinds of events emitted while streaming an agent turn response."""

    step_start = "step_start"
    step_complete = "step_complete"
    step_progress = "step_progress"

    turn_start = "turn_start"
    turn_complete = "turn_complete"
    turn_awaiting_input = "turn_awaiting_input"
290
-
291
-
292
@json_schema_type
class AgentTurnResponseStepStartPayload(BaseModel):
    """Payload for step start events in agent turn responses.

    :param event_type: Type of event being reported
    :param step_type: Type of step being executed
    :param step_id: Unique identifier for the step within a turn
    :param metadata: (Optional) Additional metadata for the step
    """

    event_type: Literal[AgentTurnResponseEventType.step_start] = AgentTurnResponseEventType.step_start
    step_type: StepType
    step_id: str
    # `dict` is the idiomatic default_factory (equivalent to `lambda: {}`).
    metadata: dict[str, Any] | None = Field(default_factory=dict)
306
-
307
-
308
@json_schema_type
class AgentTurnResponseStepCompletePayload(BaseModel):
    """Payload for step completion events in agent turn responses.

    :param event_type: Type of event being reported
    :param step_type: Type of step being executed
    :param step_id: Unique identifier for the step within a turn
    :param step_details: Complete details of the executed step
    """

    event_type: Literal[AgentTurnResponseEventType.step_complete] = AgentTurnResponseEventType.step_complete
    step_type: StepType
    step_id: str
    step_details: Step
322
-
323
-
324
@json_schema_type
class AgentTurnResponseStepProgressPayload(BaseModel):
    """Payload for step progress events in agent turn responses.

    :param event_type: Type of event being reported
    :param step_type: Type of step being executed
    :param step_id: Unique identifier for the step within a turn
    :param delta: Incremental content changes during step execution
    """

    # Allow the "model_" field-name prefix, which pydantic otherwise reserves.
    model_config = ConfigDict(protected_namespaces=())

    event_type: Literal[AgentTurnResponseEventType.step_progress] = AgentTurnResponseEventType.step_progress
    step_type: StepType
    step_id: str

    delta: ContentDelta
341
-
342
-
343
@json_schema_type
class AgentTurnResponseTurnStartPayload(BaseModel):
    """Payload for turn start events in agent turn responses.

    :param event_type: Type of event being reported
    :param turn_id: Unique identifier for the turn within a session
    """

    event_type: Literal[AgentTurnResponseEventType.turn_start] = AgentTurnResponseEventType.turn_start
    turn_id: str
353
-
354
-
355
@json_schema_type
class AgentTurnResponseTurnCompletePayload(BaseModel):
    """Payload for turn completion events in agent turn responses.

    :param event_type: Type of event being reported
    :param turn: Complete turn data including all steps and results
    """

    event_type: Literal[AgentTurnResponseEventType.turn_complete] = AgentTurnResponseEventType.turn_complete
    turn: Turn
365
-
366
-
367
@json_schema_type
class AgentTurnResponseTurnAwaitingInputPayload(BaseModel):
    """Payload for turn awaiting input events in agent turn responses.

    :param event_type: Type of event being reported
    :param turn: Turn data when waiting for external tool responses
    """

    event_type: Literal[AgentTurnResponseEventType.turn_awaiting_input] = AgentTurnResponseEventType.turn_awaiting_input
    turn: Turn
377
-
378
-
379
# Discriminated union of all streaming event payloads, keyed on `event_type`.
AgentTurnResponseEventPayload = Annotated[
    AgentTurnResponseStepStartPayload
    | AgentTurnResponseStepProgressPayload
    | AgentTurnResponseStepCompletePayload
    | AgentTurnResponseTurnStartPayload
    | AgentTurnResponseTurnCompletePayload
    | AgentTurnResponseTurnAwaitingInputPayload,
    Field(discriminator="event_type"),
]
register_schema(AgentTurnResponseEventPayload, name="AgentTurnResponseEventPayload")
389
-
390
-
391
@json_schema_type
class AgentTurnResponseEvent(BaseModel):
    """An event in an agent turn response stream.

    :param payload: Event-specific payload containing event data
    """

    payload: AgentTurnResponseEventPayload
399
-
400
-
401
@json_schema_type
class AgentCreateResponse(BaseModel):
    """Response returned when creating a new agent.

    :param agent_id: Unique identifier for the created agent
    """

    agent_id: str
409
-
410
-
411
@json_schema_type
class AgentSessionCreateResponse(BaseModel):
    """Response returned when creating a new agent session.

    :param session_id: Unique identifier for the created session
    """

    session_id: str
419
-
420
-
421
@json_schema_type
class AgentTurnCreateRequest(AgentConfigOverridablePerTurn):
    """Request to create a new turn for an agent.

    :param agent_id: Unique identifier for the agent
    :param session_id: Unique identifier for the conversation session
    :param messages: List of messages to start the turn with
    :param documents: (Optional) List of documents to provide to the agent
    :param toolgroups: (Optional) List of tool groups to make available for this turn
    :param stream: (Optional) Whether to stream the response
    :param tool_config: (Optional) Tool configuration to override agent defaults
    """

    agent_id: str
    session_id: str

    # TODO: figure out how we can simplify this and make why
    # ToolResponseMessage needs to be here (it is function call
    # execution from outside the system)
    messages: list[UserMessage | ToolResponseMessage]

    documents: list[Document] | None = None
    # `list` is the idiomatic default_factory (equivalent to `lambda: []`).
    toolgroups: list[AgentToolGroup] | None = Field(default_factory=list)

    stream: bool | None = False
    tool_config: ToolConfig | None = None
447
-
448
-
449
@json_schema_type
class AgentTurnResumeRequest(BaseModel):
    """Request to resume an agent turn with tool responses.

    :param agent_id: Unique identifier for the agent
    :param session_id: Unique identifier for the conversation session
    :param turn_id: Unique identifier for the turn within a session
    :param tool_responses: List of tool responses to submit to continue the turn
    :param stream: (Optional) Whether to stream the response
    """

    agent_id: str
    session_id: str
    turn_id: str
    tool_responses: list[ToolResponse]
    stream: bool | None = False
465
-
466
-
467
@json_schema_type
class AgentTurnResponseStreamChunk(BaseModel):
    """Streamed agent turn completion response.

    :param event: Individual event in the agent turn response stream
    """

    event: AgentTurnResponseEvent
475
-
476
-
477
@json_schema_type
class AgentStepResponse(BaseModel):
    """Response containing details of a specific agent step.

    :param step: The complete step data and execution details
    """

    step: Step
485
-
486
-
487
@runtime_checkable
class Agents(Protocol):
    """Agents

    APIs for creating and interacting with agentic systems.

    Each endpoint is registered twice: once at the deprecated v1 level and
    once at the v1alpha level, so existing clients keep working during the
    migration.
    """

    @webmethod(
        route="/agents",
        method="POST",
        descriptive_name="create_agent",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(
        route="/agents",
        method="POST",
        descriptive_name="create_agent",
        level=LLAMA_STACK_API_V1ALPHA,
    )
    async def create_agent(
        self,
        agent_config: AgentConfig,
    ) -> AgentCreateResponse:
        """Create an agent with the given configuration.

        :param agent_config: The configuration for the agent.
        :returns: An AgentCreateResponse with the agent ID.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}/session/{session_id}/turn",
        method="POST",
        descriptive_name="create_agent_turn",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(
        route="/agents/{agent_id}/session/{session_id}/turn",
        method="POST",
        descriptive_name="create_agent_turn",
        level=LLAMA_STACK_API_V1ALPHA,
    )
    async def create_agent_turn(
        self,
        agent_id: str,
        session_id: str,
        messages: list[UserMessage | ToolResponseMessage],
        stream: bool | None = False,
        documents: list[Document] | None = None,
        toolgroups: list[AgentToolGroup] | None = None,
        tool_config: ToolConfig | None = None,
    ) -> Turn | AsyncIterator[AgentTurnResponseStreamChunk]:
        """Create a new turn for an agent.

        :param agent_id: The ID of the agent to create the turn for.
        :param session_id: The ID of the session to create the turn for.
        :param messages: List of messages to start the turn with.
        :param stream: (Optional) If True, generate an SSE event stream of the response. Defaults to False.
        :param documents: (Optional) List of documents to create the turn with.
        :param toolgroups: (Optional) List of toolgroups to create the turn with, will be used in addition to the agent's config toolgroups for the request.
        :param tool_config: (Optional) The tool configuration to create the turn with, will be used to override the agent's tool_config.
        :returns: If stream=False, returns a Turn object.
                  If stream=True, returns an SSE event stream of AgentTurnResponseStreamChunk.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume",
        method="POST",
        descriptive_name="resume_agent_turn",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(
        route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume",
        method="POST",
        descriptive_name="resume_agent_turn",
        level=LLAMA_STACK_API_V1ALPHA,
    )
    async def resume_agent_turn(
        self,
        agent_id: str,
        session_id: str,
        turn_id: str,
        tool_responses: list[ToolResponse],
        stream: bool | None = False,
    ) -> Turn | AsyncIterator[AgentTurnResponseStreamChunk]:
        """Resume an agent turn with executed tool call responses.

        When a Turn has the status `awaiting_input` due to pending input from client side tool calls, this endpoint can be used to submit the outputs from the tool calls once they are ready.

        :param agent_id: The ID of the agent to resume.
        :param session_id: The ID of the session to resume.
        :param turn_id: The ID of the turn to resume.
        :param tool_responses: The tool call responses to resume the turn with.
        :param stream: Whether to stream the response.
        :returns: A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk objects.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}",
        method="GET",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(
        route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}",
        method="GET",
        level=LLAMA_STACK_API_V1ALPHA,
    )
    async def get_agents_turn(
        self,
        agent_id: str,
        session_id: str,
        turn_id: str,
    ) -> Turn:
        """Retrieve an agent turn by its ID.

        :param agent_id: The ID of the agent to get the turn for.
        :param session_id: The ID of the session to get the turn for.
        :param turn_id: The ID of the turn to get.
        :returns: A Turn.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}",
        method="GET",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(
        route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}",
        method="GET",
        level=LLAMA_STACK_API_V1ALPHA,
    )
    async def get_agents_step(
        self,
        agent_id: str,
        session_id: str,
        turn_id: str,
        step_id: str,
    ) -> AgentStepResponse:
        """Retrieve an agent step by its ID.

        :param agent_id: The ID of the agent to get the step for.
        :param session_id: The ID of the session to get the step for.
        :param turn_id: The ID of the turn to get the step for.
        :param step_id: The ID of the step to get.
        :returns: An AgentStepResponse.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}/session",
        method="POST",
        descriptive_name="create_agent_session",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(
        route="/agents/{agent_id}/session",
        method="POST",
        descriptive_name="create_agent_session",
        level=LLAMA_STACK_API_V1ALPHA,
    )
    async def create_agent_session(
        self,
        agent_id: str,
        session_name: str,
    ) -> AgentSessionCreateResponse:
        """Create a new session for an agent.

        :param agent_id: The ID of the agent to create the session for.
        :param session_name: The name of the session to create.
        :returns: An AgentSessionCreateResponse.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}/session/{session_id}",
        method="GET",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(
        route="/agents/{agent_id}/session/{session_id}",
        method="GET",
        level=LLAMA_STACK_API_V1ALPHA,
    )
    async def get_agents_session(
        self,
        session_id: str,
        agent_id: str,
        turn_ids: list[str] | None = None,
    ) -> Session:
        """Retrieve an agent session by its ID.

        :param session_id: The ID of the session to get.
        :param agent_id: The ID of the agent to get the session for.
        :param turn_ids: (Optional) List of turn IDs to filter the session by.
        :returns: A Session.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}/session/{session_id}",
        method="DELETE",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(
        route="/agents/{agent_id}/session/{session_id}",
        method="DELETE",
        level=LLAMA_STACK_API_V1ALPHA,
    )
    async def delete_agents_session(
        self,
        session_id: str,
        agent_id: str,
    ) -> None:
        """Delete an agent session by its ID and its associated turns.

        :param session_id: The ID of the session to delete.
        :param agent_id: The ID of the agent to delete the session for.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}",
        method="DELETE",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(route="/agents/{agent_id}", method="DELETE", level=LLAMA_STACK_API_V1ALPHA)
    async def delete_agent(
        self,
        agent_id: str,
    ) -> None:
        """Delete an agent by its ID and its associated sessions and turns.

        :param agent_id: The ID of the agent to delete.
        """
        ...

    @webmethod(route="/agents", method="GET", deprecated=True, level=LLAMA_STACK_API_V1)
    @webmethod(route="/agents", method="GET", level=LLAMA_STACK_API_V1ALPHA)
    async def list_agents(self, start_index: int | None = None, limit: int | None = None) -> PaginatedResponse:
        """List all agents.

        :param start_index: The index to start the pagination from.
        :param limit: The number of agents to return.
        :returns: A PaginatedResponse.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}",
        method="GET",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(route="/agents/{agent_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA)
    async def get_agent(self, agent_id: str) -> Agent:
        """Describe an agent by its ID.

        :param agent_id: ID of the agent.
        :returns: An Agent of the agent.
        """
        ...

    @webmethod(
        route="/agents/{agent_id}/sessions",
        method="GET",
        deprecated=True,
        level=LLAMA_STACK_API_V1,
    )
    @webmethod(route="/agents/{agent_id}/sessions", method="GET", level=LLAMA_STACK_API_V1ALPHA)
    async def list_agent_sessions(
        self,
        agent_id: str,
        start_index: int | None = None,
        limit: int | None = None,
    ) -> PaginatedResponse:
        """List all session(s) of a given agent.

        :param agent_id: The ID of the agent to list sessions for.
        :param start_index: The index to start the pagination from.
        :param limit: The number of sessions to return.
        :returns: A PaginatedResponse.
        """
        ...

    # We situate the OpenAI Responses API in the Agents API just like we did things
    # for Inference. The Responses API, in its intent, serves the same purpose as
    # the Agents API above -- it is essentially a lightweight "agentic loop" with
    # integrated tool calling.
    #
    # Both of these APIs are inherently stateful.

    @webmethod(
        route="/openai/v1/responses/{response_id}",
        method="GET",
        level=LLAMA_STACK_API_V1,
        deprecated=True,
    )
    @webmethod(route="/responses/{response_id}", method="GET", level=LLAMA_STACK_API_V1)
    async def get_openai_response(
        self,
        response_id: str,
    ) -> OpenAIResponseObject:
        """Get a model response.

        :param response_id: The ID of the OpenAI response to retrieve.
        :returns: An OpenAIResponseObject.
        """
        ...

    @webmethod(route="/openai/v1/responses", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
    @webmethod(route="/responses", method="POST", level=LLAMA_STACK_API_V1)
    async def create_openai_response(
        self,
        input: str | list[OpenAIResponseInput],
        model: str,
        instructions: str | None = None,
        previous_response_id: str | None = None,
        conversation: str | None = None,
        store: bool | None = True,
        stream: bool | None = False,
        temperature: float | None = None,
        text: OpenAIResponseText | None = None,
        tools: list[OpenAIResponseInputTool] | None = None,
        include: list[str] | None = None,
        max_infer_iters: int | None = 10,  # this is an extension to the OpenAI API
        guardrails: Annotated[
            list[ResponseGuardrail] | None,
            ExtraBodyField(
                "List of guardrails to apply during response generation. Guardrails provide safety and content moderation."
            ),
        ] = None,
    ) -> OpenAIResponseObject | AsyncIterator[OpenAIResponseObjectStream]:
        """Create a model response.

        :param input: Input message(s) to create the response.
        :param model: The underlying LLM used for completions.
        :param previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses.
        :param conversation: (Optional) The ID of a conversation to add the response to. Must begin with 'conv_'. Input and output messages will be automatically added to the conversation.
        :param include: (Optional) Additional fields to include in the response.
        :param guardrails: (Optional) List of guardrails to apply during response generation. Can be guardrail IDs (strings) or guardrail specifications.
        :returns: An OpenAIResponseObject.
        """
        ...

    @webmethod(route="/openai/v1/responses", method="GET", level=LLAMA_STACK_API_V1, deprecated=True)
    @webmethod(route="/responses", method="GET", level=LLAMA_STACK_API_V1)
    async def list_openai_responses(
        self,
        after: str | None = None,
        limit: int | None = 50,
        model: str | None = None,
        order: Order | None = Order.desc,
    ) -> ListOpenAIResponseObject:
        """List all responses.

        :param after: The ID of the last response to return.
        :param limit: The number of responses to return.
        :param model: The model to filter responses by.
        :param order: The order to sort responses by when sorted by created_at ('asc' or 'desc').
        :returns: A ListOpenAIResponseObject.
        """
        ...

    @webmethod(
        route="/openai/v1/responses/{response_id}/input_items", method="GET", level=LLAMA_STACK_API_V1, deprecated=True
    )
    @webmethod(route="/responses/{response_id}/input_items", method="GET", level=LLAMA_STACK_API_V1)
    async def list_openai_response_input_items(
        self,
        response_id: str,
        after: str | None = None,
        before: str | None = None,
        include: list[str] | None = None,
        limit: int | None = 20,
        order: Order | None = Order.desc,
    ) -> ListOpenAIResponseInputItem:
        """List input items.

        :param response_id: The ID of the response to retrieve input items for.
        :param after: An item ID to list items after, used for pagination.
        :param before: An item ID to list items before, used for pagination.
        :param include: Additional fields to include in the response.
        :param limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
        :param order: The order to return the input items in. Default is desc.
        :returns: An ListOpenAIResponseInputItem.
        """
        ...

    @webmethod(route="/openai/v1/responses/{response_id}", method="DELETE", level=LLAMA_STACK_API_V1, deprecated=True)
    @webmethod(route="/responses/{response_id}", method="DELETE", level=LLAMA_STACK_API_V1)
    async def delete_openai_response(self, response_id: str) -> OpenAIDeleteResponseObject:
        """Delete a response.

        :param response_id: The ID of the OpenAI response to delete.
        :returns: An OpenAIDeleteResponseObject
        """
        ...