llama-stack 0.0.46__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (312) hide show
  1. llama_stack-0.0.46/LICENSE +22 -0
  2. llama_stack-0.0.46/MANIFEST.in +4 -0
  3. llama_stack-0.0.46/PKG-INFO +153 -0
  4. llama_stack-0.0.46/README.md +122 -0
  5. llama_stack-0.0.46/distributions/bedrock/build.yaml +9 -0
  6. llama_stack-0.0.46/distributions/databricks/build.yaml +9 -0
  7. llama_stack-0.0.46/distributions/fireworks/build.yaml +9 -0
  8. llama_stack-0.0.46/distributions/hf-endpoint/build.yaml +9 -0
  9. llama_stack-0.0.46/distributions/hf-serverless/build.yaml +9 -0
  10. llama_stack-0.0.46/distributions/meta-reference-gpu/build.yaml +13 -0
  11. llama_stack-0.0.46/distributions/meta-reference-quantized-gpu/build.yaml +13 -0
  12. llama_stack-0.0.46/distributions/ollama/build.yaml +12 -0
  13. llama_stack-0.0.46/distributions/tgi/build.yaml +12 -0
  14. llama_stack-0.0.46/distributions/together/build.yaml +9 -0
  15. llama_stack-0.0.46/distributions/vllm/build.yaml +9 -0
  16. llama_stack-0.0.46/llama_stack/__init__.py +5 -0
  17. llama_stack-0.0.46/llama_stack/apis/__init__.py +5 -0
  18. llama_stack-0.0.46/llama_stack/apis/agents/__init__.py +7 -0
  19. llama_stack-0.0.46/llama_stack/apis/agents/agents.py +471 -0
  20. llama_stack-0.0.46/llama_stack/apis/agents/client.py +292 -0
  21. llama_stack-0.0.46/llama_stack/apis/agents/event_logger.py +184 -0
  22. llama_stack-0.0.46/llama_stack/apis/batch_inference/__init__.py +7 -0
  23. llama_stack-0.0.46/llama_stack/apis/batch_inference/batch_inference.py +72 -0
  24. llama_stack-0.0.46/llama_stack/apis/common/__init__.py +5 -0
  25. llama_stack-0.0.46/llama_stack/apis/common/deployment_types.py +31 -0
  26. llama_stack-0.0.46/llama_stack/apis/common/job_types.py +12 -0
  27. llama_stack-0.0.46/llama_stack/apis/common/training_types.py +16 -0
  28. llama_stack-0.0.46/llama_stack/apis/common/type_system.py +65 -0
  29. llama_stack-0.0.46/llama_stack/apis/datasetio/__init__.py +7 -0
  30. llama_stack-0.0.46/llama_stack/apis/datasetio/client.py +103 -0
  31. llama_stack-0.0.46/llama_stack/apis/datasetio/datasetio.py +39 -0
  32. llama_stack-0.0.46/llama_stack/apis/datasets/__init__.py +7 -0
  33. llama_stack-0.0.46/llama_stack/apis/datasets/client.py +116 -0
  34. llama_stack-0.0.46/llama_stack/apis/datasets/datasets.py +54 -0
  35. llama_stack-0.0.46/llama_stack/apis/eval/__init__.py +7 -0
  36. llama_stack-0.0.46/llama_stack/apis/eval/eval.py +70 -0
  37. llama_stack-0.0.46/llama_stack/apis/inference/__init__.py +7 -0
  38. llama_stack-0.0.46/llama_stack/apis/inference/client.py +200 -0
  39. llama_stack-0.0.46/llama_stack/apis/inference/event_logger.py +43 -0
  40. llama_stack-0.0.46/llama_stack/apis/inference/inference.py +249 -0
  41. llama_stack-0.0.46/llama_stack/apis/inspect/__init__.py +7 -0
  42. llama_stack-0.0.46/llama_stack/apis/inspect/client.py +82 -0
  43. llama_stack-0.0.46/llama_stack/apis/inspect/inspect.py +41 -0
  44. llama_stack-0.0.46/llama_stack/apis/memory/__init__.py +7 -0
  45. llama_stack-0.0.46/llama_stack/apis/memory/client.py +155 -0
  46. llama_stack-0.0.46/llama_stack/apis/memory/memory.py +65 -0
  47. llama_stack-0.0.46/llama_stack/apis/memory_banks/__init__.py +7 -0
  48. llama_stack-0.0.46/llama_stack/apis/memory_banks/client.py +116 -0
  49. llama_stack-0.0.46/llama_stack/apis/memory_banks/memory_banks.py +78 -0
  50. llama_stack-0.0.46/llama_stack/apis/models/__init__.py +7 -0
  51. llama_stack-0.0.46/llama_stack/apis/models/client.py +83 -0
  52. llama_stack-0.0.46/llama_stack/apis/models/models.py +42 -0
  53. llama_stack-0.0.46/llama_stack/apis/post_training/__init__.py +7 -0
  54. llama_stack-0.0.46/llama_stack/apis/post_training/post_training.py +229 -0
  55. llama_stack-0.0.46/llama_stack/apis/safety/__init__.py +7 -0
  56. llama_stack-0.0.46/llama_stack/apis/safety/client.py +105 -0
  57. llama_stack-0.0.46/llama_stack/apis/safety/safety.py +52 -0
  58. llama_stack-0.0.46/llama_stack/apis/scoring/__init__.py +7 -0
  59. llama_stack-0.0.46/llama_stack/apis/scoring/client.py +132 -0
  60. llama_stack-0.0.46/llama_stack/apis/scoring/scoring.py +58 -0
  61. llama_stack-0.0.46/llama_stack/apis/scoring_functions/__init__.py +7 -0
  62. llama_stack-0.0.46/llama_stack/apis/scoring_functions/scoring_functions.py +70 -0
  63. llama_stack-0.0.46/llama_stack/apis/shields/__init__.py +7 -0
  64. llama_stack-0.0.46/llama_stack/apis/shields/client.py +79 -0
  65. llama_stack-0.0.46/llama_stack/apis/shields/shields.py +51 -0
  66. llama_stack-0.0.46/llama_stack/apis/synthetic_data_generation/__init__.py +7 -0
  67. llama_stack-0.0.46/llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +54 -0
  68. llama_stack-0.0.46/llama_stack/apis/telemetry/__init__.py +7 -0
  69. llama_stack-0.0.46/llama_stack/apis/telemetry/telemetry.py +132 -0
  70. llama_stack-0.0.46/llama_stack/cli/__init__.py +5 -0
  71. llama_stack-0.0.46/llama_stack/cli/download.py +342 -0
  72. llama_stack-0.0.46/llama_stack/cli/llama.py +48 -0
  73. llama_stack-0.0.46/llama_stack/cli/model/__init__.py +7 -0
  74. llama_stack-0.0.46/llama_stack/cli/model/describe.py +82 -0
  75. llama_stack-0.0.46/llama_stack/cli/model/download.py +24 -0
  76. llama_stack-0.0.46/llama_stack/cli/model/list.py +62 -0
  77. llama_stack-0.0.46/llama_stack/cli/model/model.py +34 -0
  78. llama_stack-0.0.46/llama_stack/cli/model/prompt_format.py +112 -0
  79. llama_stack-0.0.46/llama_stack/cli/model/safety_models.py +52 -0
  80. llama_stack-0.0.46/llama_stack/cli/scripts/__init__.py +5 -0
  81. llama_stack-0.0.46/llama_stack/cli/scripts/install-wheel-from-presigned.sh +38 -0
  82. llama_stack-0.0.46/llama_stack/cli/scripts/run.py +18 -0
  83. llama_stack-0.0.46/llama_stack/cli/stack/__init__.py +7 -0
  84. llama_stack-0.0.46/llama_stack/cli/stack/build.py +300 -0
  85. llama_stack-0.0.46/llama_stack/cli/stack/configure.py +178 -0
  86. llama_stack-0.0.46/llama_stack/cli/stack/list_apis.py +47 -0
  87. llama_stack-0.0.46/llama_stack/cli/stack/list_providers.py +62 -0
  88. llama_stack-0.0.46/llama_stack/cli/stack/run.py +103 -0
  89. llama_stack-0.0.46/llama_stack/cli/stack/stack.py +34 -0
  90. llama_stack-0.0.46/llama_stack/cli/subcommand.py +19 -0
  91. llama_stack-0.0.46/llama_stack/cli/table.py +77 -0
  92. llama_stack-0.0.46/llama_stack/distribution/__init__.py +5 -0
  93. llama_stack-0.0.46/llama_stack/distribution/build.py +124 -0
  94. llama_stack-0.0.46/llama_stack/distribution/build_conda_env.sh +136 -0
  95. llama_stack-0.0.46/llama_stack/distribution/build_container.sh +144 -0
  96. llama_stack-0.0.46/llama_stack/distribution/common.sh +40 -0
  97. llama_stack-0.0.46/llama_stack/distribution/configure.py +184 -0
  98. llama_stack-0.0.46/llama_stack/distribution/configure_container.sh +47 -0
  99. llama_stack-0.0.46/llama_stack/distribution/datatypes.py +148 -0
  100. llama_stack-0.0.46/llama_stack/distribution/distribution.py +66 -0
  101. llama_stack-0.0.46/llama_stack/distribution/inspect.py +67 -0
  102. llama_stack-0.0.46/llama_stack/distribution/request_headers.py +57 -0
  103. llama_stack-0.0.46/llama_stack/distribution/resolver.py +331 -0
  104. llama_stack-0.0.46/llama_stack/distribution/routers/__init__.py +61 -0
  105. llama_stack-0.0.46/llama_stack/distribution/routers/routers.py +248 -0
  106. llama_stack-0.0.46/llama_stack/distribution/routers/routing_tables.py +256 -0
  107. llama_stack-0.0.46/llama_stack/distribution/server/__init__.py +5 -0
  108. llama_stack-0.0.46/llama_stack/distribution/server/endpoints.py +48 -0
  109. llama_stack-0.0.46/llama_stack/distribution/server/server.py +347 -0
  110. llama_stack-0.0.46/llama_stack/distribution/start_conda_env.sh +42 -0
  111. llama_stack-0.0.46/llama_stack/distribution/start_container.sh +64 -0
  112. llama_stack-0.0.46/llama_stack/distribution/utils/__init__.py +5 -0
  113. llama_stack-0.0.46/llama_stack/distribution/utils/config_dirs.py +21 -0
  114. llama_stack-0.0.46/llama_stack/distribution/utils/dynamic.py +13 -0
  115. llama_stack-0.0.46/llama_stack/distribution/utils/exec.py +105 -0
  116. llama_stack-0.0.46/llama_stack/distribution/utils/model_utils.py +13 -0
  117. llama_stack-0.0.46/llama_stack/distribution/utils/prompt_for_config.py +316 -0
  118. llama_stack-0.0.46/llama_stack/distribution/utils/serialize.py +18 -0
  119. llama_stack-0.0.46/llama_stack/providers/__init__.py +5 -0
  120. llama_stack-0.0.46/llama_stack/providers/adapters/__init__.py +5 -0
  121. llama_stack-0.0.46/llama_stack/providers/adapters/agents/__init__.py +5 -0
  122. llama_stack-0.0.46/llama_stack/providers/adapters/agents/sample/__init__.py +17 -0
  123. llama_stack-0.0.46/llama_stack/providers/adapters/agents/sample/config.py +12 -0
  124. llama_stack-0.0.46/llama_stack/providers/adapters/agents/sample/sample.py +18 -0
  125. llama_stack-0.0.46/llama_stack/providers/adapters/inference/__init__.py +5 -0
  126. llama_stack-0.0.46/llama_stack/providers/adapters/inference/bedrock/__init__.py +17 -0
  127. llama_stack-0.0.46/llama_stack/providers/adapters/inference/bedrock/bedrock.py +453 -0
  128. llama_stack-0.0.46/llama_stack/providers/adapters/inference/bedrock/config.py +55 -0
  129. llama_stack-0.0.46/llama_stack/providers/adapters/inference/databricks/__init__.py +17 -0
  130. llama_stack-0.0.46/llama_stack/providers/adapters/inference/databricks/config.py +21 -0
  131. llama_stack-0.0.46/llama_stack/providers/adapters/inference/databricks/databricks.py +127 -0
  132. llama_stack-0.0.46/llama_stack/providers/adapters/inference/fireworks/__init__.py +18 -0
  133. llama_stack-0.0.46/llama_stack/providers/adapters/inference/fireworks/config.py +20 -0
  134. llama_stack-0.0.46/llama_stack/providers/adapters/inference/fireworks/fireworks.py +147 -0
  135. llama_stack-0.0.46/llama_stack/providers/adapters/inference/ollama/__init__.py +19 -0
  136. llama_stack-0.0.46/llama_stack/providers/adapters/inference/ollama/ollama.py +238 -0
  137. llama_stack-0.0.46/llama_stack/providers/adapters/inference/sample/__init__.py +17 -0
  138. llama_stack-0.0.46/llama_stack/providers/adapters/inference/sample/config.py +12 -0
  139. llama_stack-0.0.46/llama_stack/providers/adapters/inference/sample/sample.py +23 -0
  140. llama_stack-0.0.46/llama_stack/providers/adapters/inference/tgi/__init__.py +29 -0
  141. llama_stack-0.0.46/llama_stack/providers/adapters/inference/tgi/config.py +43 -0
  142. llama_stack-0.0.46/llama_stack/providers/adapters/inference/tgi/tgi.py +294 -0
  143. llama_stack-0.0.46/llama_stack/providers/adapters/inference/together/__init__.py +18 -0
  144. llama_stack-0.0.46/llama_stack/providers/adapters/inference/together/config.py +22 -0
  145. llama_stack-0.0.46/llama_stack/providers/adapters/inference/together/together.py +158 -0
  146. llama_stack-0.0.46/llama_stack/providers/adapters/inference/vllm/__init__.py +15 -0
  147. llama_stack-0.0.46/llama_stack/providers/adapters/inference/vllm/config.py +22 -0
  148. llama_stack-0.0.46/llama_stack/providers/adapters/inference/vllm/vllm.py +154 -0
  149. llama_stack-0.0.46/llama_stack/providers/adapters/memory/__init__.py +5 -0
  150. llama_stack-0.0.46/llama_stack/providers/adapters/memory/chroma/__init__.py +15 -0
  151. llama_stack-0.0.46/llama_stack/providers/adapters/memory/chroma/chroma.py +159 -0
  152. llama_stack-0.0.46/llama_stack/providers/adapters/memory/pgvector/__init__.py +15 -0
  153. llama_stack-0.0.46/llama_stack/providers/adapters/memory/pgvector/config.py +17 -0
  154. llama_stack-0.0.46/llama_stack/providers/adapters/memory/pgvector/pgvector.py +213 -0
  155. llama_stack-0.0.46/llama_stack/providers/adapters/memory/qdrant/__init__.py +15 -0
  156. llama_stack-0.0.46/llama_stack/providers/adapters/memory/qdrant/config.py +25 -0
  157. llama_stack-0.0.46/llama_stack/providers/adapters/memory/qdrant/qdrant.py +170 -0
  158. llama_stack-0.0.46/llama_stack/providers/adapters/memory/sample/__init__.py +17 -0
  159. llama_stack-0.0.46/llama_stack/providers/adapters/memory/sample/config.py +12 -0
  160. llama_stack-0.0.46/llama_stack/providers/adapters/memory/sample/sample.py +23 -0
  161. llama_stack-0.0.46/llama_stack/providers/adapters/memory/weaviate/__init__.py +15 -0
  162. llama_stack-0.0.46/llama_stack/providers/adapters/memory/weaviate/config.py +16 -0
  163. llama_stack-0.0.46/llama_stack/providers/adapters/memory/weaviate/weaviate.py +192 -0
  164. llama_stack-0.0.46/llama_stack/providers/adapters/safety/__init__.py +5 -0
  165. llama_stack-0.0.46/llama_stack/providers/adapters/safety/bedrock/__init__.py +18 -0
  166. llama_stack-0.0.46/llama_stack/providers/adapters/safety/bedrock/bedrock.py +113 -0
  167. llama_stack-0.0.46/llama_stack/providers/adapters/safety/bedrock/config.py +16 -0
  168. llama_stack-0.0.46/llama_stack/providers/adapters/safety/sample/__init__.py +17 -0
  169. llama_stack-0.0.46/llama_stack/providers/adapters/safety/sample/config.py +12 -0
  170. llama_stack-0.0.46/llama_stack/providers/adapters/safety/sample/sample.py +23 -0
  171. llama_stack-0.0.46/llama_stack/providers/adapters/safety/together/__init__.py +18 -0
  172. llama_stack-0.0.46/llama_stack/providers/adapters/safety/together/config.py +26 -0
  173. llama_stack-0.0.46/llama_stack/providers/adapters/safety/together/together.py +101 -0
  174. llama_stack-0.0.46/llama_stack/providers/adapters/telemetry/__init__.py +5 -0
  175. llama_stack-0.0.46/llama_stack/providers/adapters/telemetry/opentelemetry/__init__.py +15 -0
  176. llama_stack-0.0.46/llama_stack/providers/adapters/telemetry/opentelemetry/config.py +12 -0
  177. llama_stack-0.0.46/llama_stack/providers/adapters/telemetry/opentelemetry/opentelemetry.py +201 -0
  178. llama_stack-0.0.46/llama_stack/providers/adapters/telemetry/sample/__init__.py +17 -0
  179. llama_stack-0.0.46/llama_stack/providers/adapters/telemetry/sample/config.py +12 -0
  180. llama_stack-0.0.46/llama_stack/providers/adapters/telemetry/sample/sample.py +18 -0
  181. llama_stack-0.0.46/llama_stack/providers/datatypes.py +207 -0
  182. llama_stack-0.0.46/llama_stack/providers/impls/__init__.py +5 -0
  183. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/__init__.py +5 -0
  184. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/__init__.py +27 -0
  185. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/agent_instance.py +844 -0
  186. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/agents.py +193 -0
  187. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/config.py +13 -0
  188. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/persistence.py +84 -0
  189. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/rag/__init__.py +5 -0
  190. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/rag/context_retriever.py +74 -0
  191. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/safety.py +57 -0
  192. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tests/__init__.py +5 -0
  193. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tests/code_execution.py +93 -0
  194. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tests/test_chat_agent.py +306 -0
  195. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tools/__init__.py +5 -0
  196. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tools/base.py +20 -0
  197. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tools/builtin.py +375 -0
  198. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/__init__.py +5 -0
  199. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_env_prefix.py +133 -0
  200. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_execution.py +256 -0
  201. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/matplotlib_custom_backend.py +87 -0
  202. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/utils.py +21 -0
  203. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/agents/tools/safety.py +43 -0
  204. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/codeshield/__init__.py +15 -0
  205. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/codeshield/code_scanner.py +58 -0
  206. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/codeshield/config.py +11 -0
  207. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/datasetio/__init__.py +18 -0
  208. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/datasetio/config.py +9 -0
  209. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/datasetio/datasetio.py +157 -0
  210. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/__init__.py +20 -0
  211. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/config.py +54 -0
  212. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/generation.py +480 -0
  213. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/inference.py +390 -0
  214. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/model_parallel.py +93 -0
  215. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/parallel_utils.py +378 -0
  216. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/quantization/__init__.py +5 -0
  217. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/quantization/fp8_impls.py +184 -0
  218. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/quantization/fp8_txest_disabled.py +76 -0
  219. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/quantization/hadamard_utils.py +92 -0
  220. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/quantization/loader.py +339 -0
  221. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/quantization/scripts/__init__.py +5 -0
  222. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/inference/quantization/scripts/quantize_checkpoint.py +161 -0
  223. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/memory/__init__.py +19 -0
  224. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/memory/config.py +13 -0
  225. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/memory/faiss.py +115 -0
  226. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/safety/__init__.py +17 -0
  227. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/safety/base.py +57 -0
  228. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/safety/config.py +48 -0
  229. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/safety/llama_guard.py +268 -0
  230. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/safety/prompt_guard.py +145 -0
  231. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/safety/safety.py +112 -0
  232. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/scoring/__init__.py +21 -0
  233. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/scoring/config.py +9 -0
  234. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/scoring/scorer/__init__.py +5 -0
  235. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/scoring/scorer/base_scorer.py +37 -0
  236. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/scoring/scorer/equality_scorer.py +49 -0
  237. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/scoring/scoring.py +109 -0
  238. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/telemetry/__init__.py +15 -0
  239. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/telemetry/config.py +13 -0
  240. llama_stack-0.0.46/llama_stack/providers/impls/meta_reference/telemetry/console.py +89 -0
  241. llama_stack-0.0.46/llama_stack/providers/impls/vllm/__init__.py +17 -0
  242. llama_stack-0.0.46/llama_stack/providers/impls/vllm/config.py +46 -0
  243. llama_stack-0.0.46/llama_stack/providers/impls/vllm/vllm.py +230 -0
  244. llama_stack-0.0.46/llama_stack/providers/registry/__init__.py +5 -0
  245. llama_stack-0.0.46/llama_stack/providers/registry/agents.py +43 -0
  246. llama_stack-0.0.46/llama_stack/providers/registry/datasetio.py +22 -0
  247. llama_stack-0.0.46/llama_stack/providers/registry/inference.py +152 -0
  248. llama_stack-0.0.46/llama_stack/providers/registry/memory.py +87 -0
  249. llama_stack-0.0.46/llama_stack/providers/registry/safety.py +73 -0
  250. llama_stack-0.0.46/llama_stack/providers/registry/scoring.py +25 -0
  251. llama_stack-0.0.46/llama_stack/providers/registry/telemetry.py +44 -0
  252. llama_stack-0.0.46/llama_stack/providers/tests/__init__.py +5 -0
  253. llama_stack-0.0.46/llama_stack/providers/tests/agents/__init__.py +5 -0
  254. llama_stack-0.0.46/llama_stack/providers/tests/agents/test_agent_persistence.py +148 -0
  255. llama_stack-0.0.46/llama_stack/providers/tests/agents/test_agents.py +311 -0
  256. llama_stack-0.0.46/llama_stack/providers/tests/datasetio/__init__.py +5 -0
  257. llama_stack-0.0.46/llama_stack/providers/tests/datasetio/test_datasetio.py +135 -0
  258. llama_stack-0.0.46/llama_stack/providers/tests/inference/__init__.py +5 -0
  259. llama_stack-0.0.46/llama_stack/providers/tests/inference/test_inference.py +404 -0
  260. llama_stack-0.0.46/llama_stack/providers/tests/inference/test_prompt_adapter.py +128 -0
  261. llama_stack-0.0.46/llama_stack/providers/tests/memory/__init__.py +5 -0
  262. llama_stack-0.0.46/llama_stack/providers/tests/memory/test_memory.py +161 -0
  263. llama_stack-0.0.46/llama_stack/providers/tests/resolver.py +100 -0
  264. llama_stack-0.0.46/llama_stack/providers/tests/safety/__init__.py +5 -0
  265. llama_stack-0.0.46/llama_stack/providers/tests/safety/test_safety.py +77 -0
  266. llama_stack-0.0.46/llama_stack/providers/tests/scoring/__init__.py +5 -0
  267. llama_stack-0.0.46/llama_stack/providers/tests/scoring/test_scoring.py +69 -0
  268. llama_stack-0.0.46/llama_stack/providers/utils/__init__.py +5 -0
  269. llama_stack-0.0.46/llama_stack/providers/utils/inference/__init__.py +33 -0
  270. llama_stack-0.0.46/llama_stack/providers/utils/inference/model_registry.py +41 -0
  271. llama_stack-0.0.46/llama_stack/providers/utils/inference/openai_compat.py +242 -0
  272. llama_stack-0.0.46/llama_stack/providers/utils/inference/prompt_adapter.py +226 -0
  273. llama_stack-0.0.46/llama_stack/providers/utils/kvstore/__init__.py +7 -0
  274. llama_stack-0.0.46/llama_stack/providers/utils/kvstore/api.py +21 -0
  275. llama_stack-0.0.46/llama_stack/providers/utils/kvstore/config.py +59 -0
  276. llama_stack-0.0.46/llama_stack/providers/utils/kvstore/kvstore.py +51 -0
  277. llama_stack-0.0.46/llama_stack/providers/utils/kvstore/redis/__init__.py +7 -0
  278. llama_stack-0.0.46/llama_stack/providers/utils/kvstore/redis/redis.py +52 -0
  279. llama_stack-0.0.46/llama_stack/providers/utils/kvstore/sqlite/__init__.py +7 -0
  280. llama_stack-0.0.46/llama_stack/providers/utils/kvstore/sqlite/config.py +19 -0
  281. llama_stack-0.0.46/llama_stack/providers/utils/kvstore/sqlite/sqlite.py +73 -0
  282. llama_stack-0.0.46/llama_stack/providers/utils/memory/__init__.py +5 -0
  283. llama_stack-0.0.46/llama_stack/providers/utils/memory/file_utils.py +26 -0
  284. llama_stack-0.0.46/llama_stack/providers/utils/memory/vector_store.py +197 -0
  285. llama_stack-0.0.46/llama_stack/providers/utils/telemetry/__init__.py +5 -0
  286. llama_stack-0.0.46/llama_stack/providers/utils/telemetry/tracing.py +245 -0
  287. llama_stack-0.0.46/llama_stack/scripts/__init__.py +5 -0
  288. llama_stack-0.0.46/llama_stack/templates/bedrock/build.yaml +9 -0
  289. llama_stack-0.0.46/llama_stack/templates/databricks/build.yaml +9 -0
  290. llama_stack-0.0.46/llama_stack/templates/fireworks/build.yaml +9 -0
  291. llama_stack-0.0.46/llama_stack/templates/hf-endpoint/build.yaml +9 -0
  292. llama_stack-0.0.46/llama_stack/templates/hf-serverless/build.yaml +9 -0
  293. llama_stack-0.0.46/llama_stack/templates/meta-reference-gpu/build.yaml +13 -0
  294. llama_stack-0.0.46/llama_stack/templates/meta-reference-quantized-gpu/build.yaml +13 -0
  295. llama_stack-0.0.46/llama_stack/templates/ollama/build.yaml +12 -0
  296. llama_stack-0.0.46/llama_stack/templates/tgi/build.yaml +12 -0
  297. llama_stack-0.0.46/llama_stack/templates/together/build.yaml +9 -0
  298. llama_stack-0.0.46/llama_stack/templates/vllm/build.yaml +9 -0
  299. llama_stack-0.0.46/llama_stack.egg-info/PKG-INFO +153 -0
  300. llama_stack-0.0.46/llama_stack.egg-info/SOURCES.txt +310 -0
  301. llama_stack-0.0.46/llama_stack.egg-info/dependency_links.txt +1 -0
  302. llama_stack-0.0.46/llama_stack.egg-info/entry_points.txt +3 -0
  303. llama_stack-0.0.46/llama_stack.egg-info/requires.txt +12 -0
  304. llama_stack-0.0.46/llama_stack.egg-info/top_level.txt +1 -0
  305. llama_stack-0.0.46/pyproject.toml +3 -0
  306. llama_stack-0.0.46/requirements.txt +12 -0
  307. llama_stack-0.0.46/setup.cfg +4 -0
  308. llama_stack-0.0.46/setup.py +46 -0
  309. llama_stack-0.0.46/tests/test_bedrock_inference.py +446 -0
  310. llama_stack-0.0.46/tests/test_e2e.py +183 -0
  311. llama_stack-0.0.46/tests/test_inference.py +255 -0
  312. llama_stack-0.0.46/tests/test_ollama_inference.py +346 -0
@@ -0,0 +1,22 @@
1
+ MIT License
2
+
3
+ Copyright (c) Meta Platforms, Inc. and affiliates
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining
6
+ a copy of this software and associated documentation files (the
7
+ "Software"), to deal in the Software without restriction, including
8
+ without limitation the rights to use, copy, modify, merge, publish,
9
+ distribute, sublicense, and/or sell copies of the Software, and to
10
+ permit persons to whom the Software is furnished to do so, subject to
11
+ the following conditions:
12
+
13
+ The above copyright notice and this permission notice shall be
14
+ included in all copies or substantial portions of the Software.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,4 @@
1
+ include requirements.txt
2
+ include llama_stack/distribution/*.sh
3
+ include llama_stack/cli/scripts/*.sh
4
+ include llama_stack/templates/*/build.yaml
@@ -0,0 +1,153 @@
1
+ Metadata-Version: 2.1
2
+ Name: llama_stack
3
+ Version: 0.0.46
4
+ Summary: Llama Stack
5
+ Home-page: https://github.com/meta-llama/llama-stack
6
+ Author: Meta Llama
7
+ Author-email: llama-oss@meta.com
8
+ Classifier: License :: OSI Approved :: MIT License
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Information Technology
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
15
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
16
+ Requires-Python: >=3.10
17
+ Description-Content-Type: text/markdown
18
+ License-File: LICENSE
19
+ Requires-Dist: blobfile
20
+ Requires-Dist: fire
21
+ Requires-Dist: httpx
22
+ Requires-Dist: huggingface-hub
23
+ Requires-Dist: llama-models>=0.0.46
24
+ Requires-Dist: prompt-toolkit
25
+ Requires-Dist: python-dotenv
26
+ Requires-Dist: pydantic>=2
27
+ Requires-Dist: requests
28
+ Requires-Dist: rich
29
+ Requires-Dist: setuptools
30
+ Requires-Dist: termcolor
31
+
32
+ <img src="https://github.com/user-attachments/assets/2fedfe0f-6df7-4441-98b2-87a1fd95ee1c" width="300" title="Llama Stack Logo" alt="Llama Stack Logo"/>
33
+
34
+ # Llama Stack
35
+
36
+ [![PyPI version](https://img.shields.io/pypi/v/llama_stack.svg)](https://pypi.org/project/llama_stack/)
37
+ [![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-stack)](https://pypi.org/project/llama-stack/)
38
+ [![Discord](https://img.shields.io/discord/1257833999603335178)](https://discord.gg/llama-stack)
39
+
40
+ This repository contains the Llama Stack API specifications as well as API Providers and Llama Stack Distributions.
41
+
42
+ The Llama Stack defines and standardizes the building blocks needed to bring generative AI applications to market. These blocks span the entire development lifecycle: from model training and fine-tuning, through product evaluation, to building and running AI agents in production. Beyond definition, we are building providers for the Llama Stack APIs. We are developing open-source versions and partnering with providers, ensuring developers can assemble AI solutions using consistent, interlocking pieces across platforms. The ultimate goal is to accelerate innovation in the AI space.
43
+
44
+ The Stack APIs are rapidly improving, but still very much work in progress and we invite feedback as well as direct contributions.
45
+
46
+
47
+ ## APIs
48
+
49
+ The Llama Stack consists of the following set of APIs:
50
+
51
+ - Inference
52
+ - Safety
53
+ - Memory
54
+ - Agentic System
55
+ - Evaluation
56
+ - Post Training
57
+ - Synthetic Data Generation
58
+ - Reward Scoring
59
+
60
+ Each of the APIs themselves is a collection of REST endpoints.
61
+
62
+
63
+ ## API Providers
64
+
65
+ A Provider is what makes the API real -- they provide the actual implementation backing the API.
66
+
67
+ As an example, for Inference, we could have the implementation be backed by open source libraries like `[ torch | vLLM | TensorRT ]` as possible options.
68
+
69
+ A provider can also be just a pointer to a remote REST service -- for example, cloud providers or dedicated inference providers could serve these APIs.
70
+
71
+
72
+ ## Llama Stack Distribution
73
+
74
+ A Distribution is where APIs and Providers are assembled together to provide a consistent whole to the end application developer. You can mix-and-match providers -- some could be backed by local code and some could be remote. As a hobbyist, you can serve a small model locally, but can choose a cloud provider for a large model. Regardless, the higher level APIs your app needs to work with don't need to change at all. You can even imagine moving across the server / mobile-device boundary as well always using the same uniform set of APIs for developing Generative AI applications.
75
+
76
+ ## Supported Llama Stack Implementations
77
+ ### API Providers
78
+
79
+
80
+ | **API Provider Builder** | **Environments** | **Agents** | **Inference** | **Memory** | **Safety** | **Telemetry** |
81
+ | :----: | :----: | :----: | :----: | :----: | :----: | :----: |
82
+ | Meta Reference | Single Node | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
83
+ | Fireworks | Hosted | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
84
+ | AWS Bedrock | Hosted | | :heavy_check_mark: | | :heavy_check_mark: | |
85
+ | Together | Hosted | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | |
86
+ | Ollama | Single Node | | :heavy_check_mark: | | | |
87
+ | TGI | Hosted and Single Node | | :heavy_check_mark: | | | |
88
+ | Chroma | Single Node | | | :heavy_check_mark: | | |
89
+ | PG Vector | Single Node | | | :heavy_check_mark: | | |
90
+ | PyTorch ExecuTorch | On-device iOS | :heavy_check_mark: | :heavy_check_mark: | | | |
91
+
92
+ ### Distributions
93
+ | **Distribution Provider** | **Docker** | **Inference** | **Memory** | **Safety** | **Telemetry** |
94
+ | :----: | :----: | :----: | :----: | :----: | :----: |
95
+ | Meta Reference | [Local GPU](https://hub.docker.com/repository/docker/llamastack/llamastack-local-gpu/general), [Local CPU](https://hub.docker.com/repository/docker/llamastack/llamastack-local-cpu/general) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
96
+ | Dell-TGI | [Local TGI + Chroma](https://hub.docker.com/repository/docker/llamastack/llamastack-local-tgi-chroma/general) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
97
+
98
+
99
+
100
+ ## Installation
101
+
102
+ You have two ways to install this repository:
103
+
104
+ 1. **Install as a package**:
105
+ You can install the repository directly from [PyPI](https://pypi.org/project/llama-stack/) by running the following command:
106
+ ```bash
107
+ pip install llama-stack
108
+ ```
109
+
110
+ 2. **Install from source**:
111
+ If you prefer to install from the source code, follow these steps:
112
+ ```bash
113
+ mkdir -p ~/local
114
+ cd ~/local
115
+ git clone git@github.com:meta-llama/llama-stack.git
116
+
117
+ conda create -n stack python=3.10
118
+ conda activate stack
119
+
120
+ cd llama-stack
121
+ $CONDA_PREFIX/bin/pip install -e .
122
+ ```
123
+
124
+ ## Documentation
125
+
126
+ The `llama` CLI makes it easy to work with the Llama Stack set of tools. Please find the following docs for details.
127
+
128
+ * [CLI reference](docs/cli_reference.md)
129
+ * Guide using `llama` CLI to work with Llama models (download, study prompts), and building/starting a Llama Stack distribution.
130
+ * [Getting Started](docs/getting_started.md)
131
+ * Quick guide to start a Llama Stack server.
132
+ * [Jupyter notebook](./docs/getting_started.ipynb) to walk-through how to use simple text and vision inference llama_stack_client APIs
133
+ * [Building a Llama Stack Distribution](docs/building_distro.md)
134
+ * Guide to build a Llama Stack distribution
135
+ * [Distributions](./distributions/)
136
+ * References to start Llama Stack distributions backed with different API providers.
137
+ * [Developer Cookbook](./docs/developer_cookbook.md)
138
+ * References to guides to help you get started based on your developer needs.
139
+ * [Contributing](CONTRIBUTING.md)
140
+ * [Adding a new API Provider](./docs/new_api_provider.md) to walk-through how to add a new API provider.
141
+
142
+ ## Llama Stack Client SDK
143
+
144
+ | **Language** | **Client SDK** | **Package** |
145
+ | :----: | :----: | :----: |
146
+ | Python | [llama-stack-client-python](https://github.com/meta-llama/llama-stack-client-python) | [![PyPI version](https://img.shields.io/pypi/v/llama_stack_client.svg)](https://pypi.org/project/llama_stack_client/)
147
+ | Swift | [llama-stack-client-swift](https://github.com/meta-llama/llama-stack-client-swift) | [![Swift Package Index](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2Fmeta-llama%2Fllama-stack-client-swift%2Fbadge%3Ftype%3Dswift-versions)](https://swiftpackageindex.com/meta-llama/llama-stack-client-swift)
148
+ | Node | [llama-stack-client-node](https://github.com/meta-llama/llama-stack-client-node) | [![NPM version](https://img.shields.io/npm/v/llama-stack-client.svg)](https://npmjs.org/package/llama-stack-client)
149
+ | Kotlin | [llama-stack-client-kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) |
150
+
151
+ Check out our client SDKs for connecting to Llama Stack server in your preferred language, you can choose from [python](https://github.com/meta-llama/llama-stack-client-python), [node](https://github.com/meta-llama/llama-stack-client-node), [swift](https://github.com/meta-llama/llama-stack-client-swift), and [kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) programming languages to quickly build your applications.
152
+
153
+ You can find more example scripts with client SDKs to talk with the Llama Stack server in our [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main/examples) repo.
@@ -0,0 +1,122 @@
1
+ <img src="https://github.com/user-attachments/assets/2fedfe0f-6df7-4441-98b2-87a1fd95ee1c" width="300" title="Llama Stack Logo" alt="Llama Stack Logo"/>
2
+
3
+ # Llama Stack
4
+
5
+ [![PyPI version](https://img.shields.io/pypi/v/llama_stack.svg)](https://pypi.org/project/llama_stack/)
6
+ [![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-stack)](https://pypi.org/project/llama-stack/)
7
+ [![Discord](https://img.shields.io/discord/1257833999603335178)](https://discord.gg/llama-stack)
8
+
9
+ This repository contains the Llama Stack API specifications as well as API Providers and Llama Stack Distributions.
10
+
11
+ The Llama Stack defines and standardizes the building blocks needed to bring generative AI applications to market. These blocks span the entire development lifecycle: from model training and fine-tuning, through product evaluation, to building and running AI agents in production. Beyond definition, we are building providers for the Llama Stack APIs. We are developing open-source versions and partnering with providers, ensuring developers can assemble AI solutions using consistent, interlocking pieces across platforms. The ultimate goal is to accelerate innovation in the AI space.
12
+
13
+ The Stack APIs are rapidly improving, but still very much work in progress and we invite feedback as well as direct contributions.
14
+
15
+
16
+ ## APIs
17
+
18
+ The Llama Stack consists of the following set of APIs:
19
+
20
+ - Inference
21
+ - Safety
22
+ - Memory
23
+ - Agentic System
24
+ - Evaluation
25
+ - Post Training
26
+ - Synthetic Data Generation
27
+ - Reward Scoring
28
+
29
+ Each of the APIs themselves is a collection of REST endpoints.
30
+
31
+
32
+ ## API Providers
33
+
34
+ A Provider is what makes the API real -- they provide the actual implementation backing the API.
35
+
36
+ As an example, for Inference, we could have the implementation be backed by open source libraries like `[ torch | vLLM | TensorRT ]` as possible options.
37
+
38
+ A provider can also be just a pointer to a remote REST service -- for example, cloud providers or dedicated inference providers could serve these APIs.
39
+
40
+
41
+ ## Llama Stack Distribution
42
+
43
+ A Distribution is where APIs and Providers are assembled together to provide a consistent whole to the end application developer. You can mix-and-match providers -- some could be backed by local code and some could be remote. As a hobbyist, you can serve a small model locally, but can choose a cloud provider for a large model. Regardless, the higher level APIs your app needs to work with don't need to change at all. You can even imagine moving across the server / mobile-device boundary as well always using the same uniform set of APIs for developing Generative AI applications.
44
+
45
+ ## Supported Llama Stack Implementations
46
+ ### API Providers
47
+
48
+
49
+ | **API Provider Builder** | **Environments** | **Agents** | **Inference** | **Memory** | **Safety** | **Telemetry** |
50
+ | :----: | :----: | :----: | :----: | :----: | :----: | :----: |
51
+ | Meta Reference | Single Node | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
52
+ | Fireworks | Hosted | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
53
+ | AWS Bedrock | Hosted | | :heavy_check_mark: | | :heavy_check_mark: | |
54
+ | Together | Hosted | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | |
55
+ | Ollama | Single Node | | :heavy_check_mark: | | | |
56
+ | TGI | Hosted and Single Node | | :heavy_check_mark: | | | |
57
+ | Chroma | Single Node | | | :heavy_check_mark: | | |
58
+ | PG Vector | Single Node | | | :heavy_check_mark: | | |
59
+ | PyTorch ExecuTorch | On-device iOS | :heavy_check_mark: | :heavy_check_mark: | | | |
60
+
61
+ ### Distributions
62
+ | **Distribution Provider** | **Docker** | **Inference** | **Memory** | **Safety** | **Telemetry** |
63
+ | :----: | :----: | :----: | :----: | :----: | :----: |
64
+ | Meta Reference | [Local GPU](https://hub.docker.com/repository/docker/llamastack/llamastack-local-gpu/general), [Local CPU](https://hub.docker.com/repository/docker/llamastack/llamastack-local-cpu/general) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
65
+ | Dell-TGI | [Local TGI + Chroma](https://hub.docker.com/repository/docker/llamastack/llamastack-local-tgi-chroma/general) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
66
+
67
+
68
+
69
+ ## Installation
70
+
71
+ You have two ways to install this repository:
72
+
73
+ 1. **Install as a package**:
74
+ You can install the repository directly from [PyPI](https://pypi.org/project/llama-stack/) by running the following command:
75
+ ```bash
76
+ pip install llama-stack
77
+ ```
78
+
79
+ 2. **Install from source**:
80
+ If you prefer to install from the source code, follow these steps:
81
+ ```bash
82
+ mkdir -p ~/local
83
+ cd ~/local
84
+ git clone git@github.com:meta-llama/llama-stack.git
85
+
86
+ conda create -n stack python=3.10
87
+ conda activate stack
88
+
89
+ cd llama-stack
90
+ $CONDA_PREFIX/bin/pip install -e .
91
+ ```
92
+
93
+ ## Documentations
94
+
95
+ The `llama` CLI makes it easy to work with the Llama Stack set of tools. Please find the following docs for details.
96
+
97
+ * [CLI reference](docs/cli_reference.md)
98
+ * Guide using `llama` CLI to work with Llama models (download, study prompts), and building/starting a Llama Stack distribution.
99
+ * [Getting Started](docs/getting_started.md)
100
+ * Quick guide to start a Llama Stack server.
101
+ * [Jupyter notebook](./docs/getting_started.ipynb) to walk-through how to use simple text and vision inference llama_stack_client APIs
102
+ * [Building a Llama Stack Distribution](docs/building_distro.md)
103
+ * Guide to build a Llama Stack distribution
104
+ * [Distributions](./distributions/)
105
+ * References to start Llama Stack distributions backed with different API providers.
106
+ * [Developer Cookbook](./docs/developer_cookbook.md)
107
+ * References to guides to help you get started based on your developer needs.
108
+ * [Contributing](CONTRIBUTING.md)
109
+ * [Adding a new API Provider](./docs/new_api_provider.md) to walk-through how to add a new API provider.
110
+
111
+ ## Llama Stack Client SDK
112
+
113
+ | **Language** | **Client SDK** | **Package** |
114
+ | :----: | :----: | :----: |
115
+ | Python | [llama-stack-client-python](https://github.com/meta-llama/llama-stack-client-python) | [![PyPI version](https://img.shields.io/pypi/v/llama_stack_client.svg)](https://pypi.org/project/llama_stack_client/)
116
+ | Swift | [llama-stack-client-swift](https://github.com/meta-llama/llama-stack-client-swift) | [![Swift Package Index](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2Fmeta-llama%2Fllama-stack-client-swift%2Fbadge%3Ftype%3Dswift-versions)](https://swiftpackageindex.com/meta-llama/llama-stack-client-swift)
117
+ | Node | [llama-stack-client-node](https://github.com/meta-llama/llama-stack-client-node) | [![NPM version](https://img.shields.io/npm/v/llama-stack-client.svg)](https://npmjs.org/package/llama-stack-client)
118
+ | Kotlin | [llama-stack-client-kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) |
119
+
120
+ Check out our client SDKs for connecting to Llama Stack server in your preferred language, you can choose from [python](https://github.com/meta-llama/llama-stack-client-python), [node](https://github.com/meta-llama/llama-stack-client-node), [swift](https://github.com/meta-llama/llama-stack-client-swift), and [kotlin](https://github.com/meta-llama/llama-stack-client-kotlin) programming languages to quickly build your applications.
121
+
122
+ You can find more example scripts with client SDKs to talk with the Llama Stack server in our [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main/examples) repo.
@@ -0,0 +1,9 @@
1
+ name: bedrock
2
+ distribution_spec:
3
+ description: Use Amazon Bedrock APIs.
4
+ providers:
5
+ inference: remote::bedrock
6
+ memory: meta-reference
7
+ safety: meta-reference
8
+ agents: meta-reference
9
+ telemetry: meta-reference
@@ -0,0 +1,9 @@
1
+ name: databricks
2
+ distribution_spec:
3
+ description: Use Databricks for running LLM inference
4
+ providers:
5
+ inference: remote::databricks
6
+ memory: meta-reference
7
+ safety: meta-reference
8
+ agents: meta-reference
9
+ telemetry: meta-reference
@@ -0,0 +1,9 @@
1
+ name: fireworks
2
+ distribution_spec:
3
+ description: Use Fireworks.ai for running LLM inference
4
+ providers:
5
+ inference: remote::fireworks
6
+ memory: meta-reference
7
+ safety: meta-reference
8
+ agents: meta-reference
9
+ telemetry: meta-reference
@@ -0,0 +1,9 @@
1
+ name: hf-endpoint
2
+ distribution_spec:
3
+ description: "Like local, but use Hugging Face Inference Endpoints for running LLM inference.\nSee https://hf.co/docs/api-endpoints."
4
+ providers:
5
+ inference: remote::hf::endpoint
6
+ memory: meta-reference
7
+ safety: meta-reference
8
+ agents: meta-reference
9
+ telemetry: meta-reference
@@ -0,0 +1,9 @@
1
+ name: hf-serverless
2
+ distribution_spec:
3
+ description: "Like local, but use Hugging Face Inference API (serverless) for running LLM inference.\nSee https://hf.co/docs/api-inference."
4
+ providers:
5
+ inference: remote::hf::serverless
6
+ memory: meta-reference
7
+ safety: meta-reference
8
+ agents: meta-reference
9
+ telemetry: meta-reference
@@ -0,0 +1,13 @@
1
+ name: meta-reference-gpu
2
+ distribution_spec:
3
+ docker_image: pytorch/pytorch:2.5.0-cuda12.4-cudnn9-runtime
4
+ description: Use code from `llama_stack` itself to serve all llama stack APIs
5
+ providers:
6
+ inference: meta-reference
7
+ memory:
8
+ - meta-reference
9
+ - remote::chromadb
10
+ - remote::pgvector
11
+ safety: meta-reference
12
+ agents: meta-reference
13
+ telemetry: meta-reference
@@ -0,0 +1,13 @@
1
+ name: meta-reference-quantized-gpu
2
+ distribution_spec:
3
+ docker_image: pytorch/pytorch:2.5.0-cuda12.4-cudnn9-runtime
4
+ description: Use code from `llama_stack` itself to serve all llama stack APIs
5
+ providers:
6
+ inference: meta-reference-quantized
7
+ memory:
8
+ - meta-reference
9
+ - remote::chromadb
10
+ - remote::pgvector
11
+ safety: meta-reference
12
+ agents: meta-reference
13
+ telemetry: meta-reference
@@ -0,0 +1,12 @@
1
+ name: ollama
2
+ distribution_spec:
3
+ description: Use ollama for running LLM inference
4
+ providers:
5
+ inference: remote::ollama
6
+ memory:
7
+ - meta-reference
8
+ - remote::chromadb
9
+ - remote::pgvector
10
+ safety: meta-reference
11
+ agents: meta-reference
12
+ telemetry: meta-reference
@@ -0,0 +1,12 @@
1
+ name: tgi
2
+ distribution_spec:
3
+ description: Use TGI for running LLM inference
4
+ providers:
5
+ inference: remote::tgi
6
+ memory:
7
+ - meta-reference
8
+ - remote::chromadb
9
+ - remote::pgvector
10
+ safety: meta-reference
11
+ agents: meta-reference
12
+ telemetry: meta-reference
@@ -0,0 +1,9 @@
1
+ name: together
2
+ distribution_spec:
3
+ description: Use Together.ai for running LLM inference
4
+ providers:
5
+ inference: remote::together
6
+ memory: remote::weaviate
7
+ safety: remote::together
8
+ agents: meta-reference
9
+ telemetry: meta-reference
@@ -0,0 +1,9 @@
1
+ name: vllm
2
+ distribution_spec:
3
+ description: Like local, but use vLLM for running LLM inference
4
+ providers:
5
+ inference: vllm
6
+ memory: meta-reference
7
+ safety: meta-reference
8
+ agents: meta-reference
9
+ telemetry: meta-reference
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
@@ -0,0 +1,7 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the terms described in the LICENSE file in
5
+ # the root directory of this source tree.
6
+
7
+ from .agents import * # noqa: F401 F403