llama-stack 0.0.42__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack/__init__.py +5 -0
- llama_stack/apis/agents/__init__.py +1 -1
- llama_stack/apis/agents/agents.py +700 -281
- llama_stack/apis/agents/openai_responses.py +1311 -0
- llama_stack/{providers/adapters/memory/sample/config.py → apis/batches/__init__.py} +2 -5
- llama_stack/apis/batches/batches.py +100 -0
- llama_stack/apis/benchmarks/__init__.py +7 -0
- llama_stack/apis/benchmarks/benchmarks.py +108 -0
- llama_stack/apis/common/content_types.py +143 -0
- llama_stack/apis/common/errors.py +103 -0
- llama_stack/apis/common/job_types.py +38 -0
- llama_stack/apis/common/responses.py +36 -0
- llama_stack/apis/common/training_types.py +36 -5
- llama_stack/apis/common/type_system.py +158 -0
- llama_stack/apis/conversations/__init__.py +31 -0
- llama_stack/apis/conversations/conversations.py +286 -0
- llama_stack/apis/datasetio/__init__.py +7 -0
- llama_stack/apis/datasetio/datasetio.py +59 -0
- llama_stack/apis/datasets/__init__.py +7 -0
- llama_stack/apis/datasets/datasets.py +251 -0
- llama_stack/apis/datatypes.py +160 -0
- llama_stack/apis/eval/__init__.py +7 -0
- llama_stack/apis/eval/eval.py +169 -0
- llama_stack/apis/files/__init__.py +7 -0
- llama_stack/apis/files/files.py +199 -0
- llama_stack/apis/inference/__init__.py +1 -1
- llama_stack/apis/inference/inference.py +1169 -113
- llama_stack/apis/inspect/__init__.py +1 -1
- llama_stack/apis/inspect/inspect.py +69 -16
- llama_stack/apis/models/__init__.py +1 -1
- llama_stack/apis/models/models.py +148 -21
- llama_stack/apis/post_training/__init__.py +1 -1
- llama_stack/apis/post_training/post_training.py +265 -120
- llama_stack/{providers/adapters/agents/sample/config.py → apis/prompts/__init__.py} +2 -5
- llama_stack/apis/prompts/prompts.py +204 -0
- llama_stack/apis/providers/__init__.py +7 -0
- llama_stack/apis/providers/providers.py +69 -0
- llama_stack/apis/resource.py +37 -0
- llama_stack/apis/safety/__init__.py +1 -1
- llama_stack/apis/safety/safety.py +95 -12
- llama_stack/apis/scoring/__init__.py +7 -0
- llama_stack/apis/scoring/scoring.py +93 -0
- llama_stack/apis/scoring_functions/__init__.py +7 -0
- llama_stack/apis/scoring_functions/scoring_functions.py +208 -0
- llama_stack/apis/shields/__init__.py +1 -1
- llama_stack/apis/shields/shields.py +76 -33
- llama_stack/apis/synthetic_data_generation/__init__.py +1 -1
- llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +40 -17
- llama_stack/apis/telemetry/__init__.py +1 -1
- llama_stack/apis/telemetry/telemetry.py +322 -31
- llama_stack/apis/{dataset → tools}/__init__.py +2 -1
- llama_stack/apis/tools/rag_tool.py +218 -0
- llama_stack/apis/tools/tools.py +221 -0
- llama_stack/apis/vector_io/__init__.py +7 -0
- llama_stack/apis/vector_io/vector_io.py +960 -0
- llama_stack/apis/vector_stores/__init__.py +7 -0
- llama_stack/apis/vector_stores/vector_stores.py +51 -0
- llama_stack/apis/version.py +9 -0
- llama_stack/cli/llama.py +13 -5
- llama_stack/cli/stack/_list_deps.py +182 -0
- llama_stack/cli/stack/list_apis.py +1 -1
- llama_stack/cli/stack/list_deps.py +55 -0
- llama_stack/cli/stack/list_providers.py +24 -10
- llama_stack/cli/stack/list_stacks.py +56 -0
- llama_stack/cli/stack/remove.py +115 -0
- llama_stack/cli/stack/run.py +169 -56
- llama_stack/cli/stack/stack.py +18 -4
- llama_stack/cli/stack/utils.py +151 -0
- llama_stack/cli/table.py +23 -61
- llama_stack/cli/utils.py +29 -0
- llama_stack/core/access_control/access_control.py +131 -0
- llama_stack/core/access_control/conditions.py +129 -0
- llama_stack/core/access_control/datatypes.py +107 -0
- llama_stack/core/build.py +164 -0
- llama_stack/core/client.py +205 -0
- llama_stack/core/common.sh +37 -0
- llama_stack/{distribution → core}/configure.py +74 -55
- llama_stack/core/conversations/conversations.py +309 -0
- llama_stack/core/datatypes.py +625 -0
- llama_stack/core/distribution.py +276 -0
- llama_stack/core/external.py +54 -0
- llama_stack/core/id_generation.py +42 -0
- llama_stack/core/inspect.py +86 -0
- llama_stack/core/library_client.py +539 -0
- llama_stack/core/prompts/prompts.py +234 -0
- llama_stack/core/providers.py +137 -0
- llama_stack/core/request_headers.py +115 -0
- llama_stack/core/resolver.py +506 -0
- llama_stack/core/routers/__init__.py +101 -0
- llama_stack/core/routers/datasets.py +73 -0
- llama_stack/core/routers/eval_scoring.py +155 -0
- llama_stack/core/routers/inference.py +645 -0
- llama_stack/core/routers/safety.py +85 -0
- llama_stack/core/routers/tool_runtime.py +91 -0
- llama_stack/core/routers/vector_io.py +442 -0
- llama_stack/core/routing_tables/benchmarks.py +62 -0
- llama_stack/core/routing_tables/common.py +254 -0
- llama_stack/core/routing_tables/datasets.py +91 -0
- llama_stack/core/routing_tables/models.py +163 -0
- llama_stack/core/routing_tables/scoring_functions.py +66 -0
- llama_stack/core/routing_tables/shields.py +61 -0
- llama_stack/core/routing_tables/toolgroups.py +129 -0
- llama_stack/core/routing_tables/vector_stores.py +292 -0
- llama_stack/core/server/auth.py +187 -0
- llama_stack/core/server/auth_providers.py +494 -0
- llama_stack/core/server/quota.py +110 -0
- llama_stack/core/server/routes.py +141 -0
- llama_stack/core/server/server.py +542 -0
- llama_stack/core/server/tracing.py +80 -0
- llama_stack/core/stack.py +546 -0
- llama_stack/core/start_stack.sh +117 -0
- llama_stack/core/storage/datatypes.py +283 -0
- llama_stack/{cli/model → core/store}/__init__.py +1 -1
- llama_stack/core/store/registry.py +199 -0
- llama_stack/core/testing_context.py +49 -0
- llama_stack/core/ui/app.py +55 -0
- llama_stack/core/ui/modules/api.py +32 -0
- llama_stack/core/ui/modules/utils.py +42 -0
- llama_stack/core/ui/page/distribution/datasets.py +18 -0
- llama_stack/core/ui/page/distribution/eval_tasks.py +20 -0
- llama_stack/core/ui/page/distribution/models.py +18 -0
- llama_stack/core/ui/page/distribution/providers.py +27 -0
- llama_stack/core/ui/page/distribution/resources.py +48 -0
- llama_stack/core/ui/page/distribution/scoring_functions.py +18 -0
- llama_stack/core/ui/page/distribution/shields.py +19 -0
- llama_stack/core/ui/page/evaluations/app_eval.py +143 -0
- llama_stack/core/ui/page/evaluations/native_eval.py +253 -0
- llama_stack/core/ui/page/playground/chat.py +130 -0
- llama_stack/core/ui/page/playground/tools.py +352 -0
- llama_stack/core/utils/config.py +30 -0
- llama_stack/{distribution → core}/utils/config_dirs.py +3 -6
- llama_stack/core/utils/config_resolution.py +125 -0
- llama_stack/core/utils/context.py +84 -0
- llama_stack/core/utils/exec.py +96 -0
- llama_stack/{providers/impls/meta_reference/codeshield/config.py → core/utils/image_types.py} +4 -3
- llama_stack/{distribution → core}/utils/model_utils.py +2 -2
- llama_stack/{distribution → core}/utils/prompt_for_config.py +30 -63
- llama_stack/{apis/batch_inference → distributions/dell}/__init__.py +1 -1
- llama_stack/distributions/dell/build.yaml +33 -0
- llama_stack/distributions/dell/dell.py +158 -0
- llama_stack/distributions/dell/run-with-safety.yaml +141 -0
- llama_stack/distributions/dell/run.yaml +132 -0
- llama_stack/distributions/meta-reference-gpu/__init__.py +7 -0
- llama_stack/distributions/meta-reference-gpu/build.yaml +32 -0
- llama_stack/distributions/meta-reference-gpu/meta_reference.py +163 -0
- llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +154 -0
- llama_stack/distributions/meta-reference-gpu/run.yaml +139 -0
- llama_stack/{apis/evals → distributions/nvidia}/__init__.py +1 -1
- llama_stack/distributions/nvidia/build.yaml +29 -0
- llama_stack/distributions/nvidia/nvidia.py +154 -0
- llama_stack/distributions/nvidia/run-with-safety.yaml +137 -0
- llama_stack/distributions/nvidia/run.yaml +116 -0
- llama_stack/distributions/open-benchmark/__init__.py +7 -0
- llama_stack/distributions/open-benchmark/build.yaml +36 -0
- llama_stack/distributions/open-benchmark/open_benchmark.py +303 -0
- llama_stack/distributions/open-benchmark/run.yaml +252 -0
- llama_stack/distributions/postgres-demo/__init__.py +7 -0
- llama_stack/distributions/postgres-demo/build.yaml +23 -0
- llama_stack/distributions/postgres-demo/postgres_demo.py +125 -0
- llama_stack/distributions/postgres-demo/run.yaml +115 -0
- llama_stack/{apis/memory → distributions/starter}/__init__.py +1 -1
- llama_stack/distributions/starter/build.yaml +61 -0
- llama_stack/distributions/starter/run-with-postgres-store.yaml +285 -0
- llama_stack/distributions/starter/run.yaml +276 -0
- llama_stack/distributions/starter/starter.py +345 -0
- llama_stack/distributions/starter-gpu/__init__.py +7 -0
- llama_stack/distributions/starter-gpu/build.yaml +61 -0
- llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +288 -0
- llama_stack/distributions/starter-gpu/run.yaml +279 -0
- llama_stack/distributions/starter-gpu/starter_gpu.py +20 -0
- llama_stack/distributions/template.py +456 -0
- llama_stack/distributions/watsonx/__init__.py +7 -0
- llama_stack/distributions/watsonx/build.yaml +33 -0
- llama_stack/distributions/watsonx/run.yaml +133 -0
- llama_stack/distributions/watsonx/watsonx.py +95 -0
- llama_stack/env.py +24 -0
- llama_stack/log.py +314 -0
- llama_stack/models/llama/checkpoint.py +164 -0
- llama_stack/models/llama/datatypes.py +164 -0
- llama_stack/models/llama/hadamard_utils.py +86 -0
- llama_stack/models/llama/llama3/args.py +74 -0
- llama_stack/models/llama/llama3/chat_format.py +286 -0
- llama_stack/models/llama/llama3/generation.py +376 -0
- llama_stack/models/llama/llama3/interface.py +255 -0
- llama_stack/models/llama/llama3/model.py +304 -0
- llama_stack/models/llama/llama3/multimodal/__init__.py +12 -0
- llama_stack/models/llama/llama3/multimodal/encoder_utils.py +180 -0
- llama_stack/models/llama/llama3/multimodal/image_transform.py +409 -0
- llama_stack/models/llama/llama3/multimodal/model.py +1430 -0
- llama_stack/models/llama/llama3/multimodal/utils.py +26 -0
- llama_stack/models/llama/llama3/prompt_templates/__init__.py +22 -0
- llama_stack/models/llama/llama3/prompt_templates/base.py +39 -0
- llama_stack/models/llama/llama3/prompt_templates/system_prompts.py +319 -0
- llama_stack/models/llama/llama3/prompt_templates/tool_response.py +62 -0
- llama_stack/models/llama/llama3/quantization/loader.py +316 -0
- llama_stack/models/llama/llama3/template_data.py +116 -0
- llama_stack/models/llama/llama3/tokenizer.model +128000 -0
- llama_stack/models/llama/llama3/tokenizer.py +198 -0
- llama_stack/models/llama/llama3/tool_utils.py +266 -0
- llama_stack/models/llama/llama3_1/__init__.py +12 -0
- llama_stack/models/llama/llama3_1/prompt_format.md +358 -0
- llama_stack/models/llama/llama3_1/prompts.py +258 -0
- llama_stack/models/llama/llama3_2/prompts_text.py +229 -0
- llama_stack/models/llama/llama3_2/prompts_vision.py +126 -0
- llama_stack/models/llama/llama3_2/text_prompt_format.md +286 -0
- llama_stack/models/llama/llama3_2/vision_prompt_format.md +141 -0
- llama_stack/models/llama/llama3_3/prompts.py +259 -0
- llama_stack/models/llama/llama4/args.py +107 -0
- llama_stack/models/llama/llama4/chat_format.py +317 -0
- llama_stack/models/llama/llama4/datatypes.py +56 -0
- llama_stack/models/llama/llama4/ffn.py +58 -0
- llama_stack/models/llama/llama4/generation.py +313 -0
- llama_stack/models/llama/llama4/model.py +437 -0
- llama_stack/models/llama/llama4/moe.py +214 -0
- llama_stack/models/llama/llama4/preprocess.py +435 -0
- llama_stack/models/llama/llama4/prompt_format.md +304 -0
- llama_stack/models/llama/llama4/prompt_templates/system_prompts.py +136 -0
- llama_stack/models/llama/llama4/prompts.py +279 -0
- llama_stack/models/llama/llama4/quantization/__init__.py +5 -0
- llama_stack/models/llama/llama4/quantization/loader.py +226 -0
- llama_stack/models/llama/llama4/tokenizer.model +200000 -0
- llama_stack/models/llama/llama4/tokenizer.py +263 -0
- llama_stack/models/llama/llama4/vision/__init__.py +5 -0
- llama_stack/models/llama/llama4/vision/embedding.py +210 -0
- llama_stack/models/llama/llama4/vision/encoder.py +412 -0
- llama_stack/models/llama/prompt_format.py +191 -0
- llama_stack/models/llama/quantize_impls.py +316 -0
- llama_stack/models/llama/sku_list.py +1029 -0
- llama_stack/models/llama/sku_types.py +233 -0
- llama_stack/models/llama/tokenizer_utils.py +40 -0
- llama_stack/providers/datatypes.py +136 -107
- llama_stack/providers/inline/__init__.py +5 -0
- llama_stack/providers/inline/agents/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/agents/meta_reference}/__init__.py +12 -5
- llama_stack/providers/inline/agents/meta_reference/agent_instance.py +1024 -0
- llama_stack/providers/inline/agents/meta_reference/agents.py +383 -0
- llama_stack/providers/inline/agents/meta_reference/config.py +37 -0
- llama_stack/providers/inline/agents/meta_reference/persistence.py +228 -0
- llama_stack/providers/inline/agents/meta_reference/responses/__init__.py +5 -0
- llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +423 -0
- llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +1226 -0
- llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py +449 -0
- llama_stack/providers/inline/agents/meta_reference/responses/types.py +194 -0
- llama_stack/providers/inline/agents/meta_reference/responses/utils.py +365 -0
- llama_stack/providers/inline/agents/meta_reference/safety.py +52 -0
- llama_stack/providers/inline/batches/__init__.py +5 -0
- llama_stack/providers/inline/batches/reference/__init__.py +36 -0
- llama_stack/providers/inline/batches/reference/batches.py +679 -0
- llama_stack/providers/inline/batches/reference/config.py +40 -0
- llama_stack/providers/inline/datasetio/__init__.py +5 -0
- llama_stack/providers/inline/datasetio/localfs/__init__.py +20 -0
- llama_stack/providers/inline/datasetio/localfs/config.py +23 -0
- llama_stack/providers/inline/datasetio/localfs/datasetio.py +113 -0
- llama_stack/providers/inline/eval/__init__.py +5 -0
- llama_stack/providers/inline/eval/meta_reference/__init__.py +28 -0
- llama_stack/providers/inline/eval/meta_reference/config.py +23 -0
- llama_stack/providers/inline/eval/meta_reference/eval.py +259 -0
- llama_stack/providers/inline/files/localfs/__init__.py +20 -0
- llama_stack/providers/inline/files/localfs/config.py +31 -0
- llama_stack/providers/inline/files/localfs/files.py +219 -0
- llama_stack/providers/inline/inference/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/__init__.py +4 -4
- llama_stack/providers/inline/inference/meta_reference/common.py +24 -0
- llama_stack/providers/inline/inference/meta_reference/config.py +68 -0
- llama_stack/providers/inline/inference/meta_reference/generators.py +211 -0
- llama_stack/providers/inline/inference/meta_reference/inference.py +158 -0
- llama_stack/providers/inline/inference/meta_reference/model_parallel.py +96 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/parallel_utils.py +56 -73
- llama_stack/providers/inline/inference/sentence_transformers/__init__.py +22 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/inference/sentence_transformers}/config.py +6 -4
- llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +83 -0
- llama_stack/providers/inline/post_training/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/utils.py +35 -0
- llama_stack/providers/inline/post_training/common/validator.py +36 -0
- llama_stack/providers/inline/post_training/huggingface/__init__.py +27 -0
- llama_stack/providers/inline/post_training/huggingface/config.py +83 -0
- llama_stack/providers/inline/post_training/huggingface/post_training.py +208 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py +519 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py +485 -0
- llama_stack/providers/inline/post_training/huggingface/utils.py +269 -0
- llama_stack/providers/inline/post_training/torchtune/__init__.py +27 -0
- llama_stack/providers/inline/post_training/torchtune/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +240 -0
- llama_stack/providers/inline/post_training/torchtune/common/utils.py +99 -0
- llama_stack/providers/inline/post_training/torchtune/config.py +20 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +57 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/sft.py +78 -0
- llama_stack/providers/inline/post_training/torchtune/post_training.py +178 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +588 -0
- llama_stack/providers/inline/safety/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/codeshield → inline/safety/code_scanner}/__init__.py +4 -2
- llama_stack/providers/inline/safety/code_scanner/code_scanner.py +128 -0
- llama_stack/providers/{impls/meta_reference/memory → inline/safety/code_scanner}/config.py +5 -3
- llama_stack/providers/inline/safety/llama_guard/__init__.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/config.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/llama_guard.py +489 -0
- llama_stack/providers/{adapters/memory/sample → inline/safety/prompt_guard}/__init__.py +4 -4
- llama_stack/providers/inline/safety/prompt_guard/config.py +32 -0
- llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py +131 -0
- llama_stack/providers/inline/scoring/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/__init__.py +25 -0
- llama_stack/providers/{adapters/memory/weaviate → inline/scoring/basic}/config.py +5 -7
- llama_stack/providers/inline/scoring/basic/scoring.py +126 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py +240 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py +41 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/docvqa.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/equality.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/ifeval.py +23 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_math_response.py +27 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py +71 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/subset_of.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py +80 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py +66 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py +58 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py +38 -0
- llama_stack/providers/inline/scoring/basic/utils/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py +3319 -0
- llama_stack/providers/inline/scoring/basic/utils/math_utils.py +330 -0
- llama_stack/providers/inline/scoring/braintrust/__init__.py +27 -0
- llama_stack/providers/inline/scoring/braintrust/braintrust.py +230 -0
- llama_stack/providers/inline/scoring/braintrust/config.py +21 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_correctness.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_relevancy.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_similarity.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_entity_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_precision.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_relevancy.py +23 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/factuality.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/faithfulness.py +24 -0
- llama_stack/providers/inline/scoring/llm_as_judge/__init__.py +21 -0
- llama_stack/providers/inline/scoring/llm_as_judge/config.py +14 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring.py +113 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +96 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +20 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +81 -0
- llama_stack/providers/inline/telemetry/__init__.py +5 -0
- llama_stack/providers/inline/telemetry/meta_reference/__init__.py +21 -0
- llama_stack/providers/inline/telemetry/meta_reference/config.py +47 -0
- llama_stack/providers/inline/telemetry/meta_reference/telemetry.py +252 -0
- llama_stack/providers/inline/tool_runtime/__init__.py +5 -0
- llama_stack/providers/inline/tool_runtime/rag/__init__.py +19 -0
- llama_stack/providers/{impls/meta_reference/telemetry → inline/tool_runtime/rag}/config.py +5 -3
- llama_stack/providers/inline/tool_runtime/rag/context_retriever.py +77 -0
- llama_stack/providers/inline/tool_runtime/rag/memory.py +332 -0
- llama_stack/providers/inline/vector_io/__init__.py +5 -0
- llama_stack/providers/inline/vector_io/chroma/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/chroma/config.py +30 -0
- llama_stack/providers/inline/vector_io/faiss/__init__.py +21 -0
- llama_stack/providers/inline/vector_io/faiss/config.py +26 -0
- llama_stack/providers/inline/vector_io/faiss/faiss.py +293 -0
- llama_stack/providers/inline/vector_io/milvus/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/milvus/config.py +29 -0
- llama_stack/providers/inline/vector_io/qdrant/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/qdrant/config.py +29 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/config.py +26 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +483 -0
- llama_stack/providers/registry/agents.py +16 -18
- llama_stack/providers/registry/batches.py +26 -0
- llama_stack/providers/registry/datasetio.py +49 -0
- llama_stack/providers/registry/eval.py +46 -0
- llama_stack/providers/registry/files.py +31 -0
- llama_stack/providers/registry/inference.py +273 -118
- llama_stack/providers/registry/post_training.py +69 -0
- llama_stack/providers/registry/safety.py +46 -41
- llama_stack/providers/registry/scoring.py +51 -0
- llama_stack/providers/registry/tool_runtime.py +87 -0
- llama_stack/providers/registry/vector_io.py +828 -0
- llama_stack/providers/remote/__init__.py +5 -0
- llama_stack/providers/remote/agents/__init__.py +5 -0
- llama_stack/providers/remote/datasetio/__init__.py +5 -0
- llama_stack/providers/{adapters/memory/chroma → remote/datasetio/huggingface}/__init__.py +7 -4
- llama_stack/providers/remote/datasetio/huggingface/config.py +23 -0
- llama_stack/providers/remote/datasetio/huggingface/huggingface.py +99 -0
- llama_stack/providers/remote/datasetio/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/datasetio/nvidia/config.py +61 -0
- llama_stack/providers/remote/datasetio/nvidia/datasetio.py +116 -0
- llama_stack/providers/remote/eval/__init__.py +5 -0
- llama_stack/providers/remote/eval/nvidia/__init__.py +31 -0
- llama_stack/providers/remote/eval/nvidia/config.py +29 -0
- llama_stack/providers/remote/eval/nvidia/eval.py +162 -0
- llama_stack/providers/remote/files/s3/__init__.py +19 -0
- llama_stack/providers/remote/files/s3/config.py +42 -0
- llama_stack/providers/remote/files/s3/files.py +313 -0
- llama_stack/providers/remote/inference/__init__.py +5 -0
- llama_stack/providers/{adapters/safety/sample → remote/inference/anthropic}/__init__.py +4 -6
- llama_stack/providers/remote/inference/anthropic/anthropic.py +36 -0
- llama_stack/providers/remote/inference/anthropic/config.py +28 -0
- llama_stack/providers/{impls/meta_reference/telemetry → remote/inference/azure}/__init__.py +4 -4
- llama_stack/providers/remote/inference/azure/azure.py +25 -0
- llama_stack/providers/remote/inference/azure/config.py +61 -0
- llama_stack/providers/{adapters → remote}/inference/bedrock/__init__.py +18 -17
- llama_stack/providers/remote/inference/bedrock/bedrock.py +142 -0
- llama_stack/providers/{adapters/inference/sample → remote/inference/bedrock}/config.py +3 -4
- llama_stack/providers/remote/inference/bedrock/models.py +29 -0
- llama_stack/providers/remote/inference/cerebras/__init__.py +19 -0
- llama_stack/providers/remote/inference/cerebras/cerebras.py +28 -0
- llama_stack/providers/remote/inference/cerebras/config.py +30 -0
- llama_stack/providers/{adapters → remote}/inference/databricks/__init__.py +4 -5
- llama_stack/providers/remote/inference/databricks/config.py +37 -0
- llama_stack/providers/remote/inference/databricks/databricks.py +44 -0
- llama_stack/providers/{adapters → remote}/inference/fireworks/__init__.py +8 -4
- llama_stack/providers/remote/inference/fireworks/config.py +27 -0
- llama_stack/providers/remote/inference/fireworks/fireworks.py +27 -0
- llama_stack/providers/{adapters/memory/pgvector → remote/inference/gemini}/__init__.py +4 -4
- llama_stack/providers/remote/inference/gemini/config.py +28 -0
- llama_stack/providers/remote/inference/gemini/gemini.py +82 -0
- llama_stack/providers/remote/inference/groq/__init__.py +15 -0
- llama_stack/providers/remote/inference/groq/config.py +34 -0
- llama_stack/providers/remote/inference/groq/groq.py +18 -0
- llama_stack/providers/remote/inference/llama_openai_compat/__init__.py +15 -0
- llama_stack/providers/remote/inference/llama_openai_compat/config.py +34 -0
- llama_stack/providers/remote/inference/llama_openai_compat/llama.py +46 -0
- llama_stack/providers/remote/inference/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/inference/nvidia/config.py +64 -0
- llama_stack/providers/remote/inference/nvidia/nvidia.py +61 -0
- llama_stack/providers/{adapters/safety/sample/config.py → remote/inference/nvidia/utils.py} +3 -4
- llama_stack/providers/{impls/vllm → remote/inference/ollama}/__init__.py +4 -6
- llama_stack/providers/remote/inference/ollama/config.py +25 -0
- llama_stack/providers/remote/inference/ollama/ollama.py +102 -0
- llama_stack/providers/{adapters/telemetry/opentelemetry → remote/inference/openai}/__init__.py +4 -4
- llama_stack/providers/remote/inference/openai/config.py +39 -0
- llama_stack/providers/remote/inference/openai/openai.py +38 -0
- llama_stack/providers/remote/inference/passthrough/__init__.py +23 -0
- llama_stack/providers/remote/inference/passthrough/config.py +34 -0
- llama_stack/providers/remote/inference/passthrough/passthrough.py +122 -0
- llama_stack/providers/remote/inference/runpod/__init__.py +16 -0
- llama_stack/providers/remote/inference/runpod/config.py +32 -0
- llama_stack/providers/remote/inference/runpod/runpod.py +42 -0
- llama_stack/providers/remote/inference/sambanova/__init__.py +16 -0
- llama_stack/providers/remote/inference/sambanova/config.py +34 -0
- llama_stack/providers/remote/inference/sambanova/sambanova.py +28 -0
- llama_stack/providers/{adapters → remote}/inference/tgi/__init__.py +3 -4
- llama_stack/providers/remote/inference/tgi/config.py +76 -0
- llama_stack/providers/remote/inference/tgi/tgi.py +85 -0
- llama_stack/providers/{adapters → remote}/inference/together/__init__.py +8 -4
- llama_stack/providers/remote/inference/together/config.py +27 -0
- llama_stack/providers/remote/inference/together/together.py +102 -0
- llama_stack/providers/remote/inference/vertexai/__init__.py +15 -0
- llama_stack/providers/remote/inference/vertexai/config.py +48 -0
- llama_stack/providers/remote/inference/vertexai/vertexai.py +54 -0
- llama_stack/providers/remote/inference/vllm/__init__.py +22 -0
- llama_stack/providers/remote/inference/vllm/config.py +59 -0
- llama_stack/providers/remote/inference/vllm/vllm.py +111 -0
- llama_stack/providers/remote/inference/watsonx/__init__.py +15 -0
- llama_stack/providers/remote/inference/watsonx/config.py +45 -0
- llama_stack/providers/remote/inference/watsonx/watsonx.py +336 -0
- llama_stack/providers/remote/post_training/__init__.py +5 -0
- llama_stack/providers/remote/post_training/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/post_training/nvidia/config.py +113 -0
- llama_stack/providers/remote/post_training/nvidia/models.py +27 -0
- llama_stack/providers/remote/post_training/nvidia/post_training.py +430 -0
- llama_stack/providers/remote/post_training/nvidia/utils.py +63 -0
- llama_stack/providers/remote/safety/__init__.py +5 -0
- llama_stack/providers/remote/safety/bedrock/bedrock.py +111 -0
- llama_stack/providers/remote/safety/bedrock/config.py +14 -0
- llama_stack/providers/{adapters/inference/sample → remote/safety/nvidia}/__init__.py +5 -4
- llama_stack/providers/remote/safety/nvidia/config.py +40 -0
- llama_stack/providers/remote/safety/nvidia/nvidia.py +161 -0
- llama_stack/providers/{adapters/agents/sample → remote/safety/sambanova}/__init__.py +5 -4
- llama_stack/providers/remote/safety/sambanova/config.py +37 -0
- llama_stack/providers/remote/safety/sambanova/sambanova.py +98 -0
- llama_stack/providers/remote/tool_runtime/__init__.py +5 -0
- llama_stack/providers/remote/tool_runtime/bing_search/__init__.py +21 -0
- llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +112 -0
- llama_stack/providers/remote/tool_runtime/bing_search/config.py +22 -0
- llama_stack/providers/remote/tool_runtime/brave_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +148 -0
- llama_stack/providers/remote/tool_runtime/brave_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py +15 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py +20 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +73 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +84 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/__init__.py +22 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py +21 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +140 -0
- llama_stack/providers/remote/vector_io/__init__.py +5 -0
- llama_stack/providers/remote/vector_io/chroma/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/chroma/chroma.py +215 -0
- llama_stack/providers/remote/vector_io/chroma/config.py +28 -0
- llama_stack/providers/remote/vector_io/milvus/__init__.py +18 -0
- llama_stack/providers/remote/vector_io/milvus/config.py +35 -0
- llama_stack/providers/remote/vector_io/milvus/milvus.py +375 -0
- llama_stack/providers/remote/vector_io/pgvector/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/pgvector/config.py +47 -0
- llama_stack/providers/remote/vector_io/pgvector/pgvector.py +460 -0
- llama_stack/providers/remote/vector_io/qdrant/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/qdrant/config.py +37 -0
- llama_stack/providers/remote/vector_io/qdrant/qdrant.py +265 -0
- llama_stack/providers/remote/vector_io/weaviate/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/weaviate/config.py +32 -0
- llama_stack/providers/remote/vector_io/weaviate/weaviate.py +393 -0
- llama_stack/providers/utils/bedrock/__init__.py +5 -0
- llama_stack/providers/utils/bedrock/client.py +74 -0
- llama_stack/providers/utils/bedrock/config.py +64 -0
- llama_stack/providers/utils/bedrock/refreshable_boto_session.py +112 -0
- llama_stack/providers/utils/common/__init__.py +5 -0
- llama_stack/providers/utils/common/data_schema_validator.py +103 -0
- llama_stack/providers/utils/datasetio/__init__.py +5 -0
- llama_stack/providers/utils/datasetio/url_utils.py +47 -0
- llama_stack/providers/utils/files/__init__.py +5 -0
- llama_stack/providers/utils/files/form_data.py +69 -0
- llama_stack/providers/utils/inference/__init__.py +8 -7
- llama_stack/providers/utils/inference/embedding_mixin.py +101 -0
- llama_stack/providers/utils/inference/inference_store.py +264 -0
- llama_stack/providers/utils/inference/litellm_openai_mixin.py +336 -0
- llama_stack/providers/utils/inference/model_registry.py +173 -23
- llama_stack/providers/utils/inference/openai_compat.py +1261 -49
- llama_stack/providers/utils/inference/openai_mixin.py +506 -0
- llama_stack/providers/utils/inference/prompt_adapter.py +365 -67
- llama_stack/providers/utils/kvstore/api.py +6 -6
- llama_stack/providers/utils/kvstore/config.py +28 -48
- llama_stack/providers/utils/kvstore/kvstore.py +61 -15
- llama_stack/providers/utils/kvstore/mongodb/__init__.py +9 -0
- llama_stack/providers/utils/kvstore/mongodb/mongodb.py +82 -0
- llama_stack/providers/utils/kvstore/postgres/__init__.py +7 -0
- llama_stack/providers/utils/kvstore/postgres/postgres.py +114 -0
- llama_stack/providers/utils/kvstore/redis/redis.py +33 -9
- llama_stack/providers/utils/kvstore/sqlite/config.py +2 -1
- llama_stack/providers/utils/kvstore/sqlite/sqlite.py +123 -22
- llama_stack/providers/utils/memory/file_utils.py +1 -1
- llama_stack/providers/utils/memory/openai_vector_store_mixin.py +1304 -0
- llama_stack/providers/utils/memory/vector_store.py +220 -82
- llama_stack/providers/utils/pagination.py +43 -0
- llama_stack/providers/utils/responses/__init__.py +5 -0
- llama_stack/providers/utils/responses/responses_store.py +292 -0
- llama_stack/providers/utils/scheduler.py +270 -0
- llama_stack/providers/utils/scoring/__init__.py +5 -0
- llama_stack/providers/utils/scoring/aggregation_utils.py +75 -0
- llama_stack/providers/utils/scoring/base_scoring_fn.py +114 -0
- llama_stack/providers/utils/scoring/basic_scoring_utils.py +26 -0
- llama_stack/providers/utils/sqlstore/__init__.py +5 -0
- llama_stack/providers/utils/sqlstore/api.py +128 -0
- llama_stack/providers/utils/sqlstore/authorized_sqlstore.py +319 -0
- llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py +343 -0
- llama_stack/providers/utils/sqlstore/sqlstore.py +70 -0
- llama_stack/providers/utils/telemetry/trace_protocol.py +142 -0
- llama_stack/providers/utils/telemetry/tracing.py +192 -53
- llama_stack/providers/utils/tools/__init__.py +5 -0
- llama_stack/providers/utils/tools/mcp.py +148 -0
- llama_stack/providers/utils/tools/ttl_dict.py +70 -0
- llama_stack/providers/utils/vector_io/__init__.py +5 -0
- llama_stack/providers/utils/vector_io/vector_utils.py +156 -0
- llama_stack/schema_utils.py +118 -0
- llama_stack/strong_typing/__init__.py +19 -0
- llama_stack/strong_typing/auxiliary.py +228 -0
- llama_stack/strong_typing/classdef.py +440 -0
- llama_stack/strong_typing/core.py +46 -0
- llama_stack/strong_typing/deserializer.py +877 -0
- llama_stack/strong_typing/docstring.py +409 -0
- llama_stack/strong_typing/exception.py +23 -0
- llama_stack/strong_typing/inspection.py +1085 -0
- llama_stack/strong_typing/mapping.py +40 -0
- llama_stack/strong_typing/name.py +182 -0
- llama_stack/strong_typing/py.typed +0 -0
- llama_stack/strong_typing/schema.py +792 -0
- llama_stack/strong_typing/serialization.py +97 -0
- llama_stack/strong_typing/serializer.py +500 -0
- llama_stack/strong_typing/slots.py +27 -0
- llama_stack/strong_typing/topological.py +89 -0
- llama_stack/testing/__init__.py +5 -0
- llama_stack/testing/api_recorder.py +956 -0
- llama_stack/ui/node_modules/flatted/python/flatted.py +149 -0
- llama_stack-0.3.4.dist-info/METADATA +261 -0
- llama_stack-0.3.4.dist-info/RECORD +625 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/WHEEL +1 -1
- llama_stack/apis/agents/client.py +0 -292
- llama_stack/apis/agents/event_logger.py +0 -184
- llama_stack/apis/batch_inference/batch_inference.py +0 -72
- llama_stack/apis/common/deployment_types.py +0 -31
- llama_stack/apis/dataset/dataset.py +0 -63
- llama_stack/apis/evals/evals.py +0 -122
- llama_stack/apis/inference/client.py +0 -197
- llama_stack/apis/inspect/client.py +0 -82
- llama_stack/apis/memory/client.py +0 -155
- llama_stack/apis/memory/memory.py +0 -65
- llama_stack/apis/memory_banks/__init__.py +0 -7
- llama_stack/apis/memory_banks/client.py +0 -101
- llama_stack/apis/memory_banks/memory_banks.py +0 -78
- llama_stack/apis/models/client.py +0 -83
- llama_stack/apis/reward_scoring/__init__.py +0 -7
- llama_stack/apis/reward_scoring/reward_scoring.py +0 -55
- llama_stack/apis/safety/client.py +0 -105
- llama_stack/apis/shields/client.py +0 -79
- llama_stack/cli/download.py +0 -340
- llama_stack/cli/model/describe.py +0 -82
- llama_stack/cli/model/download.py +0 -24
- llama_stack/cli/model/list.py +0 -62
- llama_stack/cli/model/model.py +0 -34
- llama_stack/cli/model/prompt_format.py +0 -112
- llama_stack/cli/model/safety_models.py +0 -52
- llama_stack/cli/stack/build.py +0 -299
- llama_stack/cli/stack/configure.py +0 -178
- llama_stack/distribution/build.py +0 -123
- llama_stack/distribution/build_conda_env.sh +0 -136
- llama_stack/distribution/build_container.sh +0 -142
- llama_stack/distribution/common.sh +0 -40
- llama_stack/distribution/configure_container.sh +0 -47
- llama_stack/distribution/datatypes.py +0 -139
- llama_stack/distribution/distribution.py +0 -58
- llama_stack/distribution/inspect.py +0 -67
- llama_stack/distribution/request_headers.py +0 -57
- llama_stack/distribution/resolver.py +0 -323
- llama_stack/distribution/routers/__init__.py +0 -48
- llama_stack/distribution/routers/routers.py +0 -158
- llama_stack/distribution/routers/routing_tables.py +0 -173
- llama_stack/distribution/server/endpoints.py +0 -48
- llama_stack/distribution/server/server.py +0 -343
- llama_stack/distribution/start_conda_env.sh +0 -42
- llama_stack/distribution/start_container.sh +0 -64
- llama_stack/distribution/templates/local-bedrock-conda-example-build.yaml +0 -10
- llama_stack/distribution/templates/local-build.yaml +0 -10
- llama_stack/distribution/templates/local-databricks-build.yaml +0 -10
- llama_stack/distribution/templates/local-fireworks-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-endpoint-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-serverless-build.yaml +0 -10
- llama_stack/distribution/templates/local-ollama-build.yaml +0 -10
- llama_stack/distribution/templates/local-tgi-build.yaml +0 -10
- llama_stack/distribution/templates/local-together-build.yaml +0 -10
- llama_stack/distribution/templates/local-vllm-build.yaml +0 -10
- llama_stack/distribution/utils/exec.py +0 -105
- llama_stack/providers/adapters/agents/sample/sample.py +0 -18
- llama_stack/providers/adapters/inference/bedrock/bedrock.py +0 -451
- llama_stack/providers/adapters/inference/bedrock/config.py +0 -55
- llama_stack/providers/adapters/inference/databricks/config.py +0 -21
- llama_stack/providers/adapters/inference/databricks/databricks.py +0 -125
- llama_stack/providers/adapters/inference/fireworks/config.py +0 -20
- llama_stack/providers/adapters/inference/fireworks/fireworks.py +0 -130
- llama_stack/providers/adapters/inference/ollama/__init__.py +0 -19
- llama_stack/providers/adapters/inference/ollama/ollama.py +0 -175
- llama_stack/providers/adapters/inference/sample/sample.py +0 -23
- llama_stack/providers/adapters/inference/tgi/config.py +0 -43
- llama_stack/providers/adapters/inference/tgi/tgi.py +0 -200
- llama_stack/providers/adapters/inference/together/config.py +0 -22
- llama_stack/providers/adapters/inference/together/together.py +0 -143
- llama_stack/providers/adapters/memory/chroma/chroma.py +0 -157
- llama_stack/providers/adapters/memory/pgvector/config.py +0 -17
- llama_stack/providers/adapters/memory/pgvector/pgvector.py +0 -211
- llama_stack/providers/adapters/memory/sample/sample.py +0 -23
- llama_stack/providers/adapters/memory/weaviate/__init__.py +0 -15
- llama_stack/providers/adapters/memory/weaviate/weaviate.py +0 -190
- llama_stack/providers/adapters/safety/bedrock/bedrock.py +0 -113
- llama_stack/providers/adapters/safety/bedrock/config.py +0 -16
- llama_stack/providers/adapters/safety/sample/sample.py +0 -23
- llama_stack/providers/adapters/safety/together/__init__.py +0 -18
- llama_stack/providers/adapters/safety/together/config.py +0 -26
- llama_stack/providers/adapters/safety/together/together.py +0 -101
- llama_stack/providers/adapters/telemetry/opentelemetry/config.py +0 -12
- llama_stack/providers/adapters/telemetry/opentelemetry/opentelemetry.py +0 -201
- llama_stack/providers/adapters/telemetry/sample/__init__.py +0 -17
- llama_stack/providers/adapters/telemetry/sample/config.py +0 -12
- llama_stack/providers/adapters/telemetry/sample/sample.py +0 -18
- llama_stack/providers/impls/meta_reference/agents/agent_instance.py +0 -844
- llama_stack/providers/impls/meta_reference/agents/agents.py +0 -161
- llama_stack/providers/impls/meta_reference/agents/persistence.py +0 -84
- llama_stack/providers/impls/meta_reference/agents/rag/context_retriever.py +0 -74
- llama_stack/providers/impls/meta_reference/agents/safety.py +0 -57
- llama_stack/providers/impls/meta_reference/agents/tests/code_execution.py +0 -93
- llama_stack/providers/impls/meta_reference/agents/tests/test_chat_agent.py +0 -305
- llama_stack/providers/impls/meta_reference/agents/tools/base.py +0 -20
- llama_stack/providers/impls/meta_reference/agents/tools/builtin.py +0 -375
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_env_prefix.py +0 -133
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_execution.py +0 -256
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/matplotlib_custom_backend.py +0 -87
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/utils.py +0 -21
- llama_stack/providers/impls/meta_reference/agents/tools/safety.py +0 -43
- llama_stack/providers/impls/meta_reference/codeshield/code_scanner.py +0 -58
- llama_stack/providers/impls/meta_reference/inference/config.py +0 -45
- llama_stack/providers/impls/meta_reference/inference/generation.py +0 -376
- llama_stack/providers/impls/meta_reference/inference/inference.py +0 -280
- llama_stack/providers/impls/meta_reference/inference/model_parallel.py +0 -99
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_impls.py +0 -184
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_txest_disabled.py +0 -76
- llama_stack/providers/impls/meta_reference/inference/quantization/loader.py +0 -97
- llama_stack/providers/impls/meta_reference/inference/quantization/scripts/quantize_checkpoint.py +0 -161
- llama_stack/providers/impls/meta_reference/memory/__init__.py +0 -19
- llama_stack/providers/impls/meta_reference/memory/faiss.py +0 -113
- llama_stack/providers/impls/meta_reference/safety/__init__.py +0 -17
- llama_stack/providers/impls/meta_reference/safety/base.py +0 -57
- llama_stack/providers/impls/meta_reference/safety/config.py +0 -48
- llama_stack/providers/impls/meta_reference/safety/llama_guard.py +0 -268
- llama_stack/providers/impls/meta_reference/safety/prompt_guard.py +0 -145
- llama_stack/providers/impls/meta_reference/safety/safety.py +0 -112
- llama_stack/providers/impls/meta_reference/telemetry/console.py +0 -89
- llama_stack/providers/impls/vllm/config.py +0 -35
- llama_stack/providers/impls/vllm/vllm.py +0 -241
- llama_stack/providers/registry/memory.py +0 -78
- llama_stack/providers/registry/telemetry.py +0 -44
- llama_stack/providers/tests/agents/test_agents.py +0 -210
- llama_stack/providers/tests/inference/test_inference.py +0 -257
- llama_stack/providers/tests/inference/test_prompt_adapter.py +0 -126
- llama_stack/providers/tests/memory/test_memory.py +0 -136
- llama_stack/providers/tests/resolver.py +0 -100
- llama_stack/providers/tests/safety/test_safety.py +0 -77
- llama_stack-0.0.42.dist-info/METADATA +0 -137
- llama_stack-0.0.42.dist-info/RECORD +0 -256
- /llama_stack/{distribution → core}/__init__.py +0 -0
- /llama_stack/{distribution/server → core/access_control}/__init__.py +0 -0
- /llama_stack/{distribution/utils → core/conversations}/__init__.py +0 -0
- /llama_stack/{providers/adapters → core/prompts}/__init__.py +0 -0
- /llama_stack/{providers/adapters/agents → core/routing_tables}/__init__.py +0 -0
- /llama_stack/{providers/adapters/inference → core/server}/__init__.py +0 -0
- /llama_stack/{providers/adapters/memory → core/storage}/__init__.py +0 -0
- /llama_stack/{providers/adapters/safety → core/ui}/__init__.py +0 -0
- /llama_stack/{providers/adapters/telemetry → core/ui/modules}/__init__.py +0 -0
- /llama_stack/{providers/impls → core/ui/page}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference → core/ui/page/distribution}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/rag → core/ui/page/evaluations}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tests → core/ui/page/playground}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools → core/utils}/__init__.py +0 -0
- /llama_stack/{distribution → core}/utils/dynamic.py +0 -0
- /llama_stack/{distribution → core}/utils/serialize.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools/ipython_tool → distributions}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization → models}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization/scripts → models/llama}/__init__.py +0 -0
- /llama_stack/{providers/tests → models/llama/llama3}/__init__.py +0 -0
- /llama_stack/{providers/tests/agents → models/llama/llama3/quantization}/__init__.py +0 -0
- /llama_stack/{providers/tests/inference → models/llama/llama3_2}/__init__.py +0 -0
- /llama_stack/{providers/tests/memory → models/llama/llama3_3}/__init__.py +0 -0
- /llama_stack/{providers/tests/safety → models/llama/llama4}/__init__.py +0 -0
- /llama_stack/{scripts → models/llama/llama4/prompt_templates}/__init__.py +0 -0
- /llama_stack/providers/{adapters → remote}/safety/bedrock/__init__.py +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/entry_points.txt +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info/licenses}/LICENSE +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
+
# All rights reserved.
|
|
3
|
+
#
|
|
4
|
+
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
+
# the root directory of this source tree.
|
|
6
|
+
|
|
7
|
+
import inspect
|
|
8
|
+
import re
|
|
9
|
+
from collections.abc import Callable
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from aiohttp import hdrs
|
|
13
|
+
from starlette.routing import Route
|
|
14
|
+
|
|
15
|
+
from llama_stack.apis.datatypes import Api, ExternalApiSpec
|
|
16
|
+
from llama_stack.apis.tools import RAGToolRuntime, SpecialToolGroup
|
|
17
|
+
from llama_stack.core.resolver import api_protocol_map
|
|
18
|
+
from llama_stack.schema_utils import WebMethod
|
|
19
|
+
|
|
20
|
+
# Type aliases describing the route-dispatch tables built in this module.
EndpointFunc = Callable[..., Any]  # an endpoint implementation method
PathParams = dict[str, str]  # named path parameters extracted from a URL
RouteInfo = tuple[EndpointFunc, str, WebMethod]  # (endpoint, route template, webmethod metadata)
PathImpl = dict[str, RouteInfo]  # path regex -> RouteInfo
RouteImpls = dict[str, PathImpl]  # lowercased HTTP method -> PathImpl
RouteMatch = tuple[EndpointFunc, PathParams, str, WebMethod]  # result of find_matching_route()
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def toolgroup_protocol_map():
    """Return the mapping from each special tool group to its protocol class."""
    return {SpecialToolGroup.rag_tool: RAGToolRuntime}
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def get_all_api_routes(
    external_apis: dict[Api, ExternalApiSpec] | None = None,
) -> dict[Api, list[tuple[Route, WebMethod]]]:
    """Collect every (Route, WebMethod) pair declared by each API protocol.

    Args:
        external_apis: optional externally-provided API specs, forwarded to
            api_protocol_map() so their protocols contribute routes too.

    Returns:
        Mapping of Api -> list of (starlette Route, WebMethod) tuples. Route
        endpoints are left as None; dispatch happens elsewhere.
    """
    apis = {}

    protocols = api_protocol_map(external_apis)
    toolgroup_protocols = toolgroup_protocol_map()
    for api, protocol in protocols.items():
        routes = []
        protocol_methods = inspect.getmembers(protocol, predicate=inspect.isfunction)

        # HACK ALERT: tool_runtime additionally exposes the special tool-group
        # sub-protocols under a "<group>.<method>" route name.
        if api == Api.tool_runtime:
            for tool_group in SpecialToolGroup:
                sub_protocol = toolgroup_protocols[tool_group]
                sub_protocol_methods = inspect.getmembers(sub_protocol, predicate=inspect.isfunction)
                for name, method in sub_protocol_methods:
                    # NOTE(review): this filters on the singular "__webmethod__"
                    # attribute, while the loop below reads the plural
                    # "__webmethods__" — verify the decorator sets both, or
                    # sub-protocol methods may be silently dropped.
                    if not hasattr(method, "__webmethod__"):
                        continue
                    protocol_methods.append((f"{tool_group.value}.{name}", method))

        for name, method in protocol_methods:
            # Get all webmethods for this method (supports multiple decorators)
            webmethods = getattr(method, "__webmethods__", [])
            if not webmethods:
                continue

            # Create routes for each webmethod decorator
            for webmethod in webmethods:
                path = f"/{webmethod.level}/{webmethod.route.lstrip('/')}"
                if webmethod.method == hdrs.METH_GET:
                    http_method = hdrs.METH_GET
                elif webmethod.method == hdrs.METH_DELETE:
                    http_method = hdrs.METH_DELETE
                else:
                    # Anything other than GET/DELETE defaults to POST.
                    http_method = hdrs.METH_POST
                routes.append(
                    (Route(path=path, methods=[http_method], name=name, endpoint=None), webmethod)
                )  # setting endpoint to None since don't use a Router object

        apis[api] = routes

    return apis
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def initialize_route_impls(impls, external_apis: dict[Api, ExternalApiSpec] | None = None) -> RouteImpls:
    """Build the dispatch table: HTTP method -> path regex -> route info.

    Args:
        impls: mapping of Api -> implementation object providing the endpoint
            methods named by each route.
        external_apis: optional extra API specs forwarded to get_all_api_routes().

    Returns:
        RouteImpls: {lowercased method: {path regex: (func, route template, webmethod)}}.
        APIs absent from ``impls`` and HEAD-only routes are skipped.
    """
    api_to_routes = get_all_api_routes(external_apis)
    route_impls: RouteImpls = {}

    def _convert_path_to_regex(path: str) -> str:
        # Convert {param} to named capture groups.
        # Handle {param:path} as well, which allows forward slashes in the
        # param value (".+" instead of "[^/]+").
        # BUGFIX: m.group(0) is the whole match *including* the closing brace
        # (e.g. "{file:path}"), so the previous endswith(':path') test could
        # never be true and ":path" params wrongly matched "[^/]+".
        pattern = re.sub(
            r"{(\w+)(?::path)?}",
            lambda m: f"(?P<{m.group(1)}>{'.+' if m.group(0).endswith(':path}') else '[^/]+'})",
            path,
        )

        return f"^{pattern}$"

    for api, api_routes in api_to_routes.items():
        if api not in impls:
            continue
        for route, webmethod in api_routes:
            impl = impls[api]
            func = getattr(impl, route.name)
            # Get the first (and typically only) method from the set, filtering out HEAD
            available_methods = [m for m in route.methods if m != "HEAD"]
            if not available_methods:
                continue  # Skip if only HEAD method is available
            method = available_methods[0].lower()
            if method not in route_impls:
                route_impls[method] = {}
            route_impls[method][_convert_path_to_regex(route.path)] = (
                func,
                route.path,
                webmethod,
            )

    return route_impls
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def find_matching_route(method: str, path: str, route_impls: RouteImpls) -> RouteMatch:
    """Find the matching endpoint implementation for a given method and path.

    Args:
        method: HTTP method (GET, POST, etc.); matched case-insensitively.
        path: URL path to match against the registered path regexes.
        route_impls: A dictionary of endpoint implementations.

    Returns:
        A tuple of (endpoint_function, path_params, route_path, webmethod_metadata)

    Raises:
        ValueError: If no matching endpoint is found
    """
    candidates = route_impls.get(method.lower())
    if candidates:
        for pattern, (endpoint, template, webmethod) in candidates.items():
            matched = re.match(pattern, path)
            if matched is None:
                continue
            # Named groups in the compiled pattern become the path parameters.
            return endpoint, matched.groupdict(), template, webmethod

    raise ValueError(f"No endpoint found for {path}")
|
|
@@ -0,0 +1,542 @@
|
|
|
1
|
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
+
# All rights reserved.
|
|
3
|
+
#
|
|
4
|
+
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
+
# the root directory of this source tree.
|
|
6
|
+
|
|
7
|
+
import asyncio
|
|
8
|
+
import concurrent.futures
|
|
9
|
+
import functools
|
|
10
|
+
import inspect
|
|
11
|
+
import json
|
|
12
|
+
import logging # allow-direct-logging
|
|
13
|
+
import os
|
|
14
|
+
import sys
|
|
15
|
+
import traceback
|
|
16
|
+
import warnings
|
|
17
|
+
from collections.abc import Callable
|
|
18
|
+
from contextlib import asynccontextmanager
|
|
19
|
+
from importlib.metadata import version as parse_version
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
from typing import Annotated, Any, get_origin
|
|
22
|
+
|
|
23
|
+
import httpx
|
|
24
|
+
import rich.pretty
|
|
25
|
+
import yaml
|
|
26
|
+
from fastapi import Body, FastAPI, HTTPException, Request, Response
|
|
27
|
+
from fastapi import Path as FastapiPath
|
|
28
|
+
from fastapi.exceptions import RequestValidationError
|
|
29
|
+
from fastapi.middleware.cors import CORSMiddleware
|
|
30
|
+
from fastapi.responses import JSONResponse, StreamingResponse
|
|
31
|
+
from openai import BadRequestError
|
|
32
|
+
from pydantic import BaseModel, ValidationError
|
|
33
|
+
|
|
34
|
+
from llama_stack.apis.common.errors import ConflictError, ResourceNotFoundError
|
|
35
|
+
from llama_stack.apis.common.responses import PaginatedResponse
|
|
36
|
+
from llama_stack.core.access_control.access_control import AccessDeniedError
|
|
37
|
+
from llama_stack.core.datatypes import (
|
|
38
|
+
AuthenticationRequiredError,
|
|
39
|
+
LoggingConfig,
|
|
40
|
+
StackRunConfig,
|
|
41
|
+
process_cors_config,
|
|
42
|
+
)
|
|
43
|
+
from llama_stack.core.distribution import builtin_automatically_routed_apis
|
|
44
|
+
from llama_stack.core.external import load_external_apis
|
|
45
|
+
from llama_stack.core.request_headers import (
|
|
46
|
+
PROVIDER_DATA_VAR,
|
|
47
|
+
request_provider_data_context,
|
|
48
|
+
user_from_scope,
|
|
49
|
+
)
|
|
50
|
+
from llama_stack.core.server.routes import get_all_api_routes
|
|
51
|
+
from llama_stack.core.stack import (
|
|
52
|
+
Stack,
|
|
53
|
+
cast_image_name_to_string,
|
|
54
|
+
replace_env_vars,
|
|
55
|
+
)
|
|
56
|
+
from llama_stack.core.utils.config import redact_sensitive_fields
|
|
57
|
+
from llama_stack.core.utils.config_resolution import Mode, resolve_config_or_distro
|
|
58
|
+
from llama_stack.core.utils.context import preserve_contexts_async_generator
|
|
59
|
+
from llama_stack.log import get_logger, setup_logging
|
|
60
|
+
from llama_stack.providers.datatypes import Api
|
|
61
|
+
from llama_stack.providers.inline.telemetry.meta_reference.config import TelemetryConfig
|
|
62
|
+
from llama_stack.providers.inline.telemetry.meta_reference.telemetry import (
|
|
63
|
+
TelemetryAdapter,
|
|
64
|
+
)
|
|
65
|
+
from llama_stack.providers.utils.telemetry.tracing import (
|
|
66
|
+
CURRENT_TRACE_CONTEXT,
|
|
67
|
+
setup_logger,
|
|
68
|
+
)
|
|
69
|
+
|
|
70
|
+
from .auth import AuthenticationMiddleware
|
|
71
|
+
from .quota import QuotaMiddleware
|
|
72
|
+
from .tracing import TracingMiddleware
|
|
73
|
+
|
|
74
|
+
# Repository root directory (four parents up from this module).
REPO_ROOT = Path(__file__).parent.parent.parent.parent

# Module-level logger for the core server.
logger = get_logger(name=__name__, category="core::server")
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning replacement that also prints the triggering stack.

    Writes to *file* when it is a writable stream, otherwise to stderr.
    """
    stream = file if hasattr(file, "write") else sys.stderr
    traceback.print_stack(file=stream)
    stream.write(warnings.formatwarning(message, category, filename, lineno, line))
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
# Opt-in debugging aid: when LLAMA_STACK_TRACE_WARNINGS is set, every warning
# is printed together with the stack that triggered it.
if os.environ.get("LLAMA_STACK_TRACE_WARNINGS"):
    warnings.showwarning = warn_with_traceback
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def create_sse_event(data: Any) -> str:
    """Serialize *data* as a single server-sent-events frame ("data: ...\\n\\n").

    Pydantic models use their own JSON serialization; everything else goes
    through json.dumps().
    """
    payload = data.model_dump_json() if isinstance(data, BaseModel) else json.dumps(data)
    return f"data: {payload}\n\n"
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
async def global_exception_handler(request: Request, exc: Exception):
    """Catch-all handler: log the traceback and map the exception to error JSON."""
    traceback.print_exception(exc)
    translated = translate_exception(exc)
    return JSONResponse(
        status_code=translated.status_code,
        content={"error": {"detail": translated.detail}},
    )
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def translate_exception(exc: Exception) -> HTTPException | RequestValidationError:
    """Map an arbitrary exception to the HTTPException returned to the client.

    NOTE: branch order matters — ConflictError and ResourceNotFoundError are
    tested before ValueError (presumably they subclass it — verify), and the
    generic status_code fallback comes after all specific types.
    """
    # Normalize pydantic validation errors into FastAPI's request-validation form.
    if isinstance(exc, ValidationError):
        exc = RequestValidationError(exc.errors())

    if isinstance(exc, RequestValidationError):
        # Surface each field error's location, message, and type to the client.
        return HTTPException(
            status_code=httpx.codes.BAD_REQUEST,
            detail={
                "errors": [
                    {
                        "loc": list(error["loc"]),
                        "msg": error["msg"],
                        "type": error["type"],
                    }
                    for error in exc.errors()
                ]
            },
        )
    elif isinstance(exc, ConflictError):
        return HTTPException(status_code=httpx.codes.CONFLICT, detail=str(exc))
    elif isinstance(exc, ResourceNotFoundError):
        return HTTPException(status_code=httpx.codes.NOT_FOUND, detail=str(exc))
    elif isinstance(exc, ValueError):
        return HTTPException(status_code=httpx.codes.BAD_REQUEST, detail=f"Invalid value: {str(exc)}")
    elif isinstance(exc, BadRequestError):
        return HTTPException(status_code=httpx.codes.BAD_REQUEST, detail=str(exc))
    elif isinstance(exc, PermissionError | AccessDeniedError):
        return HTTPException(status_code=httpx.codes.FORBIDDEN, detail=f"Permission denied: {str(exc)}")
    elif isinstance(exc, ConnectionError | httpx.ConnectError):
        return HTTPException(status_code=httpx.codes.BAD_GATEWAY, detail=str(exc))
    elif isinstance(exc, asyncio.TimeoutError | TimeoutError):
        return HTTPException(status_code=httpx.codes.GATEWAY_TIMEOUT, detail=f"Operation timed out: {str(exc)}")
    elif isinstance(exc, NotImplementedError):
        return HTTPException(status_code=httpx.codes.NOT_IMPLEMENTED, detail=f"Not implemented: {str(exc)}")
    elif isinstance(exc, AuthenticationRequiredError):
        return HTTPException(status_code=httpx.codes.UNAUTHORIZED, detail=f"Authentication required: {str(exc)}")
    elif hasattr(exc, "status_code") and isinstance(getattr(exc, "status_code", None), int):
        # Handle provider SDK exceptions (e.g., OpenAI's APIStatusError and subclasses)
        # These include AuthenticationError (401), PermissionDeniedError (403), etc.
        # This preserves the actual HTTP status code from the provider
        status_code = exc.status_code
        detail = str(exc)
        return HTTPException(status_code=status_code, detail=detail)
    else:
        # Deliberately opaque: don't leak internals of unexpected failures.
        return HTTPException(
            status_code=httpx.codes.INTERNAL_SERVER_ERROR,
            detail="Internal server error: An unexpected error occurred.",
        )
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
class StackApp(FastAPI):
    """
    A wrapper around the FastAPI application to hold a reference to the Stack instance so that we can
    start background tasks (e.g. refresh model registry periodically) from the lifespan context manager.
    """

    def __init__(self, config: StackRunConfig, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.stack: Stack = Stack(config)

        # This code is called from a running event loop managed by uvicorn so we cannot simply call
        # asyncio.run() to initialize the stack. We cannot await either since this is not an async
        # function.
        # As a workaround, we use a thread pool executor to run the initialize() method
        # in a separate thread. future.result() blocks until initialization
        # completes and re-raises any exception it produced, so construction
        # fails loudly if the stack cannot start.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future = executor.submit(asyncio.run, self.stack.initialize())
            future.result()
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
@asynccontextmanager
async def lifespan(app: StackApp):
    """FastAPI lifespan hook: start the registry refresh task on startup and
    shut the stack down cleanly when the server exits."""
    logger.info("Starting up")
    assert app.stack is not None
    app.stack.create_registry_refresh_task()
    yield
    logger.info("Shutting down")
    await app.stack.shutdown()
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
def is_streaming_request(func_name: str, request: Request, **kwargs):
    """Return whether the client asked for a streaming response.

    A ``stream`` flag is looked up first at the top level of the request
    kwargs, then on a nested ``params`` object (e.g. openai_chat_completion()).
    Defaults to False when neither is present.
    """
    # TODO: pass the api method and punt it to the Protocol definition directly
    if "stream" in kwargs:
        return kwargs["stream"]

    maybe_params = kwargs.get("params")
    return getattr(maybe_params, "stream", False)
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
async def maybe_await(value):
    """Await *value* if it is a coroutine; otherwise return it unchanged."""
    return await value if inspect.iscoroutine(value) else value
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
async def sse_generator(event_gen_coroutine):
    """Drive an async event generator and yield each event as an SSE frame.

    Args:
        event_gen_coroutine: coroutine that resolves to an async generator of events.

    On cancellation (e.g. client disconnect) the underlying generator is
    closed; any other failure is reported to the client as a final SSE
    "error" event instead of tearing down the stream.
    """
    event_gen = None
    try:
        event_gen = await event_gen_coroutine
        async for item in event_gen:
            yield create_sse_event(item)
    except asyncio.CancelledError:
        logger.info("Generator cancelled")
        # Close the generator explicitly so its cleanup blocks run.
        if event_gen:
            await event_gen.aclose()
    except Exception as e:
        logger.exception("Error in sse_generator")
        yield create_sse_event(
            {
                "error": {
                    "message": str(translate_exception(e)),
                },
            }
        )
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
async def log_request_pre_validation(request: Request):
    """Debug-log the raw body of mutating requests before validation runs.

    JSON bodies are pretty-printed; anything else is logged as a raw repr.
    Any failure to read or log the body is downgraded to a warning — logging
    must never break request handling.
    """
    if request.method not in ("POST", "PUT", "PATCH"):
        return
    try:
        body_bytes = await request.body()
        if not body_bytes:
            logger.debug(f"Incoming {request.method} {request.url.path} request with empty body.")
            return
        try:
            parsed_body = json.loads(body_bytes.decode())
            log_output = rich.pretty.pretty_repr(parsed_body)
        except (json.JSONDecodeError, UnicodeDecodeError):
            # Not valid JSON/UTF-8 — log the raw bytes instead.
            log_output = repr(body_bytes)
        logger.debug(f"Incoming raw request body for {request.method} {request.url.path}:\n{log_output}")
    except Exception as e:
        logger.warning(f"Could not read or log request body for {request.method} {request.url.path}: {e}")
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def create_dynamic_typed_route(func: Any, method: str, route: str) -> Callable:
    """Wrap a provider implementation method as a FastAPI route handler.

    The returned handler sets up per-request context (provider data, auth
    user, optional test context), dispatches to ``func`` either as an SSE
    stream or a plain awaited call, and rewrites its own signature so FastAPI
    validates path/body parameters against the implementation's parameters.

    :param func: implementation method to expose.
    :param method: HTTP method name, lowercase (e.g. "post").
    :param route: route template, e.g. "/models/{model_id}".
    :returns: async handler suitable for FastAPI registration.
    """

    @functools.wraps(func)
    async def route_handler(request: Request, **kwargs):
        # Get auth attributes from the request scope
        user = user_from_scope(request.scope)

        await log_request_pre_validation(request)

        # Test-context bookkeeping; populated only in test inference mode.
        test_context_token = None
        test_context_var = None
        reset_test_context_fn = None

        # Use context manager with both provider data and auth attributes
        with request_provider_data_context(request.headers, user):
            if os.environ.get("LLAMA_STACK_TEST_INFERENCE_MODE"):
                # Lazy import so test-only machinery never loads in production.
                from llama_stack.core.testing_context import (
                    TEST_CONTEXT,
                    reset_test_context,
                    sync_test_context_from_provider_data,
                )

                test_context_token = sync_test_context_from_provider_data()
                test_context_var = TEST_CONTEXT
                reset_test_context_fn = reset_test_context

            is_streaming = is_streaming_request(func.__name__, request, **kwargs)

            try:
                if is_streaming:
                    # Context vars that must survive across the async
                    # generator's suspension points while streaming.
                    context_vars = [CURRENT_TRACE_CONTEXT, PROVIDER_DATA_VAR]
                    if test_context_var is not None:
                        context_vars.append(test_context_var)
                    gen = preserve_contexts_async_generator(sse_generator(func(**kwargs)), context_vars)
                    return StreamingResponse(gen, media_type="text/event-stream")
                else:
                    value = func(**kwargs)
                    result = await maybe_await(value)
                    # Stamp the originating route onto paginated responses.
                    if isinstance(result, PaginatedResponse) and result.url is None:
                        result.url = route

                    # Body-less DELETE maps to 204 No Content.
                    if method.upper() == "DELETE" and result is None:
                        return Response(status_code=httpx.codes.NO_CONTENT)

                    return result
            except Exception as e:
                # Full traceback only when INFO logging is enabled; terse otherwise.
                if logger.isEnabledFor(logging.INFO):
                    logger.exception(f"Error executing endpoint {route=} {method=}")
                else:
                    logger.error(f"Error executing endpoint {route=} {method=}: {str(e)}")
                raise translate_exception(e) from e
            finally:
                if test_context_token is not None and reset_test_context_fn is not None:
                    reset_test_context_fn(test_context_token)

    # Rebuild the handler's signature: prepend the raw Request parameter, then
    # annotate the implementation's own parameters so FastAPI knows where each
    # one comes from (path vs. body).
    sig = inspect.signature(func)

    new_params = [inspect.Parameter("request", inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=Request)]
    new_params.extend(sig.parameters.values())

    path_params = extract_path_params(route)
    if method == "post":
        # Annotate parameters that are in the path with Path(...) and others with Body(...),
        # but preserve existing File() and Form() annotations for multipart form data
        new_params = (
            [new_params[0]]
            + [
                (
                    param.replace(annotation=Annotated[param.annotation, FastapiPath(..., title=param.name)])
                    if param.name in path_params
                    else (
                        param  # Keep original annotation if it's already an Annotated type
                        if get_origin(param.annotation) is Annotated
                        else param.replace(annotation=Annotated[param.annotation, Body(..., embed=True)])
                    )
                )
                for param in new_params[1:]
            ]
        )

    route_handler.__signature__ = sig.replace(parameters=new_params)

    return route_handler
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
class ClientVersionMiddleware:
    """ASGI middleware that rejects requests from incompatible clients.

    Compares the ``x-llamastack-client-version`` request header against the
    server's own installed version (major.minor only) and answers with
    426 Upgrade Required on mismatch. Requests without the header, or with
    an unparseable version, pass through untouched.
    """

    def __init__(self, app):
        # Downstream ASGI app.
        self.app = app
        # Server's installed llama-stack version string, e.g. "0.3.4".
        self.server_version = parse_version("llama-stack")

    async def __call__(self, scope, receive, send):
        if scope["type"] == "http":
            # ASGI headers are a list of (name: bytes, value: bytes) pairs.
            headers = dict(scope.get("headers", []))
            client_version = headers.get(b"x-llamastack-client-version", b"").decode()
            if client_version:
                try:
                    # Compare only major.minor; patch-level drift is tolerated.
                    client_version_parts = tuple(map(int, client_version.split(".")[:2]))
                    server_version_parts = tuple(map(int, self.server_version.split(".")[:2]))
                    if client_version_parts != server_version_parts:

                        async def send_version_error(send):
                            # Hand-rolled ASGI response: 426 + JSON error body.
                            await send(
                                {
                                    "type": "http.response.start",
                                    "status": httpx.codes.UPGRADE_REQUIRED,
                                    "headers": [[b"content-type", b"application/json"]],
                                }
                            )
                            error_msg = json.dumps(
                                {
                                    "error": {
                                        "message": f"Client version {client_version} is not compatible with server version {self.server_version}. Please update your client."
                                    }
                                }
                            ).encode()
                            await send({"type": "http.response.body", "body": error_msg})

                        return await send_version_error(send)
                except (ValueError, IndexError):
                    # If version parsing fails, let the request through
                    pass

        return await self.app(scope, receive, send)
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
def create_app() -> StackApp:
    """Create and configure the FastAPI application.

    This factory function reads configuration from environment variables:
    - LLAMA_STACK_CONFIG: Path to config file (required)

    Returns:
        Configured StackApp instance.

    Raises:
        ValueError: if LLAMA_STACK_CONFIG is unset, an API to serve has no
            provider implementation, or a route method is missing on an
            implementation.
    """
    # Initialize logging from environment variables first
    setup_logging()

    config_file = os.getenv("LLAMA_STACK_CONFIG")
    if config_file is None:
        raise ValueError("LLAMA_STACK_CONFIG environment variable is required")

    # Accepts either a concrete file path or a distro name to resolve.
    config_file = resolve_config_or_distro(config_file, Mode.RUN)

    # Load and process configuration
    logger_config = None
    with open(config_file) as fp:
        config_contents = yaml.safe_load(fp)
        if isinstance(config_contents, dict) and (cfg := config_contents.get("logging_config")):
            logger_config = LoggingConfig(**cfg)
        # Rebind the logger so it honors the config's logging section.
        logger = get_logger(name=__name__, category="core::server", config=logger_config)

        # Substitute ${env...} references, then validate against the schema.
        config = replace_env_vars(config_contents)
        config = StackRunConfig(**cast_image_name_to_string(config))

    _log_run_config(run_config=config)

    app = StackApp(
        lifespan=lifespan,
        docs_url="/docs",
        redoc_url="/redoc",
        openapi_url="/openapi.json",
        config=config,
    )

    # Reject clients whose major.minor version differs from the server's,
    # unless the check is explicitly disabled.
    if not os.environ.get("LLAMA_STACK_DISABLE_VERSION_CHECK"):
        app.add_middleware(ClientVersionMiddleware)

    impls = app.stack.impls

    if config.server.auth:
        logger.info(f"Enabling authentication with provider: {config.server.auth.provider_config.type.value}")
        app.add_middleware(AuthenticationMiddleware, auth_config=config.server.auth, impls=impls)
    else:
        if config.server.quota:
            quota = config.server.quota
            logger.warning(
                "Configured authenticated_max_requests (%d) but no auth is enabled; "
                "falling back to anonymous_max_requests (%d) for all the requests",
                quota.authenticated_max_requests,
                quota.anonymous_max_requests,
            )

    if config.server.quota:
        logger.info("Enabling quota middleware for authenticated and anonymous clients")

        quota = config.server.quota
        anonymous_max_requests = quota.anonymous_max_requests
        # if auth is disabled, use the anonymous max requests
        authenticated_max_requests = quota.authenticated_max_requests if config.server.auth else anonymous_max_requests

        kv_config = quota.kvstore
        # NOTE(review): only "day" is handled here; any other period value
        # raises KeyError — confirm this is the intended failure mode.
        window_map = {"day": 86400}
        window_seconds = window_map[quota.period.value]

        app.add_middleware(
            QuotaMiddleware,
            kv_config=kv_config,
            anonymous_max_requests=anonymous_max_requests,
            authenticated_max_requests=authenticated_max_requests,
            window_seconds=window_seconds,
        )

    if config.server.cors:
        logger.info("Enabling CORS")
        cors_config = process_cors_config(config.server.cors)
        if cors_config:
            app.add_middleware(CORSMiddleware, **cors_config.model_dump())

    # Telemetry sink: the configured adapter when enabled, a default-config
    # adapter otherwise.
    if config.telemetry.enabled:
        setup_logger(impls[Api.telemetry])
    else:
        setup_logger(TelemetryAdapter(TelemetryConfig(), {}))

    # Load external APIs if configured
    external_apis = load_external_apis(config)
    all_routes = get_all_api_routes(external_apis)

    if config.apis:
        apis_to_serve = set(config.apis)
    else:
        apis_to_serve = set(impls.keys())

    for inf in builtin_automatically_routed_apis():
        # if we do not serve the corresponding router API, we should not serve the routing table API
        if inf.router_api.value not in apis_to_serve:
            continue
        apis_to_serve.add(inf.routing_table_api.value)

    # These APIs are always served regardless of configuration.
    apis_to_serve.add("inspect")
    apis_to_serve.add("providers")
    apis_to_serve.add("prompts")
    apis_to_serve.add("conversations")
    for api_str in apis_to_serve:
        api = Api(api_str)

        routes = all_routes[api]
        try:
            impl = impls[api]
        except KeyError as e:
            raise ValueError(f"Could not find provider implementation for {api} API") from e

        for route, _ in routes:
            if not hasattr(impl, route.name):
                # ideally this should be a typing violation already
                raise ValueError(f"Could not find method {route.name} on {impl}!")

            impl_method = getattr(impl, route.name)
            # Filter out HEAD method since it's automatically handled by FastAPI for GET routes
            available_methods = [m for m in route.methods if m != "HEAD"]
            if not available_methods:
                raise ValueError(f"No methods found for {route.name} on {impl}")
            method = available_methods[0]
            logger.debug(f"{method} {route.path}")

            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=UserWarning, module="pydantic._internal._fields")
                # Register e.g. app.post(path)(handler), where the handler
                # carries a dynamically rebuilt signature so FastAPI validates
                # path/body parameters correctly.
                getattr(app, method.lower())(route.path, response_model=None)(
                    create_dynamic_typed_route(
                        impl_method,
                        method.lower(),
                        route.path,
                    )
                )

    logger.debug(f"serving APIs: {apis_to_serve}")

    app.exception_handler(RequestValidationError)(global_exception_handler)
    app.exception_handler(Exception)(global_exception_handler)

    app.add_middleware(TracingMiddleware, impls=impls, external_apis=external_apis)

    return app
|
|
515
|
+
|
|
516
|
+
|
|
517
|
+
def _log_run_config(run_config: StackRunConfig):
    """Logs the run config with redacted fields and disabled providers removed."""
    logger.info("Run configuration:")
    dumped = run_config.model_dump(mode="json")
    # Redact secrets first, then drop any disabled-provider entries.
    sanitized = remove_disabled_providers(redact_sensitive_fields(dumped))
    logger.info(yaml.dump(sanitized, indent=2))
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
def extract_path_params(route: str) -> list[str]:
    """Return the parameter names embedded in a route template.

    "/models/{model_id}" -> ["model_id"]. Converter suffixes such as
    "{param:path}" are stripped down to the bare name.
    """
    names = []
    for segment in route.split("/"):
        if segment.startswith("{") and segment.endswith("}"):
            # Drop the braces, then strip any ":converter" suffix.
            names.append(segment[1:-1].split(":")[0])
    return names
|
|
531
|
+
|
|
532
|
+
|
|
533
|
+
def remove_disabled_providers(obj):
    """Recursively prune config entries that refer to disabled providers.

    A dict whose provider/shield/model id is "__disabled__", "" or None is
    dropped entirely (the whole dict maps to None); containers are rebuilt
    with those None results filtered out. Scalars pass through unchanged.
    """
    if isinstance(obj, dict):
        for marker in ("provider_id", "shield_id", "provider_model_id", "model_id"):
            if marker in obj and obj[marker] in ("__disabled__", "", None):
                return None
        pruned = {}
        for key, value in obj.items():
            cleaned = remove_disabled_providers(value)
            if cleaned is not None:
                pruned[key] = cleaned
        return pruned
    if isinstance(obj, list):
        kept = []
        for item in obj:
            cleaned = remove_disabled_providers(item)
            if cleaned is not None:
                kept.append(cleaned)
        return kept
    return obj
|