llama-stack 0.0.42__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- llama_stack/__init__.py +5 -0
- llama_stack/apis/agents/__init__.py +1 -1
- llama_stack/apis/agents/agents.py +700 -281
- llama_stack/apis/agents/openai_responses.py +1311 -0
- llama_stack/{providers/adapters/memory/sample/config.py → apis/batches/__init__.py} +2 -5
- llama_stack/apis/batches/batches.py +100 -0
- llama_stack/apis/benchmarks/__init__.py +7 -0
- llama_stack/apis/benchmarks/benchmarks.py +108 -0
- llama_stack/apis/common/content_types.py +143 -0
- llama_stack/apis/common/errors.py +103 -0
- llama_stack/apis/common/job_types.py +38 -0
- llama_stack/apis/common/responses.py +36 -0
- llama_stack/apis/common/training_types.py +36 -5
- llama_stack/apis/common/type_system.py +158 -0
- llama_stack/apis/conversations/__init__.py +31 -0
- llama_stack/apis/conversations/conversations.py +286 -0
- llama_stack/apis/datasetio/__init__.py +7 -0
- llama_stack/apis/datasetio/datasetio.py +59 -0
- llama_stack/apis/datasets/__init__.py +7 -0
- llama_stack/apis/datasets/datasets.py +251 -0
- llama_stack/apis/datatypes.py +160 -0
- llama_stack/apis/eval/__init__.py +7 -0
- llama_stack/apis/eval/eval.py +169 -0
- llama_stack/apis/files/__init__.py +7 -0
- llama_stack/apis/files/files.py +199 -0
- llama_stack/apis/inference/__init__.py +1 -1
- llama_stack/apis/inference/inference.py +1169 -113
- llama_stack/apis/inspect/__init__.py +1 -1
- llama_stack/apis/inspect/inspect.py +69 -16
- llama_stack/apis/models/__init__.py +1 -1
- llama_stack/apis/models/models.py +148 -21
- llama_stack/apis/post_training/__init__.py +1 -1
- llama_stack/apis/post_training/post_training.py +265 -120
- llama_stack/{providers/adapters/agents/sample/config.py → apis/prompts/__init__.py} +2 -5
- llama_stack/apis/prompts/prompts.py +204 -0
- llama_stack/apis/providers/__init__.py +7 -0
- llama_stack/apis/providers/providers.py +69 -0
- llama_stack/apis/resource.py +37 -0
- llama_stack/apis/safety/__init__.py +1 -1
- llama_stack/apis/safety/safety.py +95 -12
- llama_stack/apis/scoring/__init__.py +7 -0
- llama_stack/apis/scoring/scoring.py +93 -0
- llama_stack/apis/scoring_functions/__init__.py +7 -0
- llama_stack/apis/scoring_functions/scoring_functions.py +208 -0
- llama_stack/apis/shields/__init__.py +1 -1
- llama_stack/apis/shields/shields.py +76 -33
- llama_stack/apis/synthetic_data_generation/__init__.py +1 -1
- llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +40 -17
- llama_stack/apis/telemetry/__init__.py +1 -1
- llama_stack/apis/telemetry/telemetry.py +322 -31
- llama_stack/apis/{dataset → tools}/__init__.py +2 -1
- llama_stack/apis/tools/rag_tool.py +218 -0
- llama_stack/apis/tools/tools.py +221 -0
- llama_stack/apis/vector_io/__init__.py +7 -0
- llama_stack/apis/vector_io/vector_io.py +960 -0
- llama_stack/apis/vector_stores/__init__.py +7 -0
- llama_stack/apis/vector_stores/vector_stores.py +51 -0
- llama_stack/apis/version.py +9 -0
- llama_stack/cli/llama.py +13 -5
- llama_stack/cli/stack/_list_deps.py +182 -0
- llama_stack/cli/stack/list_apis.py +1 -1
- llama_stack/cli/stack/list_deps.py +55 -0
- llama_stack/cli/stack/list_providers.py +24 -10
- llama_stack/cli/stack/list_stacks.py +56 -0
- llama_stack/cli/stack/remove.py +115 -0
- llama_stack/cli/stack/run.py +169 -56
- llama_stack/cli/stack/stack.py +18 -4
- llama_stack/cli/stack/utils.py +151 -0
- llama_stack/cli/table.py +23 -61
- llama_stack/cli/utils.py +29 -0
- llama_stack/core/access_control/access_control.py +131 -0
- llama_stack/core/access_control/conditions.py +129 -0
- llama_stack/core/access_control/datatypes.py +107 -0
- llama_stack/core/build.py +164 -0
- llama_stack/core/client.py +205 -0
- llama_stack/core/common.sh +37 -0
- llama_stack/{distribution → core}/configure.py +74 -55
- llama_stack/core/conversations/conversations.py +309 -0
- llama_stack/core/datatypes.py +625 -0
- llama_stack/core/distribution.py +276 -0
- llama_stack/core/external.py +54 -0
- llama_stack/core/id_generation.py +42 -0
- llama_stack/core/inspect.py +86 -0
- llama_stack/core/library_client.py +539 -0
- llama_stack/core/prompts/prompts.py +234 -0
- llama_stack/core/providers.py +137 -0
- llama_stack/core/request_headers.py +115 -0
- llama_stack/core/resolver.py +506 -0
- llama_stack/core/routers/__init__.py +101 -0
- llama_stack/core/routers/datasets.py +73 -0
- llama_stack/core/routers/eval_scoring.py +155 -0
- llama_stack/core/routers/inference.py +645 -0
- llama_stack/core/routers/safety.py +85 -0
- llama_stack/core/routers/tool_runtime.py +91 -0
- llama_stack/core/routers/vector_io.py +442 -0
- llama_stack/core/routing_tables/benchmarks.py +62 -0
- llama_stack/core/routing_tables/common.py +254 -0
- llama_stack/core/routing_tables/datasets.py +91 -0
- llama_stack/core/routing_tables/models.py +163 -0
- llama_stack/core/routing_tables/scoring_functions.py +66 -0
- llama_stack/core/routing_tables/shields.py +61 -0
- llama_stack/core/routing_tables/toolgroups.py +129 -0
- llama_stack/core/routing_tables/vector_stores.py +292 -0
- llama_stack/core/server/auth.py +187 -0
- llama_stack/core/server/auth_providers.py +494 -0
- llama_stack/core/server/quota.py +110 -0
- llama_stack/core/server/routes.py +141 -0
- llama_stack/core/server/server.py +542 -0
- llama_stack/core/server/tracing.py +80 -0
- llama_stack/core/stack.py +546 -0
- llama_stack/core/start_stack.sh +117 -0
- llama_stack/core/storage/datatypes.py +283 -0
- llama_stack/{cli/model → core/store}/__init__.py +1 -1
- llama_stack/core/store/registry.py +199 -0
- llama_stack/core/testing_context.py +49 -0
- llama_stack/core/ui/app.py +55 -0
- llama_stack/core/ui/modules/api.py +32 -0
- llama_stack/core/ui/modules/utils.py +42 -0
- llama_stack/core/ui/page/distribution/datasets.py +18 -0
- llama_stack/core/ui/page/distribution/eval_tasks.py +20 -0
- llama_stack/core/ui/page/distribution/models.py +18 -0
- llama_stack/core/ui/page/distribution/providers.py +27 -0
- llama_stack/core/ui/page/distribution/resources.py +48 -0
- llama_stack/core/ui/page/distribution/scoring_functions.py +18 -0
- llama_stack/core/ui/page/distribution/shields.py +19 -0
- llama_stack/core/ui/page/evaluations/app_eval.py +143 -0
- llama_stack/core/ui/page/evaluations/native_eval.py +253 -0
- llama_stack/core/ui/page/playground/chat.py +130 -0
- llama_stack/core/ui/page/playground/tools.py +352 -0
- llama_stack/core/utils/config.py +30 -0
- llama_stack/{distribution → core}/utils/config_dirs.py +3 -6
- llama_stack/core/utils/config_resolution.py +125 -0
- llama_stack/core/utils/context.py +84 -0
- llama_stack/core/utils/exec.py +96 -0
- llama_stack/{providers/impls/meta_reference/codeshield/config.py → core/utils/image_types.py} +4 -3
- llama_stack/{distribution → core}/utils/model_utils.py +2 -2
- llama_stack/{distribution → core}/utils/prompt_for_config.py +30 -63
- llama_stack/{apis/batch_inference → distributions/dell}/__init__.py +1 -1
- llama_stack/distributions/dell/build.yaml +33 -0
- llama_stack/distributions/dell/dell.py +158 -0
- llama_stack/distributions/dell/run-with-safety.yaml +141 -0
- llama_stack/distributions/dell/run.yaml +132 -0
- llama_stack/distributions/meta-reference-gpu/__init__.py +7 -0
- llama_stack/distributions/meta-reference-gpu/build.yaml +32 -0
- llama_stack/distributions/meta-reference-gpu/meta_reference.py +163 -0
- llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +154 -0
- llama_stack/distributions/meta-reference-gpu/run.yaml +139 -0
- llama_stack/{apis/evals → distributions/nvidia}/__init__.py +1 -1
- llama_stack/distributions/nvidia/build.yaml +29 -0
- llama_stack/distributions/nvidia/nvidia.py +154 -0
- llama_stack/distributions/nvidia/run-with-safety.yaml +137 -0
- llama_stack/distributions/nvidia/run.yaml +116 -0
- llama_stack/distributions/open-benchmark/__init__.py +7 -0
- llama_stack/distributions/open-benchmark/build.yaml +36 -0
- llama_stack/distributions/open-benchmark/open_benchmark.py +303 -0
- llama_stack/distributions/open-benchmark/run.yaml +252 -0
- llama_stack/distributions/postgres-demo/__init__.py +7 -0
- llama_stack/distributions/postgres-demo/build.yaml +23 -0
- llama_stack/distributions/postgres-demo/postgres_demo.py +125 -0
- llama_stack/distributions/postgres-demo/run.yaml +115 -0
- llama_stack/{apis/memory → distributions/starter}/__init__.py +1 -1
- llama_stack/distributions/starter/build.yaml +61 -0
- llama_stack/distributions/starter/run-with-postgres-store.yaml +285 -0
- llama_stack/distributions/starter/run.yaml +276 -0
- llama_stack/distributions/starter/starter.py +345 -0
- llama_stack/distributions/starter-gpu/__init__.py +7 -0
- llama_stack/distributions/starter-gpu/build.yaml +61 -0
- llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +288 -0
- llama_stack/distributions/starter-gpu/run.yaml +279 -0
- llama_stack/distributions/starter-gpu/starter_gpu.py +20 -0
- llama_stack/distributions/template.py +456 -0
- llama_stack/distributions/watsonx/__init__.py +7 -0
- llama_stack/distributions/watsonx/build.yaml +33 -0
- llama_stack/distributions/watsonx/run.yaml +133 -0
- llama_stack/distributions/watsonx/watsonx.py +95 -0
- llama_stack/env.py +24 -0
- llama_stack/log.py +314 -0
- llama_stack/models/llama/checkpoint.py +164 -0
- llama_stack/models/llama/datatypes.py +164 -0
- llama_stack/models/llama/hadamard_utils.py +86 -0
- llama_stack/models/llama/llama3/args.py +74 -0
- llama_stack/models/llama/llama3/chat_format.py +286 -0
- llama_stack/models/llama/llama3/generation.py +376 -0
- llama_stack/models/llama/llama3/interface.py +255 -0
- llama_stack/models/llama/llama3/model.py +304 -0
- llama_stack/models/llama/llama3/multimodal/__init__.py +12 -0
- llama_stack/models/llama/llama3/multimodal/encoder_utils.py +180 -0
- llama_stack/models/llama/llama3/multimodal/image_transform.py +409 -0
- llama_stack/models/llama/llama3/multimodal/model.py +1430 -0
- llama_stack/models/llama/llama3/multimodal/utils.py +26 -0
- llama_stack/models/llama/llama3/prompt_templates/__init__.py +22 -0
- llama_stack/models/llama/llama3/prompt_templates/base.py +39 -0
- llama_stack/models/llama/llama3/prompt_templates/system_prompts.py +319 -0
- llama_stack/models/llama/llama3/prompt_templates/tool_response.py +62 -0
- llama_stack/models/llama/llama3/quantization/loader.py +316 -0
- llama_stack/models/llama/llama3/template_data.py +116 -0
- llama_stack/models/llama/llama3/tokenizer.model +128000 -0
- llama_stack/models/llama/llama3/tokenizer.py +198 -0
- llama_stack/models/llama/llama3/tool_utils.py +266 -0
- llama_stack/models/llama/llama3_1/__init__.py +12 -0
- llama_stack/models/llama/llama3_1/prompt_format.md +358 -0
- llama_stack/models/llama/llama3_1/prompts.py +258 -0
- llama_stack/models/llama/llama3_2/prompts_text.py +229 -0
- llama_stack/models/llama/llama3_2/prompts_vision.py +126 -0
- llama_stack/models/llama/llama3_2/text_prompt_format.md +286 -0
- llama_stack/models/llama/llama3_2/vision_prompt_format.md +141 -0
- llama_stack/models/llama/llama3_3/prompts.py +259 -0
- llama_stack/models/llama/llama4/args.py +107 -0
- llama_stack/models/llama/llama4/chat_format.py +317 -0
- llama_stack/models/llama/llama4/datatypes.py +56 -0
- llama_stack/models/llama/llama4/ffn.py +58 -0
- llama_stack/models/llama/llama4/generation.py +313 -0
- llama_stack/models/llama/llama4/model.py +437 -0
- llama_stack/models/llama/llama4/moe.py +214 -0
- llama_stack/models/llama/llama4/preprocess.py +435 -0
- llama_stack/models/llama/llama4/prompt_format.md +304 -0
- llama_stack/models/llama/llama4/prompt_templates/system_prompts.py +136 -0
- llama_stack/models/llama/llama4/prompts.py +279 -0
- llama_stack/models/llama/llama4/quantization/__init__.py +5 -0
- llama_stack/models/llama/llama4/quantization/loader.py +226 -0
- llama_stack/models/llama/llama4/tokenizer.model +200000 -0
- llama_stack/models/llama/llama4/tokenizer.py +263 -0
- llama_stack/models/llama/llama4/vision/__init__.py +5 -0
- llama_stack/models/llama/llama4/vision/embedding.py +210 -0
- llama_stack/models/llama/llama4/vision/encoder.py +412 -0
- llama_stack/models/llama/prompt_format.py +191 -0
- llama_stack/models/llama/quantize_impls.py +316 -0
- llama_stack/models/llama/sku_list.py +1029 -0
- llama_stack/models/llama/sku_types.py +233 -0
- llama_stack/models/llama/tokenizer_utils.py +40 -0
- llama_stack/providers/datatypes.py +136 -107
- llama_stack/providers/inline/__init__.py +5 -0
- llama_stack/providers/inline/agents/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/agents/meta_reference}/__init__.py +12 -5
- llama_stack/providers/inline/agents/meta_reference/agent_instance.py +1024 -0
- llama_stack/providers/inline/agents/meta_reference/agents.py +383 -0
- llama_stack/providers/inline/agents/meta_reference/config.py +37 -0
- llama_stack/providers/inline/agents/meta_reference/persistence.py +228 -0
- llama_stack/providers/inline/agents/meta_reference/responses/__init__.py +5 -0
- llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +423 -0
- llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +1226 -0
- llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py +449 -0
- llama_stack/providers/inline/agents/meta_reference/responses/types.py +194 -0
- llama_stack/providers/inline/agents/meta_reference/responses/utils.py +365 -0
- llama_stack/providers/inline/agents/meta_reference/safety.py +52 -0
- llama_stack/providers/inline/batches/__init__.py +5 -0
- llama_stack/providers/inline/batches/reference/__init__.py +36 -0
- llama_stack/providers/inline/batches/reference/batches.py +679 -0
- llama_stack/providers/inline/batches/reference/config.py +40 -0
- llama_stack/providers/inline/datasetio/__init__.py +5 -0
- llama_stack/providers/inline/datasetio/localfs/__init__.py +20 -0
- llama_stack/providers/inline/datasetio/localfs/config.py +23 -0
- llama_stack/providers/inline/datasetio/localfs/datasetio.py +113 -0
- llama_stack/providers/inline/eval/__init__.py +5 -0
- llama_stack/providers/inline/eval/meta_reference/__init__.py +28 -0
- llama_stack/providers/inline/eval/meta_reference/config.py +23 -0
- llama_stack/providers/inline/eval/meta_reference/eval.py +259 -0
- llama_stack/providers/inline/files/localfs/__init__.py +20 -0
- llama_stack/providers/inline/files/localfs/config.py +31 -0
- llama_stack/providers/inline/files/localfs/files.py +219 -0
- llama_stack/providers/inline/inference/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/__init__.py +4 -4
- llama_stack/providers/inline/inference/meta_reference/common.py +24 -0
- llama_stack/providers/inline/inference/meta_reference/config.py +68 -0
- llama_stack/providers/inline/inference/meta_reference/generators.py +211 -0
- llama_stack/providers/inline/inference/meta_reference/inference.py +158 -0
- llama_stack/providers/inline/inference/meta_reference/model_parallel.py +96 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/parallel_utils.py +56 -73
- llama_stack/providers/inline/inference/sentence_transformers/__init__.py +22 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/inference/sentence_transformers}/config.py +6 -4
- llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +83 -0
- llama_stack/providers/inline/post_training/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/utils.py +35 -0
- llama_stack/providers/inline/post_training/common/validator.py +36 -0
- llama_stack/providers/inline/post_training/huggingface/__init__.py +27 -0
- llama_stack/providers/inline/post_training/huggingface/config.py +83 -0
- llama_stack/providers/inline/post_training/huggingface/post_training.py +208 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py +519 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py +485 -0
- llama_stack/providers/inline/post_training/huggingface/utils.py +269 -0
- llama_stack/providers/inline/post_training/torchtune/__init__.py +27 -0
- llama_stack/providers/inline/post_training/torchtune/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +240 -0
- llama_stack/providers/inline/post_training/torchtune/common/utils.py +99 -0
- llama_stack/providers/inline/post_training/torchtune/config.py +20 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +57 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/sft.py +78 -0
- llama_stack/providers/inline/post_training/torchtune/post_training.py +178 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +588 -0
- llama_stack/providers/inline/safety/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/codeshield → inline/safety/code_scanner}/__init__.py +4 -2
- llama_stack/providers/inline/safety/code_scanner/code_scanner.py +128 -0
- llama_stack/providers/{impls/meta_reference/memory → inline/safety/code_scanner}/config.py +5 -3
- llama_stack/providers/inline/safety/llama_guard/__init__.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/config.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/llama_guard.py +489 -0
- llama_stack/providers/{adapters/memory/sample → inline/safety/prompt_guard}/__init__.py +4 -4
- llama_stack/providers/inline/safety/prompt_guard/config.py +32 -0
- llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py +131 -0
- llama_stack/providers/inline/scoring/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/__init__.py +25 -0
- llama_stack/providers/{adapters/memory/weaviate → inline/scoring/basic}/config.py +5 -7
- llama_stack/providers/inline/scoring/basic/scoring.py +126 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py +240 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py +41 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/docvqa.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/equality.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/ifeval.py +23 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_math_response.py +27 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py +71 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/subset_of.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py +80 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py +66 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py +58 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py +38 -0
- llama_stack/providers/inline/scoring/basic/utils/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py +3319 -0
- llama_stack/providers/inline/scoring/basic/utils/math_utils.py +330 -0
- llama_stack/providers/inline/scoring/braintrust/__init__.py +27 -0
- llama_stack/providers/inline/scoring/braintrust/braintrust.py +230 -0
- llama_stack/providers/inline/scoring/braintrust/config.py +21 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_correctness.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_relevancy.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_similarity.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_entity_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_precision.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_relevancy.py +23 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/factuality.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/faithfulness.py +24 -0
- llama_stack/providers/inline/scoring/llm_as_judge/__init__.py +21 -0
- llama_stack/providers/inline/scoring/llm_as_judge/config.py +14 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring.py +113 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +96 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +20 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +81 -0
- llama_stack/providers/inline/telemetry/__init__.py +5 -0
- llama_stack/providers/inline/telemetry/meta_reference/__init__.py +21 -0
- llama_stack/providers/inline/telemetry/meta_reference/config.py +47 -0
- llama_stack/providers/inline/telemetry/meta_reference/telemetry.py +252 -0
- llama_stack/providers/inline/tool_runtime/__init__.py +5 -0
- llama_stack/providers/inline/tool_runtime/rag/__init__.py +19 -0
- llama_stack/providers/{impls/meta_reference/telemetry → inline/tool_runtime/rag}/config.py +5 -3
- llama_stack/providers/inline/tool_runtime/rag/context_retriever.py +77 -0
- llama_stack/providers/inline/tool_runtime/rag/memory.py +332 -0
- llama_stack/providers/inline/vector_io/__init__.py +5 -0
- llama_stack/providers/inline/vector_io/chroma/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/chroma/config.py +30 -0
- llama_stack/providers/inline/vector_io/faiss/__init__.py +21 -0
- llama_stack/providers/inline/vector_io/faiss/config.py +26 -0
- llama_stack/providers/inline/vector_io/faiss/faiss.py +293 -0
- llama_stack/providers/inline/vector_io/milvus/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/milvus/config.py +29 -0
- llama_stack/providers/inline/vector_io/qdrant/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/qdrant/config.py +29 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/config.py +26 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +483 -0
- llama_stack/providers/registry/agents.py +16 -18
- llama_stack/providers/registry/batches.py +26 -0
- llama_stack/providers/registry/datasetio.py +49 -0
- llama_stack/providers/registry/eval.py +46 -0
- llama_stack/providers/registry/files.py +31 -0
- llama_stack/providers/registry/inference.py +273 -118
- llama_stack/providers/registry/post_training.py +69 -0
- llama_stack/providers/registry/safety.py +46 -41
- llama_stack/providers/registry/scoring.py +51 -0
- llama_stack/providers/registry/tool_runtime.py +87 -0
- llama_stack/providers/registry/vector_io.py +828 -0
- llama_stack/providers/remote/__init__.py +5 -0
- llama_stack/providers/remote/agents/__init__.py +5 -0
- llama_stack/providers/remote/datasetio/__init__.py +5 -0
- llama_stack/providers/{adapters/memory/chroma → remote/datasetio/huggingface}/__init__.py +7 -4
- llama_stack/providers/remote/datasetio/huggingface/config.py +23 -0
- llama_stack/providers/remote/datasetio/huggingface/huggingface.py +99 -0
- llama_stack/providers/remote/datasetio/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/datasetio/nvidia/config.py +61 -0
- llama_stack/providers/remote/datasetio/nvidia/datasetio.py +116 -0
- llama_stack/providers/remote/eval/__init__.py +5 -0
- llama_stack/providers/remote/eval/nvidia/__init__.py +31 -0
- llama_stack/providers/remote/eval/nvidia/config.py +29 -0
- llama_stack/providers/remote/eval/nvidia/eval.py +162 -0
- llama_stack/providers/remote/files/s3/__init__.py +19 -0
- llama_stack/providers/remote/files/s3/config.py +42 -0
- llama_stack/providers/remote/files/s3/files.py +313 -0
- llama_stack/providers/remote/inference/__init__.py +5 -0
- llama_stack/providers/{adapters/safety/sample → remote/inference/anthropic}/__init__.py +4 -6
- llama_stack/providers/remote/inference/anthropic/anthropic.py +36 -0
- llama_stack/providers/remote/inference/anthropic/config.py +28 -0
- llama_stack/providers/{impls/meta_reference/telemetry → remote/inference/azure}/__init__.py +4 -4
- llama_stack/providers/remote/inference/azure/azure.py +25 -0
- llama_stack/providers/remote/inference/azure/config.py +61 -0
- llama_stack/providers/{adapters → remote}/inference/bedrock/__init__.py +18 -17
- llama_stack/providers/remote/inference/bedrock/bedrock.py +142 -0
- llama_stack/providers/{adapters/inference/sample → remote/inference/bedrock}/config.py +3 -4
- llama_stack/providers/remote/inference/bedrock/models.py +29 -0
- llama_stack/providers/remote/inference/cerebras/__init__.py +19 -0
- llama_stack/providers/remote/inference/cerebras/cerebras.py +28 -0
- llama_stack/providers/remote/inference/cerebras/config.py +30 -0
- llama_stack/providers/{adapters → remote}/inference/databricks/__init__.py +4 -5
- llama_stack/providers/remote/inference/databricks/config.py +37 -0
- llama_stack/providers/remote/inference/databricks/databricks.py +44 -0
- llama_stack/providers/{adapters → remote}/inference/fireworks/__init__.py +8 -4
- llama_stack/providers/remote/inference/fireworks/config.py +27 -0
- llama_stack/providers/remote/inference/fireworks/fireworks.py +27 -0
- llama_stack/providers/{adapters/memory/pgvector → remote/inference/gemini}/__init__.py +4 -4
- llama_stack/providers/remote/inference/gemini/config.py +28 -0
- llama_stack/providers/remote/inference/gemini/gemini.py +82 -0
- llama_stack/providers/remote/inference/groq/__init__.py +15 -0
- llama_stack/providers/remote/inference/groq/config.py +34 -0
- llama_stack/providers/remote/inference/groq/groq.py +18 -0
- llama_stack/providers/remote/inference/llama_openai_compat/__init__.py +15 -0
- llama_stack/providers/remote/inference/llama_openai_compat/config.py +34 -0
- llama_stack/providers/remote/inference/llama_openai_compat/llama.py +46 -0
- llama_stack/providers/remote/inference/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/inference/nvidia/config.py +64 -0
- llama_stack/providers/remote/inference/nvidia/nvidia.py +61 -0
- llama_stack/providers/{adapters/safety/sample/config.py → remote/inference/nvidia/utils.py} +3 -4
- llama_stack/providers/{impls/vllm → remote/inference/ollama}/__init__.py +4 -6
- llama_stack/providers/remote/inference/ollama/config.py +25 -0
- llama_stack/providers/remote/inference/ollama/ollama.py +102 -0
- llama_stack/providers/{adapters/telemetry/opentelemetry → remote/inference/openai}/__init__.py +4 -4
- llama_stack/providers/remote/inference/openai/config.py +39 -0
- llama_stack/providers/remote/inference/openai/openai.py +38 -0
- llama_stack/providers/remote/inference/passthrough/__init__.py +23 -0
- llama_stack/providers/remote/inference/passthrough/config.py +34 -0
- llama_stack/providers/remote/inference/passthrough/passthrough.py +122 -0
- llama_stack/providers/remote/inference/runpod/__init__.py +16 -0
- llama_stack/providers/remote/inference/runpod/config.py +32 -0
- llama_stack/providers/remote/inference/runpod/runpod.py +42 -0
- llama_stack/providers/remote/inference/sambanova/__init__.py +16 -0
- llama_stack/providers/remote/inference/sambanova/config.py +34 -0
- llama_stack/providers/remote/inference/sambanova/sambanova.py +28 -0
- llama_stack/providers/{adapters → remote}/inference/tgi/__init__.py +3 -4
- llama_stack/providers/remote/inference/tgi/config.py +76 -0
- llama_stack/providers/remote/inference/tgi/tgi.py +85 -0
- llama_stack/providers/{adapters → remote}/inference/together/__init__.py +8 -4
- llama_stack/providers/remote/inference/together/config.py +27 -0
- llama_stack/providers/remote/inference/together/together.py +102 -0
- llama_stack/providers/remote/inference/vertexai/__init__.py +15 -0
- llama_stack/providers/remote/inference/vertexai/config.py +48 -0
- llama_stack/providers/remote/inference/vertexai/vertexai.py +54 -0
- llama_stack/providers/remote/inference/vllm/__init__.py +22 -0
- llama_stack/providers/remote/inference/vllm/config.py +59 -0
- llama_stack/providers/remote/inference/vllm/vllm.py +111 -0
- llama_stack/providers/remote/inference/watsonx/__init__.py +15 -0
- llama_stack/providers/remote/inference/watsonx/config.py +45 -0
- llama_stack/providers/remote/inference/watsonx/watsonx.py +336 -0
- llama_stack/providers/remote/post_training/__init__.py +5 -0
- llama_stack/providers/remote/post_training/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/post_training/nvidia/config.py +113 -0
- llama_stack/providers/remote/post_training/nvidia/models.py +27 -0
- llama_stack/providers/remote/post_training/nvidia/post_training.py +430 -0
- llama_stack/providers/remote/post_training/nvidia/utils.py +63 -0
- llama_stack/providers/remote/safety/__init__.py +5 -0
- llama_stack/providers/remote/safety/bedrock/bedrock.py +111 -0
- llama_stack/providers/remote/safety/bedrock/config.py +14 -0
- llama_stack/providers/{adapters/inference/sample → remote/safety/nvidia}/__init__.py +5 -4
- llama_stack/providers/remote/safety/nvidia/config.py +40 -0
- llama_stack/providers/remote/safety/nvidia/nvidia.py +161 -0
- llama_stack/providers/{adapters/agents/sample → remote/safety/sambanova}/__init__.py +5 -4
- llama_stack/providers/remote/safety/sambanova/config.py +37 -0
- llama_stack/providers/remote/safety/sambanova/sambanova.py +98 -0
- llama_stack/providers/remote/tool_runtime/__init__.py +5 -0
- llama_stack/providers/remote/tool_runtime/bing_search/__init__.py +21 -0
- llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +112 -0
- llama_stack/providers/remote/tool_runtime/bing_search/config.py +22 -0
- llama_stack/providers/remote/tool_runtime/brave_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +148 -0
- llama_stack/providers/remote/tool_runtime/brave_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py +15 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py +20 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +73 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +84 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/__init__.py +22 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py +21 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +140 -0
- llama_stack/providers/remote/vector_io/__init__.py +5 -0
- llama_stack/providers/remote/vector_io/chroma/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/chroma/chroma.py +215 -0
- llama_stack/providers/remote/vector_io/chroma/config.py +28 -0
- llama_stack/providers/remote/vector_io/milvus/__init__.py +18 -0
- llama_stack/providers/remote/vector_io/milvus/config.py +35 -0
- llama_stack/providers/remote/vector_io/milvus/milvus.py +375 -0
- llama_stack/providers/remote/vector_io/pgvector/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/pgvector/config.py +47 -0
- llama_stack/providers/remote/vector_io/pgvector/pgvector.py +460 -0
- llama_stack/providers/remote/vector_io/qdrant/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/qdrant/config.py +37 -0
- llama_stack/providers/remote/vector_io/qdrant/qdrant.py +265 -0
- llama_stack/providers/remote/vector_io/weaviate/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/weaviate/config.py +32 -0
- llama_stack/providers/remote/vector_io/weaviate/weaviate.py +393 -0
- llama_stack/providers/utils/bedrock/__init__.py +5 -0
- llama_stack/providers/utils/bedrock/client.py +74 -0
- llama_stack/providers/utils/bedrock/config.py +64 -0
- llama_stack/providers/utils/bedrock/refreshable_boto_session.py +112 -0
- llama_stack/providers/utils/common/__init__.py +5 -0
- llama_stack/providers/utils/common/data_schema_validator.py +103 -0
- llama_stack/providers/utils/datasetio/__init__.py +5 -0
- llama_stack/providers/utils/datasetio/url_utils.py +47 -0
- llama_stack/providers/utils/files/__init__.py +5 -0
- llama_stack/providers/utils/files/form_data.py +69 -0
- llama_stack/providers/utils/inference/__init__.py +8 -7
- llama_stack/providers/utils/inference/embedding_mixin.py +101 -0
- llama_stack/providers/utils/inference/inference_store.py +264 -0
- llama_stack/providers/utils/inference/litellm_openai_mixin.py +336 -0
- llama_stack/providers/utils/inference/model_registry.py +173 -23
- llama_stack/providers/utils/inference/openai_compat.py +1261 -49
- llama_stack/providers/utils/inference/openai_mixin.py +506 -0
- llama_stack/providers/utils/inference/prompt_adapter.py +365 -67
- llama_stack/providers/utils/kvstore/api.py +6 -6
- llama_stack/providers/utils/kvstore/config.py +28 -48
- llama_stack/providers/utils/kvstore/kvstore.py +61 -15
- llama_stack/providers/utils/kvstore/mongodb/__init__.py +9 -0
- llama_stack/providers/utils/kvstore/mongodb/mongodb.py +82 -0
- llama_stack/providers/utils/kvstore/postgres/__init__.py +7 -0
- llama_stack/providers/utils/kvstore/postgres/postgres.py +114 -0
- llama_stack/providers/utils/kvstore/redis/redis.py +33 -9
- llama_stack/providers/utils/kvstore/sqlite/config.py +2 -1
- llama_stack/providers/utils/kvstore/sqlite/sqlite.py +123 -22
- llama_stack/providers/utils/memory/file_utils.py +1 -1
- llama_stack/providers/utils/memory/openai_vector_store_mixin.py +1304 -0
- llama_stack/providers/utils/memory/vector_store.py +220 -82
- llama_stack/providers/utils/pagination.py +43 -0
- llama_stack/providers/utils/responses/__init__.py +5 -0
- llama_stack/providers/utils/responses/responses_store.py +292 -0
- llama_stack/providers/utils/scheduler.py +270 -0
- llama_stack/providers/utils/scoring/__init__.py +5 -0
- llama_stack/providers/utils/scoring/aggregation_utils.py +75 -0
- llama_stack/providers/utils/scoring/base_scoring_fn.py +114 -0
- llama_stack/providers/utils/scoring/basic_scoring_utils.py +26 -0
- llama_stack/providers/utils/sqlstore/__init__.py +5 -0
- llama_stack/providers/utils/sqlstore/api.py +128 -0
- llama_stack/providers/utils/sqlstore/authorized_sqlstore.py +319 -0
- llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py +343 -0
- llama_stack/providers/utils/sqlstore/sqlstore.py +70 -0
- llama_stack/providers/utils/telemetry/trace_protocol.py +142 -0
- llama_stack/providers/utils/telemetry/tracing.py +192 -53
- llama_stack/providers/utils/tools/__init__.py +5 -0
- llama_stack/providers/utils/tools/mcp.py +148 -0
- llama_stack/providers/utils/tools/ttl_dict.py +70 -0
- llama_stack/providers/utils/vector_io/__init__.py +5 -0
- llama_stack/providers/utils/vector_io/vector_utils.py +156 -0
- llama_stack/schema_utils.py +118 -0
- llama_stack/strong_typing/__init__.py +19 -0
- llama_stack/strong_typing/auxiliary.py +228 -0
- llama_stack/strong_typing/classdef.py +440 -0
- llama_stack/strong_typing/core.py +46 -0
- llama_stack/strong_typing/deserializer.py +877 -0
- llama_stack/strong_typing/docstring.py +409 -0
- llama_stack/strong_typing/exception.py +23 -0
- llama_stack/strong_typing/inspection.py +1085 -0
- llama_stack/strong_typing/mapping.py +40 -0
- llama_stack/strong_typing/name.py +182 -0
- llama_stack/strong_typing/py.typed +0 -0
- llama_stack/strong_typing/schema.py +792 -0
- llama_stack/strong_typing/serialization.py +97 -0
- llama_stack/strong_typing/serializer.py +500 -0
- llama_stack/strong_typing/slots.py +27 -0
- llama_stack/strong_typing/topological.py +89 -0
- llama_stack/testing/__init__.py +5 -0
- llama_stack/testing/api_recorder.py +956 -0
- llama_stack/ui/node_modules/flatted/python/flatted.py +149 -0
- llama_stack-0.3.4.dist-info/METADATA +261 -0
- llama_stack-0.3.4.dist-info/RECORD +625 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/WHEEL +1 -1
- llama_stack/apis/agents/client.py +0 -292
- llama_stack/apis/agents/event_logger.py +0 -184
- llama_stack/apis/batch_inference/batch_inference.py +0 -72
- llama_stack/apis/common/deployment_types.py +0 -31
- llama_stack/apis/dataset/dataset.py +0 -63
- llama_stack/apis/evals/evals.py +0 -122
- llama_stack/apis/inference/client.py +0 -197
- llama_stack/apis/inspect/client.py +0 -82
- llama_stack/apis/memory/client.py +0 -155
- llama_stack/apis/memory/memory.py +0 -65
- llama_stack/apis/memory_banks/__init__.py +0 -7
- llama_stack/apis/memory_banks/client.py +0 -101
- llama_stack/apis/memory_banks/memory_banks.py +0 -78
- llama_stack/apis/models/client.py +0 -83
- llama_stack/apis/reward_scoring/__init__.py +0 -7
- llama_stack/apis/reward_scoring/reward_scoring.py +0 -55
- llama_stack/apis/safety/client.py +0 -105
- llama_stack/apis/shields/client.py +0 -79
- llama_stack/cli/download.py +0 -340
- llama_stack/cli/model/describe.py +0 -82
- llama_stack/cli/model/download.py +0 -24
- llama_stack/cli/model/list.py +0 -62
- llama_stack/cli/model/model.py +0 -34
- llama_stack/cli/model/prompt_format.py +0 -112
- llama_stack/cli/model/safety_models.py +0 -52
- llama_stack/cli/stack/build.py +0 -299
- llama_stack/cli/stack/configure.py +0 -178
- llama_stack/distribution/build.py +0 -123
- llama_stack/distribution/build_conda_env.sh +0 -136
- llama_stack/distribution/build_container.sh +0 -142
- llama_stack/distribution/common.sh +0 -40
- llama_stack/distribution/configure_container.sh +0 -47
- llama_stack/distribution/datatypes.py +0 -139
- llama_stack/distribution/distribution.py +0 -58
- llama_stack/distribution/inspect.py +0 -67
- llama_stack/distribution/request_headers.py +0 -57
- llama_stack/distribution/resolver.py +0 -323
- llama_stack/distribution/routers/__init__.py +0 -48
- llama_stack/distribution/routers/routers.py +0 -158
- llama_stack/distribution/routers/routing_tables.py +0 -173
- llama_stack/distribution/server/endpoints.py +0 -48
- llama_stack/distribution/server/server.py +0 -343
- llama_stack/distribution/start_conda_env.sh +0 -42
- llama_stack/distribution/start_container.sh +0 -64
- llama_stack/distribution/templates/local-bedrock-conda-example-build.yaml +0 -10
- llama_stack/distribution/templates/local-build.yaml +0 -10
- llama_stack/distribution/templates/local-databricks-build.yaml +0 -10
- llama_stack/distribution/templates/local-fireworks-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-endpoint-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-serverless-build.yaml +0 -10
- llama_stack/distribution/templates/local-ollama-build.yaml +0 -10
- llama_stack/distribution/templates/local-tgi-build.yaml +0 -10
- llama_stack/distribution/templates/local-together-build.yaml +0 -10
- llama_stack/distribution/templates/local-vllm-build.yaml +0 -10
- llama_stack/distribution/utils/exec.py +0 -105
- llama_stack/providers/adapters/agents/sample/sample.py +0 -18
- llama_stack/providers/adapters/inference/bedrock/bedrock.py +0 -451
- llama_stack/providers/adapters/inference/bedrock/config.py +0 -55
- llama_stack/providers/adapters/inference/databricks/config.py +0 -21
- llama_stack/providers/adapters/inference/databricks/databricks.py +0 -125
- llama_stack/providers/adapters/inference/fireworks/config.py +0 -20
- llama_stack/providers/adapters/inference/fireworks/fireworks.py +0 -130
- llama_stack/providers/adapters/inference/ollama/__init__.py +0 -19
- llama_stack/providers/adapters/inference/ollama/ollama.py +0 -175
- llama_stack/providers/adapters/inference/sample/sample.py +0 -23
- llama_stack/providers/adapters/inference/tgi/config.py +0 -43
- llama_stack/providers/adapters/inference/tgi/tgi.py +0 -200
- llama_stack/providers/adapters/inference/together/config.py +0 -22
- llama_stack/providers/adapters/inference/together/together.py +0 -143
- llama_stack/providers/adapters/memory/chroma/chroma.py +0 -157
- llama_stack/providers/adapters/memory/pgvector/config.py +0 -17
- llama_stack/providers/adapters/memory/pgvector/pgvector.py +0 -211
- llama_stack/providers/adapters/memory/sample/sample.py +0 -23
- llama_stack/providers/adapters/memory/weaviate/__init__.py +0 -15
- llama_stack/providers/adapters/memory/weaviate/weaviate.py +0 -190
- llama_stack/providers/adapters/safety/bedrock/bedrock.py +0 -113
- llama_stack/providers/adapters/safety/bedrock/config.py +0 -16
- llama_stack/providers/adapters/safety/sample/sample.py +0 -23
- llama_stack/providers/adapters/safety/together/__init__.py +0 -18
- llama_stack/providers/adapters/safety/together/config.py +0 -26
- llama_stack/providers/adapters/safety/together/together.py +0 -101
- llama_stack/providers/adapters/telemetry/opentelemetry/config.py +0 -12
- llama_stack/providers/adapters/telemetry/opentelemetry/opentelemetry.py +0 -201
- llama_stack/providers/adapters/telemetry/sample/__init__.py +0 -17
- llama_stack/providers/adapters/telemetry/sample/config.py +0 -12
- llama_stack/providers/adapters/telemetry/sample/sample.py +0 -18
- llama_stack/providers/impls/meta_reference/agents/agent_instance.py +0 -844
- llama_stack/providers/impls/meta_reference/agents/agents.py +0 -161
- llama_stack/providers/impls/meta_reference/agents/persistence.py +0 -84
- llama_stack/providers/impls/meta_reference/agents/rag/context_retriever.py +0 -74
- llama_stack/providers/impls/meta_reference/agents/safety.py +0 -57
- llama_stack/providers/impls/meta_reference/agents/tests/code_execution.py +0 -93
- llama_stack/providers/impls/meta_reference/agents/tests/test_chat_agent.py +0 -305
- llama_stack/providers/impls/meta_reference/agents/tools/base.py +0 -20
- llama_stack/providers/impls/meta_reference/agents/tools/builtin.py +0 -375
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_env_prefix.py +0 -133
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_execution.py +0 -256
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/matplotlib_custom_backend.py +0 -87
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/utils.py +0 -21
- llama_stack/providers/impls/meta_reference/agents/tools/safety.py +0 -43
- llama_stack/providers/impls/meta_reference/codeshield/code_scanner.py +0 -58
- llama_stack/providers/impls/meta_reference/inference/config.py +0 -45
- llama_stack/providers/impls/meta_reference/inference/generation.py +0 -376
- llama_stack/providers/impls/meta_reference/inference/inference.py +0 -280
- llama_stack/providers/impls/meta_reference/inference/model_parallel.py +0 -99
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_impls.py +0 -184
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_txest_disabled.py +0 -76
- llama_stack/providers/impls/meta_reference/inference/quantization/loader.py +0 -97
- llama_stack/providers/impls/meta_reference/inference/quantization/scripts/quantize_checkpoint.py +0 -161
- llama_stack/providers/impls/meta_reference/memory/__init__.py +0 -19
- llama_stack/providers/impls/meta_reference/memory/faiss.py +0 -113
- llama_stack/providers/impls/meta_reference/safety/__init__.py +0 -17
- llama_stack/providers/impls/meta_reference/safety/base.py +0 -57
- llama_stack/providers/impls/meta_reference/safety/config.py +0 -48
- llama_stack/providers/impls/meta_reference/safety/llama_guard.py +0 -268
- llama_stack/providers/impls/meta_reference/safety/prompt_guard.py +0 -145
- llama_stack/providers/impls/meta_reference/safety/safety.py +0 -112
- llama_stack/providers/impls/meta_reference/telemetry/console.py +0 -89
- llama_stack/providers/impls/vllm/config.py +0 -35
- llama_stack/providers/impls/vllm/vllm.py +0 -241
- llama_stack/providers/registry/memory.py +0 -78
- llama_stack/providers/registry/telemetry.py +0 -44
- llama_stack/providers/tests/agents/test_agents.py +0 -210
- llama_stack/providers/tests/inference/test_inference.py +0 -257
- llama_stack/providers/tests/inference/test_prompt_adapter.py +0 -126
- llama_stack/providers/tests/memory/test_memory.py +0 -136
- llama_stack/providers/tests/resolver.py +0 -100
- llama_stack/providers/tests/safety/test_safety.py +0 -77
- llama_stack-0.0.42.dist-info/METADATA +0 -137
- llama_stack-0.0.42.dist-info/RECORD +0 -256
- /llama_stack/{distribution → core}/__init__.py +0 -0
- /llama_stack/{distribution/server → core/access_control}/__init__.py +0 -0
- /llama_stack/{distribution/utils → core/conversations}/__init__.py +0 -0
- /llama_stack/{providers/adapters → core/prompts}/__init__.py +0 -0
- /llama_stack/{providers/adapters/agents → core/routing_tables}/__init__.py +0 -0
- /llama_stack/{providers/adapters/inference → core/server}/__init__.py +0 -0
- /llama_stack/{providers/adapters/memory → core/storage}/__init__.py +0 -0
- /llama_stack/{providers/adapters/safety → core/ui}/__init__.py +0 -0
- /llama_stack/{providers/adapters/telemetry → core/ui/modules}/__init__.py +0 -0
- /llama_stack/{providers/impls → core/ui/page}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference → core/ui/page/distribution}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/rag → core/ui/page/evaluations}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tests → core/ui/page/playground}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools → core/utils}/__init__.py +0 -0
- /llama_stack/{distribution → core}/utils/dynamic.py +0 -0
- /llama_stack/{distribution → core}/utils/serialize.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools/ipython_tool → distributions}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization → models}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization/scripts → models/llama}/__init__.py +0 -0
- /llama_stack/{providers/tests → models/llama/llama3}/__init__.py +0 -0
- /llama_stack/{providers/tests/agents → models/llama/llama3/quantization}/__init__.py +0 -0
- /llama_stack/{providers/tests/inference → models/llama/llama3_2}/__init__.py +0 -0
- /llama_stack/{providers/tests/memory → models/llama/llama3_3}/__init__.py +0 -0
- /llama_stack/{providers/tests/safety → models/llama/llama4}/__init__.py +0 -0
- /llama_stack/{scripts → models/llama/llama4/prompt_templates}/__init__.py +0 -0
- /llama_stack/providers/{adapters → remote}/safety/bedrock/__init__.py +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/entry_points.txt +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info/licenses}/LICENSE +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/top_level.txt +0 -0
llama_stack/providers/utils/common/data_schema_validator.py
@@ -0,0 +1,103 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from enum import Enum
+from typing import Any
+
+from llama_stack.apis.common.type_system import (
+    ChatCompletionInputType,
+    CompletionInputType,
+    StringType,
+)
+from llama_stack.core.datatypes import Api
+
+
+class ColumnName(Enum):
+    input_query = "input_query"
+    expected_answer = "expected_answer"
+    chat_completion_input = "chat_completion_input"
+    completion_input = "completion_input"
+    generated_answer = "generated_answer"
+    context = "context"
+    dialog = "dialog"
+    function = "function"
+    language = "language"
+    id = "id"
+    ground_truth = "ground_truth"
+
+
+VALID_SCHEMAS_FOR_SCORING = [
+    {
+        ColumnName.input_query.value: StringType(),
+        ColumnName.expected_answer.value: StringType(),
+        ColumnName.generated_answer.value: StringType(),
+    },
+    {
+        ColumnName.input_query.value: StringType(),
+        ColumnName.expected_answer.value: StringType(),
+        ColumnName.generated_answer.value: StringType(),
+        ColumnName.context.value: StringType(),
+    },
+    {
+        ColumnName.input_query.value: StringType(),
+        ColumnName.expected_answer.value: StringType(),
+        ColumnName.generated_answer.value: StringType(),
+        ColumnName.function.value: StringType(),
+        ColumnName.language.value: StringType(),
+        ColumnName.id.value: StringType(),
+        ColumnName.ground_truth.value: StringType(),
+    },
+]
+
+VALID_SCHEMAS_FOR_EVAL = [
+    {
+        ColumnName.input_query.value: StringType(),
+        ColumnName.expected_answer.value: StringType(),
+        ColumnName.chat_completion_input.value: ChatCompletionInputType(),
+    },
+    {
+        ColumnName.input_query.value: StringType(),
+        ColumnName.expected_answer.value: StringType(),
+        ColumnName.completion_input.value: CompletionInputType(),
+    },
+    {
+        ColumnName.input_query.value: StringType(),
+        ColumnName.expected_answer.value: StringType(),
+        ColumnName.generated_answer.value: StringType(),
+        ColumnName.function.value: StringType(),
+        ColumnName.language.value: StringType(),
+        ColumnName.id.value: StringType(),
+        ColumnName.ground_truth.value: StringType(),
+    },
+]
+
+
+def get_valid_schemas(api_str: str):
+    if api_str == Api.scoring.value:
+        return VALID_SCHEMAS_FOR_SCORING
+    elif api_str == Api.eval.value:
+        return VALID_SCHEMAS_FOR_EVAL
+    else:
+        raise ValueError(f"Invalid API string: {api_str}")
+
+
+def validate_dataset_schema(
+    dataset_schema: dict[str, Any],
+    expected_schemas: list[dict[str, Any]],
+):
+    if dataset_schema not in expected_schemas:
+        raise ValueError(f"Dataset {dataset_schema} does not have a correct input schema in {expected_schemas}")
+
+
+def validate_row_schema(
+    input_row: dict[str, Any],
+    expected_schemas: list[dict[str, Any]],
+):
+    for schema in expected_schemas:
+        if all(key in input_row for key in schema):
+            return
+
+    raise ValueError(f"Input row {input_row} does not match any of the expected schemas in {expected_schemas}")
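For orientation, a minimal usage sketch of the row validator above; the sample row is hypothetical, not taken from the package:

from llama_stack.providers.utils.common.data_schema_validator import (
    VALID_SCHEMAS_FOR_SCORING,
    validate_row_schema,
)

# A row carrying every key of the first scoring schema passes silently.
row = {
    "input_query": "What is 2 + 2?",
    "expected_answer": "4",
    "generated_answer": "4",
}
validate_row_schema(row, VALID_SCHEMAS_FOR_SCORING)

# A row matching none of the schemas raises ValueError.
try:
    validate_row_schema({"input_query": "incomplete"}, VALID_SCHEMAS_FOR_SCORING)
except ValueError as err:
    print(err)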
llama_stack/providers/utils/datasetio/url_utils.py
@@ -0,0 +1,47 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import asyncio
+import base64
+import io
+from urllib.parse import unquote
+
+from llama_stack.providers.utils.memory.vector_store import parse_data_url
+
+
+async def get_dataframe_from_uri(uri: str):
+    import pandas
+
+    df = None
+    if uri.endswith(".csv"):
+        # Moving to its own thread to avoid IO from blocking the event loop.
+        # This isn't ideal as it moves more than just the IO to a new thread,
+        # but it is as close as we can easily get.
+        df = await asyncio.to_thread(pandas.read_csv, uri)
+    elif uri.endswith(".xlsx"):
+        df = await asyncio.to_thread(pandas.read_excel, uri)
+    elif uri.startswith("data:"):
+        parts = parse_data_url(uri)
+        data = parts["data"]
+        if parts["is_base64"]:
+            data = base64.b64decode(data)
+        else:
+            data = unquote(data)
+            encoding = parts["encoding"] or "utf-8"
+            data = data.encode(encoding)
+
+        mime_type = parts["mimetype"]
+        mime_category = mime_type.split("/")[0]
+        data_bytes = io.BytesIO(data)
+
+        if mime_category == "text":
+            df = pandas.read_csv(data_bytes)
+        else:
+            df = pandas.read_excel(data_bytes)
+    else:
+        raise ValueError(f"Unsupported file type: {uri}")
+
+    return df
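A quick sketch of the data-URL branch above; the CSV content is made up, and pandas is assumed to be installed. The "text" MIME category routes the payload through pandas.read_csv:

import asyncio
import base64

from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri


async def main() -> None:
    # Build a base64 text/csv data URL on the fly (hypothetical content).
    csv_text = "input_query,expected_answer\nWhat is 2 + 2?,4\n"
    uri = "data:text/csv;base64," + base64.b64encode(csv_text.encode()).decode()
    df = await get_dataframe_from_uri(uri)
    print(df)


asyncio.run(main())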
llama_stack/providers/utils/files/form_data.py
@@ -0,0 +1,69 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import json
+
+from fastapi import Request
+from pydantic import BaseModel, ValidationError
+
+from llama_stack.apis.files import ExpiresAfter
+
+
+async def parse_pydantic_from_form[T: BaseModel](request: Request, field_name: str, model_class: type[T]) -> T | None:
+    """
+    Generic parser to extract a Pydantic model from multipart form data.
+    Handles both bracket notation (field[attr1], field[attr2]) and JSON string format.
+
+    Args:
+        request: The FastAPI request object
+        field_name: The name of the field in the form data (e.g., "expires_after")
+        model_class: The Pydantic model class to parse into
+
+    Returns:
+        An instance of model_class if parsing succeeds, None otherwise
+
+    Example:
+        expires_after = await parse_pydantic_from_form(
+            request, "expires_after", ExpiresAfter
+        )
+    """
+    form = await request.form()
+
+    # Check for bracket notation first (e.g., expires_after[anchor], expires_after[seconds])
+    bracket_data = {}
+    prefix = f"{field_name}["
+    for key in form.keys():
+        if key.startswith(prefix) and key.endswith("]"):
+            # Extract the attribute name from field_name[attr]
+            attr = key[len(prefix) : -1]
+            bracket_data[attr] = form[key]
+
+    if bracket_data:
+        try:
+            return model_class(**bracket_data)
+        except (ValidationError, TypeError):
+            pass
+
+    # Check for JSON string format
+    if field_name in form:
+        value = form[field_name]
+        if isinstance(value, str):
+            try:
+                data = json.loads(value)
+                return model_class(**data)
+            except (json.JSONDecodeError, TypeError, ValidationError):
+                pass
+
+    return None
+
+
+async def parse_expires_after(request: Request) -> ExpiresAfter | None:
+    """
+    Dependency to parse expires_after from multipart form data.
+    Handles both bracket notation (expires_after[anchor], expires_after[seconds])
+    and JSON string format.
+    """
+    return await parse_pydantic_from_form(request, "expires_after", ExpiresAfter)
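A sketch of wiring parse_expires_after into an upload route as a FastAPI dependency; the route path and response shape are illustrative, not the package's actual Files API:

from fastapi import Depends, FastAPI, UploadFile

from llama_stack.apis.files import ExpiresAfter
from llama_stack.providers.utils.files.form_data import parse_expires_after

app = FastAPI()


@app.post("/files")  # illustrative route, not the stack's real endpoint
async def upload_file(
    file: UploadFile,
    expires_after: ExpiresAfter | None = Depends(parse_expires_after),
):
    # Clients may send expires_after[anchor] / expires_after[seconds] fields,
    # or a single JSON string field named expires_after; both arrive parsed.
    return {"filename": file.filename, "expires_after": expires_after}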
```diff
@@ -4,10 +4,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from
-
-from llama_models.datatypes import *  # noqa: F403
-from llama_models.sku_list import all_registered_models
+from llama_stack.models.llama.sku_list import all_registered_models
+from llama_stack.models.llama.sku_types import *  # noqa: F403
 
 
 def is_supported_safety_model(model: Model) -> bool:
@@ -22,12 +20,15 @@ def is_supported_safety_model(model: Model) -> bool:
     ]
 
 
-def supported_inference_models() ->
+def supported_inference_models() -> list[Model]:
     return [
-        m
+        m
         for m in all_registered_models()
         if (
-            m.model_family in {ModelFamily.llama3_1, ModelFamily.llama3_2}
+            m.model_family in {ModelFamily.llama3_1, ModelFamily.llama3_2, ModelFamily.llama3_3, ModelFamily.llama4}
             or is_supported_safety_model(m)
         )
     ]
+
+
+ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR = {m.huggingface_repo: m.descriptor() for m in all_registered_models()}
```
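The new `ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR` mapping gives providers a reverse lookup from a Hugging Face repo id to the stack's canonical model descriptor. A small hedged example of the lookup it enables; the repo id shown is illustrative, and the actual keys depend on which SKUs are registered:

```python
from llama_stack.models.llama.sku_list import all_registered_models

ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR = {m.huggingface_repo: m.descriptor() for m in all_registered_models()}

# Resolve a provider-facing HF repo id to a canonical descriptor, failing
# clearly for repos that are not registered Llama SKUs.
repo = "meta-llama/Llama-3.1-8B-Instruct"  # example key, not guaranteed present
descriptor = ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR.get(repo)
if descriptor is None:
    raise ValueError(f"{repo} is not a registered Llama SKU")
```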
```diff
@@ -0,0 +1,101 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import asyncio
+import base64
+import platform
+import struct
+from typing import TYPE_CHECKING
+
+import torch
+
+from llama_stack.log import get_logger
+
+if TYPE_CHECKING:
+    from sentence_transformers import SentenceTransformer
+
+from llama_stack.apis.inference import (
+    ModelStore,
+    OpenAIEmbeddingData,
+    OpenAIEmbeddingsRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+    OpenAIEmbeddingUsage,
+)
+
+EMBEDDING_MODELS = {}
+
+DARWIN = "Darwin"
+
+
+log = get_logger(name=__name__, category="providers::utils")
+
+
+class SentenceTransformerEmbeddingMixin:
+    model_store: ModelStore
+
+    async def openai_embeddings(
+        self,
+        params: OpenAIEmbeddingsRequestWithExtraBody,
+    ) -> OpenAIEmbeddingsResponse:
+        # Convert input to list format if it's a single string
+        input_list = [params.input] if isinstance(params.input, str) else params.input
+        if not input_list:
+            raise ValueError("Empty list not supported")
+
+        # Get the model and generate embeddings
+        embedding_model = await self._load_sentence_transformer_model(params.model)
+        embeddings = await asyncio.to_thread(embedding_model.encode, input_list, show_progress_bar=False)
+
+        # Convert embeddings to the requested format
+        data = []
+        for i, embedding in enumerate(embeddings):
+            if params.encoding_format == "base64":
+                # Convert float array to base64 string
+                float_bytes = struct.pack(f"{len(embedding)}f", *embedding)
+                embedding_value = base64.b64encode(float_bytes).decode("ascii")
+            else:
+                # Default to float format
+                embedding_value = embedding.tolist()
+
+            data.append(
+                OpenAIEmbeddingData(
+                    embedding=embedding_value,
+                    index=i,
+                )
+            )
+
+        # Not returning actual token usage
+        usage = OpenAIEmbeddingUsage(prompt_tokens=-1, total_tokens=-1)
+        return OpenAIEmbeddingsResponse(
+            data=data,
+            model=params.model,
+            usage=usage,
+        )
+
+    async def _load_sentence_transformer_model(self, model: str) -> "SentenceTransformer":
+        global EMBEDDING_MODELS
+
+        loaded_model = EMBEDDING_MODELS.get(model)
+        if loaded_model is not None:
+            return loaded_model
+
+        log.info(f"Loading sentence transformer for {model}...")
+
+        def _load_model():
+            from sentence_transformers import SentenceTransformer
+
+            platform_name = platform.system()
+            if platform_name == DARWIN:
+                # PyTorch's OpenMP kernels can segfault on macOS when spawned from background
+                # threads with the default parallel settings, so force a single-threaded CPU run.
+                log.debug(f"Constraining torch threads on {platform_name} to a single worker")
+                torch.set_num_threads(1)
+
+            return SentenceTransformer(model, trust_remote_code=True)
+
+        loaded_model = await asyncio.to_thread(_load_model)
+        EMBEDDING_MODELS[model] = loaded_model
+        return loaded_model
```
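When `encoding_format == "base64"`, the mixin above packs each embedding as native-endian float32 values and base64-encodes the bytes. A client can invert that transform as follows; this is a minimal sketch of the inverse, not code from the package:

```python
import base64
import struct


def decode_base64_embedding(encoded: str) -> list[float]:
    # Inverse of struct.pack(f"{n}f", *embedding) + base64.b64encode(...):
    # decode the base64 payload, then unpack it as 4-byte float32 values.
    raw = base64.b64decode(encoded)
    count = len(raw) // 4
    return list(struct.unpack(f"{count}f", raw))
```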
```diff
@@ -0,0 +1,264 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+import asyncio
+from typing import Any
+
+from sqlalchemy.exc import IntegrityError
+
+from llama_stack.apis.inference import (
+    ListOpenAIChatCompletionResponse,
+    OpenAIChatCompletion,
+    OpenAICompletionWithInputMessages,
+    OpenAIMessageParam,
+    Order,
+)
+from llama_stack.core.datatypes import AccessRule
+from llama_stack.core.storage.datatypes import InferenceStoreReference, StorageBackendType
+from llama_stack.log import get_logger
+
+from ..sqlstore.api import ColumnDefinition, ColumnType
+from ..sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from ..sqlstore.sqlstore import _SQLSTORE_BACKENDS, sqlstore_impl
+
+logger = get_logger(name=__name__, category="inference")
+
+
+class InferenceStore:
+    def __init__(
+        self,
+        reference: InferenceStoreReference,
+        policy: list[AccessRule],
+    ):
+        self.reference = reference
+        self.sql_store = None
+        self.policy = policy
+        self.enable_write_queue = True
+
+        # Async write queue and worker control
+        self._queue: asyncio.Queue[tuple[OpenAIChatCompletion, list[OpenAIMessageParam]]] | None = None
+        self._worker_tasks: list[asyncio.Task[Any]] = []
+        self._max_write_queue_size: int = reference.max_write_queue_size
+        self._num_writers: int = max(1, reference.num_writers)
+
+    async def initialize(self):
+        """Create the necessary tables if they don't exist."""
+        base_store = sqlstore_impl(self.reference)
+        self.sql_store = AuthorizedSqlStore(base_store, self.policy)
+
+        # Disable write queue for SQLite since WAL mode handles concurrency
+        # Keep it enabled for other backends (like Postgres) for performance
+        backend_config = _SQLSTORE_BACKENDS.get(self.reference.backend)
+        if backend_config and backend_config.type == StorageBackendType.SQL_SQLITE:
+            self.enable_write_queue = False
+            logger.debug("Write queue disabled for SQLite (WAL mode handles concurrency)")
+
+        await self.sql_store.create_table(
+            "chat_completions",
+            {
+                "id": ColumnDefinition(type=ColumnType.STRING, primary_key=True),
+                "created": ColumnType.INTEGER,
+                "model": ColumnType.STRING,
+                "choices": ColumnType.JSON,
+                "input_messages": ColumnType.JSON,
+            },
+        )
+
+        if self.enable_write_queue:
+            self._queue = asyncio.Queue(maxsize=self._max_write_queue_size)
+            for _ in range(self._num_writers):
+                self._worker_tasks.append(asyncio.create_task(self._worker_loop()))
+            logger.debug(
+                f"Inference store write queue enabled with {self._num_writers} writers, max queue size {self._max_write_queue_size}"
+            )
+
+    async def shutdown(self) -> None:
+        if not self._worker_tasks:
+            return
+        if self._queue is not None:
+            await self._queue.join()
+        for t in self._worker_tasks:
+            if not t.done():
+                t.cancel()
+        for t in self._worker_tasks:
+            try:
+                await t
+            except asyncio.CancelledError:
+                pass
+        self._worker_tasks.clear()
+
+    async def flush(self) -> None:
+        """Wait for all queued writes to complete. Useful for testing."""
+        if self.enable_write_queue and self._queue is not None:
+            await self._queue.join()
+
+    async def _ensure_workers_started(self) -> None:
+        """Ensure the async write queue workers run on the current loop."""
+        if not self.enable_write_queue:
+            return
+
+        if self._queue is None:
+            self._queue = asyncio.Queue(maxsize=self._max_write_queue_size)
+            logger.debug(
+                f"Inference store write queue created with max size {self._max_write_queue_size} "
+                f"and {self._num_writers} writers"
+            )
+
+        if not self._worker_tasks:
+            loop = asyncio.get_running_loop()
+            for _ in range(self._num_writers):
+                task = loop.create_task(self._worker_loop())
+                self._worker_tasks.append(task)
+
+    async def store_chat_completion(
+        self, chat_completion: OpenAIChatCompletion, input_messages: list[OpenAIMessageParam]
+    ) -> None:
+        if self.enable_write_queue:
+            await self._ensure_workers_started()
+            if self._queue is None:
+                raise ValueError("Inference store is not initialized")
+            try:
+                self._queue.put_nowait((chat_completion, input_messages))
+            except asyncio.QueueFull:
+                logger.warning(
+                    f"Write queue full; adding chat completion id={getattr(chat_completion, 'id', '<unknown>')}"
+                )
+                await self._queue.put((chat_completion, input_messages))
+        else:
+            await self._write_chat_completion(chat_completion, input_messages)
+
+    async def _worker_loop(self) -> None:
+        assert self._queue is not None
+        while True:
+            try:
+                item = await self._queue.get()
+            except asyncio.CancelledError:
+                break
+            chat_completion, input_messages = item
+            try:
+                await self._write_chat_completion(chat_completion, input_messages)
+            except Exception as e:  # noqa: BLE001
+                logger.error(f"Error writing chat completion: {e}")
+            finally:
+                self._queue.task_done()
+
+    async def _write_chat_completion(
+        self, chat_completion: OpenAIChatCompletion, input_messages: list[OpenAIMessageParam]
+    ) -> None:
+        if self.sql_store is None:
+            raise ValueError("Inference store is not initialized")
+
+        data = chat_completion.model_dump()
+        record_data = {
+            "id": data["id"],
+            "created": data["created"],
+            "model": data["model"],
+            "choices": data["choices"],
+            "input_messages": [message.model_dump() for message in input_messages],
+        }
+
+        try:
+            await self.sql_store.insert(
+                table="chat_completions",
+                data=record_data,
+            )
+        except IntegrityError as e:
+            # Duplicate chat completion IDs can be generated during tests especially if they are replaying
+            # recorded responses across different tests. No need to warn or error under those circumstances.
+            # In the wild, this is not likely to happen at all (no evidence) so we aren't really hiding any problem.
+
+            # Check if it's a unique constraint violation
+            error_message = str(e.orig) if e.orig else str(e)
+            if self._is_unique_constraint_error(error_message):
+                # Update the existing record instead
+                await self.sql_store.update(table="chat_completions", data=record_data, where={"id": data["id"]})
+            else:
+                # Re-raise if it's not a unique constraint error
+                raise
+
+    def _is_unique_constraint_error(self, error_message: str) -> bool:
+        """Check if the error is specifically a unique constraint violation."""
+        error_lower = error_message.lower()
+        return any(
+            indicator in error_lower
+            for indicator in [
+                "unique constraint failed",  # SQLite
+                "duplicate key",  # PostgreSQL
+                "unique violation",  # PostgreSQL alternative
+                "duplicate entry",  # MySQL
+            ]
+        )
+
+    async def list_chat_completions(
+        self,
+        after: str | None = None,
+        limit: int | None = 50,
+        model: str | None = None,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIChatCompletionResponse:
+        """
+        List chat completions from the database.
+
+        :param after: The ID of the last chat completion to return.
+        :param limit: The maximum number of chat completions to return.
+        :param model: The model to filter by.
+        :param order: The order to sort the chat completions by.
+        """
+        if not self.sql_store:
+            raise ValueError("Inference store is not initialized")
+
+        if not order:
+            order = Order.desc
+
+        where_conditions = {}
+        if model:
+            where_conditions["model"] = model
+
+        paginated_result = await self.sql_store.fetch_all(
+            table="chat_completions",
+            where=where_conditions if where_conditions else None,
+            order_by=[("created", order.value)],
+            cursor=("id", after) if after else None,
+            limit=limit,
+        )
+
+        data = [
+            OpenAICompletionWithInputMessages(
+                id=row["id"],
+                created=row["created"],
+                model=row["model"],
+                choices=row["choices"],
+                input_messages=row["input_messages"],
+            )
+            for row in paginated_result.data
+        ]
+        return ListOpenAIChatCompletionResponse(
+            data=data,
+            has_more=paginated_result.has_more,
+            first_id=data[0].id if data else "",
+            last_id=data[-1].id if data else "",
+        )
+
+    async def get_chat_completion(self, completion_id: str) -> OpenAICompletionWithInputMessages:
+        if not self.sql_store:
+            raise ValueError("Inference store is not initialized")
+
+        row = await self.sql_store.fetch_one(
+            table="chat_completions",
+            where={"id": completion_id},
+        )
+
+        if not row:
+            # SecureSqlStore will return None if record doesn't exist OR access is denied
+            # This provides security by not revealing whether the record exists
+            raise ValueError(f"Chat completion with id {completion_id} not found") from None
+
+        return OpenAICompletionWithInputMessages(
+            id=row["id"],
+            created=row["created"],
+            model=row["model"],
+            choices=row["choices"],
+            input_messages=row["input_messages"],
+        )
```
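Finally, a hedged sketch of consuming the new `InferenceStore` listing API with cursor pagination; constructing the store itself (its `InferenceStoreReference` and access policy) is deployment-specific and assumed here:

```python
from llama_stack.apis.inference import Order


# `store` is assumed to be an already-initialized InferenceStore.
async def dump_all_completions(store) -> None:
    after = None
    while True:
        page = await store.list_chat_completions(after=after, limit=50, order=Order.desc)
        for completion in page.data:
            print(completion.id, completion.model, completion.created)
        if not page.has_more:
            break
        after = page.last_id  # resume the cursor after the last returned id
```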