llama-stack 0.0.42__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack/__init__.py +5 -0
- llama_stack/apis/agents/__init__.py +1 -1
- llama_stack/apis/agents/agents.py +700 -281
- llama_stack/apis/agents/openai_responses.py +1311 -0
- llama_stack/{providers/adapters/memory/sample/config.py → apis/batches/__init__.py} +2 -5
- llama_stack/apis/batches/batches.py +100 -0
- llama_stack/apis/benchmarks/__init__.py +7 -0
- llama_stack/apis/benchmarks/benchmarks.py +108 -0
- llama_stack/apis/common/content_types.py +143 -0
- llama_stack/apis/common/errors.py +103 -0
- llama_stack/apis/common/job_types.py +38 -0
- llama_stack/apis/common/responses.py +36 -0
- llama_stack/apis/common/training_types.py +36 -5
- llama_stack/apis/common/type_system.py +158 -0
- llama_stack/apis/conversations/__init__.py +31 -0
- llama_stack/apis/conversations/conversations.py +286 -0
- llama_stack/apis/datasetio/__init__.py +7 -0
- llama_stack/apis/datasetio/datasetio.py +59 -0
- llama_stack/apis/datasets/__init__.py +7 -0
- llama_stack/apis/datasets/datasets.py +251 -0
- llama_stack/apis/datatypes.py +160 -0
- llama_stack/apis/eval/__init__.py +7 -0
- llama_stack/apis/eval/eval.py +169 -0
- llama_stack/apis/files/__init__.py +7 -0
- llama_stack/apis/files/files.py +199 -0
- llama_stack/apis/inference/__init__.py +1 -1
- llama_stack/apis/inference/inference.py +1169 -113
- llama_stack/apis/inspect/__init__.py +1 -1
- llama_stack/apis/inspect/inspect.py +69 -16
- llama_stack/apis/models/__init__.py +1 -1
- llama_stack/apis/models/models.py +148 -21
- llama_stack/apis/post_training/__init__.py +1 -1
- llama_stack/apis/post_training/post_training.py +265 -120
- llama_stack/{providers/adapters/agents/sample/config.py → apis/prompts/__init__.py} +2 -5
- llama_stack/apis/prompts/prompts.py +204 -0
- llama_stack/apis/providers/__init__.py +7 -0
- llama_stack/apis/providers/providers.py +69 -0
- llama_stack/apis/resource.py +37 -0
- llama_stack/apis/safety/__init__.py +1 -1
- llama_stack/apis/safety/safety.py +95 -12
- llama_stack/apis/scoring/__init__.py +7 -0
- llama_stack/apis/scoring/scoring.py +93 -0
- llama_stack/apis/scoring_functions/__init__.py +7 -0
- llama_stack/apis/scoring_functions/scoring_functions.py +208 -0
- llama_stack/apis/shields/__init__.py +1 -1
- llama_stack/apis/shields/shields.py +76 -33
- llama_stack/apis/synthetic_data_generation/__init__.py +1 -1
- llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +40 -17
- llama_stack/apis/telemetry/__init__.py +1 -1
- llama_stack/apis/telemetry/telemetry.py +322 -31
- llama_stack/apis/{dataset → tools}/__init__.py +2 -1
- llama_stack/apis/tools/rag_tool.py +218 -0
- llama_stack/apis/tools/tools.py +221 -0
- llama_stack/apis/vector_io/__init__.py +7 -0
- llama_stack/apis/vector_io/vector_io.py +960 -0
- llama_stack/apis/vector_stores/__init__.py +7 -0
- llama_stack/apis/vector_stores/vector_stores.py +51 -0
- llama_stack/apis/version.py +9 -0
- llama_stack/cli/llama.py +13 -5
- llama_stack/cli/stack/_list_deps.py +182 -0
- llama_stack/cli/stack/list_apis.py +1 -1
- llama_stack/cli/stack/list_deps.py +55 -0
- llama_stack/cli/stack/list_providers.py +24 -10
- llama_stack/cli/stack/list_stacks.py +56 -0
- llama_stack/cli/stack/remove.py +115 -0
- llama_stack/cli/stack/run.py +169 -56
- llama_stack/cli/stack/stack.py +18 -4
- llama_stack/cli/stack/utils.py +151 -0
- llama_stack/cli/table.py +23 -61
- llama_stack/cli/utils.py +29 -0
- llama_stack/core/access_control/access_control.py +131 -0
- llama_stack/core/access_control/conditions.py +129 -0
- llama_stack/core/access_control/datatypes.py +107 -0
- llama_stack/core/build.py +164 -0
- llama_stack/core/client.py +205 -0
- llama_stack/core/common.sh +37 -0
- llama_stack/{distribution → core}/configure.py +74 -55
- llama_stack/core/conversations/conversations.py +309 -0
- llama_stack/core/datatypes.py +625 -0
- llama_stack/core/distribution.py +276 -0
- llama_stack/core/external.py +54 -0
- llama_stack/core/id_generation.py +42 -0
- llama_stack/core/inspect.py +86 -0
- llama_stack/core/library_client.py +539 -0
- llama_stack/core/prompts/prompts.py +234 -0
- llama_stack/core/providers.py +137 -0
- llama_stack/core/request_headers.py +115 -0
- llama_stack/core/resolver.py +506 -0
- llama_stack/core/routers/__init__.py +101 -0
- llama_stack/core/routers/datasets.py +73 -0
- llama_stack/core/routers/eval_scoring.py +155 -0
- llama_stack/core/routers/inference.py +645 -0
- llama_stack/core/routers/safety.py +85 -0
- llama_stack/core/routers/tool_runtime.py +91 -0
- llama_stack/core/routers/vector_io.py +442 -0
- llama_stack/core/routing_tables/benchmarks.py +62 -0
- llama_stack/core/routing_tables/common.py +254 -0
- llama_stack/core/routing_tables/datasets.py +91 -0
- llama_stack/core/routing_tables/models.py +163 -0
- llama_stack/core/routing_tables/scoring_functions.py +66 -0
- llama_stack/core/routing_tables/shields.py +61 -0
- llama_stack/core/routing_tables/toolgroups.py +129 -0
- llama_stack/core/routing_tables/vector_stores.py +292 -0
- llama_stack/core/server/auth.py +187 -0
- llama_stack/core/server/auth_providers.py +494 -0
- llama_stack/core/server/quota.py +110 -0
- llama_stack/core/server/routes.py +141 -0
- llama_stack/core/server/server.py +542 -0
- llama_stack/core/server/tracing.py +80 -0
- llama_stack/core/stack.py +546 -0
- llama_stack/core/start_stack.sh +117 -0
- llama_stack/core/storage/datatypes.py +283 -0
- llama_stack/{cli/model → core/store}/__init__.py +1 -1
- llama_stack/core/store/registry.py +199 -0
- llama_stack/core/testing_context.py +49 -0
- llama_stack/core/ui/app.py +55 -0
- llama_stack/core/ui/modules/api.py +32 -0
- llama_stack/core/ui/modules/utils.py +42 -0
- llama_stack/core/ui/page/distribution/datasets.py +18 -0
- llama_stack/core/ui/page/distribution/eval_tasks.py +20 -0
- llama_stack/core/ui/page/distribution/models.py +18 -0
- llama_stack/core/ui/page/distribution/providers.py +27 -0
- llama_stack/core/ui/page/distribution/resources.py +48 -0
- llama_stack/core/ui/page/distribution/scoring_functions.py +18 -0
- llama_stack/core/ui/page/distribution/shields.py +19 -0
- llama_stack/core/ui/page/evaluations/app_eval.py +143 -0
- llama_stack/core/ui/page/evaluations/native_eval.py +253 -0
- llama_stack/core/ui/page/playground/chat.py +130 -0
- llama_stack/core/ui/page/playground/tools.py +352 -0
- llama_stack/core/utils/config.py +30 -0
- llama_stack/{distribution → core}/utils/config_dirs.py +3 -6
- llama_stack/core/utils/config_resolution.py +125 -0
- llama_stack/core/utils/context.py +84 -0
- llama_stack/core/utils/exec.py +96 -0
- llama_stack/{providers/impls/meta_reference/codeshield/config.py → core/utils/image_types.py} +4 -3
- llama_stack/{distribution → core}/utils/model_utils.py +2 -2
- llama_stack/{distribution → core}/utils/prompt_for_config.py +30 -63
- llama_stack/{apis/batch_inference → distributions/dell}/__init__.py +1 -1
- llama_stack/distributions/dell/build.yaml +33 -0
- llama_stack/distributions/dell/dell.py +158 -0
- llama_stack/distributions/dell/run-with-safety.yaml +141 -0
- llama_stack/distributions/dell/run.yaml +132 -0
- llama_stack/distributions/meta-reference-gpu/__init__.py +7 -0
- llama_stack/distributions/meta-reference-gpu/build.yaml +32 -0
- llama_stack/distributions/meta-reference-gpu/meta_reference.py +163 -0
- llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +154 -0
- llama_stack/distributions/meta-reference-gpu/run.yaml +139 -0
- llama_stack/{apis/evals → distributions/nvidia}/__init__.py +1 -1
- llama_stack/distributions/nvidia/build.yaml +29 -0
- llama_stack/distributions/nvidia/nvidia.py +154 -0
- llama_stack/distributions/nvidia/run-with-safety.yaml +137 -0
- llama_stack/distributions/nvidia/run.yaml +116 -0
- llama_stack/distributions/open-benchmark/__init__.py +7 -0
- llama_stack/distributions/open-benchmark/build.yaml +36 -0
- llama_stack/distributions/open-benchmark/open_benchmark.py +303 -0
- llama_stack/distributions/open-benchmark/run.yaml +252 -0
- llama_stack/distributions/postgres-demo/__init__.py +7 -0
- llama_stack/distributions/postgres-demo/build.yaml +23 -0
- llama_stack/distributions/postgres-demo/postgres_demo.py +125 -0
- llama_stack/distributions/postgres-demo/run.yaml +115 -0
- llama_stack/{apis/memory → distributions/starter}/__init__.py +1 -1
- llama_stack/distributions/starter/build.yaml +61 -0
- llama_stack/distributions/starter/run-with-postgres-store.yaml +285 -0
- llama_stack/distributions/starter/run.yaml +276 -0
- llama_stack/distributions/starter/starter.py +345 -0
- llama_stack/distributions/starter-gpu/__init__.py +7 -0
- llama_stack/distributions/starter-gpu/build.yaml +61 -0
- llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +288 -0
- llama_stack/distributions/starter-gpu/run.yaml +279 -0
- llama_stack/distributions/starter-gpu/starter_gpu.py +20 -0
- llama_stack/distributions/template.py +456 -0
- llama_stack/distributions/watsonx/__init__.py +7 -0
- llama_stack/distributions/watsonx/build.yaml +33 -0
- llama_stack/distributions/watsonx/run.yaml +133 -0
- llama_stack/distributions/watsonx/watsonx.py +95 -0
- llama_stack/env.py +24 -0
- llama_stack/log.py +314 -0
- llama_stack/models/llama/checkpoint.py +164 -0
- llama_stack/models/llama/datatypes.py +164 -0
- llama_stack/models/llama/hadamard_utils.py +86 -0
- llama_stack/models/llama/llama3/args.py +74 -0
- llama_stack/models/llama/llama3/chat_format.py +286 -0
- llama_stack/models/llama/llama3/generation.py +376 -0
- llama_stack/models/llama/llama3/interface.py +255 -0
- llama_stack/models/llama/llama3/model.py +304 -0
- llama_stack/models/llama/llama3/multimodal/__init__.py +12 -0
- llama_stack/models/llama/llama3/multimodal/encoder_utils.py +180 -0
- llama_stack/models/llama/llama3/multimodal/image_transform.py +409 -0
- llama_stack/models/llama/llama3/multimodal/model.py +1430 -0
- llama_stack/models/llama/llama3/multimodal/utils.py +26 -0
- llama_stack/models/llama/llama3/prompt_templates/__init__.py +22 -0
- llama_stack/models/llama/llama3/prompt_templates/base.py +39 -0
- llama_stack/models/llama/llama3/prompt_templates/system_prompts.py +319 -0
- llama_stack/models/llama/llama3/prompt_templates/tool_response.py +62 -0
- llama_stack/models/llama/llama3/quantization/loader.py +316 -0
- llama_stack/models/llama/llama3/template_data.py +116 -0
- llama_stack/models/llama/llama3/tokenizer.model +128000 -0
- llama_stack/models/llama/llama3/tokenizer.py +198 -0
- llama_stack/models/llama/llama3/tool_utils.py +266 -0
- llama_stack/models/llama/llama3_1/__init__.py +12 -0
- llama_stack/models/llama/llama3_1/prompt_format.md +358 -0
- llama_stack/models/llama/llama3_1/prompts.py +258 -0
- llama_stack/models/llama/llama3_2/prompts_text.py +229 -0
- llama_stack/models/llama/llama3_2/prompts_vision.py +126 -0
- llama_stack/models/llama/llama3_2/text_prompt_format.md +286 -0
- llama_stack/models/llama/llama3_2/vision_prompt_format.md +141 -0
- llama_stack/models/llama/llama3_3/prompts.py +259 -0
- llama_stack/models/llama/llama4/args.py +107 -0
- llama_stack/models/llama/llama4/chat_format.py +317 -0
- llama_stack/models/llama/llama4/datatypes.py +56 -0
- llama_stack/models/llama/llama4/ffn.py +58 -0
- llama_stack/models/llama/llama4/generation.py +313 -0
- llama_stack/models/llama/llama4/model.py +437 -0
- llama_stack/models/llama/llama4/moe.py +214 -0
- llama_stack/models/llama/llama4/preprocess.py +435 -0
- llama_stack/models/llama/llama4/prompt_format.md +304 -0
- llama_stack/models/llama/llama4/prompt_templates/system_prompts.py +136 -0
- llama_stack/models/llama/llama4/prompts.py +279 -0
- llama_stack/models/llama/llama4/quantization/__init__.py +5 -0
- llama_stack/models/llama/llama4/quantization/loader.py +226 -0
- llama_stack/models/llama/llama4/tokenizer.model +200000 -0
- llama_stack/models/llama/llama4/tokenizer.py +263 -0
- llama_stack/models/llama/llama4/vision/__init__.py +5 -0
- llama_stack/models/llama/llama4/vision/embedding.py +210 -0
- llama_stack/models/llama/llama4/vision/encoder.py +412 -0
- llama_stack/models/llama/prompt_format.py +191 -0
- llama_stack/models/llama/quantize_impls.py +316 -0
- llama_stack/models/llama/sku_list.py +1029 -0
- llama_stack/models/llama/sku_types.py +233 -0
- llama_stack/models/llama/tokenizer_utils.py +40 -0
- llama_stack/providers/datatypes.py +136 -107
- llama_stack/providers/inline/__init__.py +5 -0
- llama_stack/providers/inline/agents/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/agents/meta_reference}/__init__.py +12 -5
- llama_stack/providers/inline/agents/meta_reference/agent_instance.py +1024 -0
- llama_stack/providers/inline/agents/meta_reference/agents.py +383 -0
- llama_stack/providers/inline/agents/meta_reference/config.py +37 -0
- llama_stack/providers/inline/agents/meta_reference/persistence.py +228 -0
- llama_stack/providers/inline/agents/meta_reference/responses/__init__.py +5 -0
- llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +423 -0
- llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +1226 -0
- llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py +449 -0
- llama_stack/providers/inline/agents/meta_reference/responses/types.py +194 -0
- llama_stack/providers/inline/agents/meta_reference/responses/utils.py +365 -0
- llama_stack/providers/inline/agents/meta_reference/safety.py +52 -0
- llama_stack/providers/inline/batches/__init__.py +5 -0
- llama_stack/providers/inline/batches/reference/__init__.py +36 -0
- llama_stack/providers/inline/batches/reference/batches.py +679 -0
- llama_stack/providers/inline/batches/reference/config.py +40 -0
- llama_stack/providers/inline/datasetio/__init__.py +5 -0
- llama_stack/providers/inline/datasetio/localfs/__init__.py +20 -0
- llama_stack/providers/inline/datasetio/localfs/config.py +23 -0
- llama_stack/providers/inline/datasetio/localfs/datasetio.py +113 -0
- llama_stack/providers/inline/eval/__init__.py +5 -0
- llama_stack/providers/inline/eval/meta_reference/__init__.py +28 -0
- llama_stack/providers/inline/eval/meta_reference/config.py +23 -0
- llama_stack/providers/inline/eval/meta_reference/eval.py +259 -0
- llama_stack/providers/inline/files/localfs/__init__.py +20 -0
- llama_stack/providers/inline/files/localfs/config.py +31 -0
- llama_stack/providers/inline/files/localfs/files.py +219 -0
- llama_stack/providers/inline/inference/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/__init__.py +4 -4
- llama_stack/providers/inline/inference/meta_reference/common.py +24 -0
- llama_stack/providers/inline/inference/meta_reference/config.py +68 -0
- llama_stack/providers/inline/inference/meta_reference/generators.py +211 -0
- llama_stack/providers/inline/inference/meta_reference/inference.py +158 -0
- llama_stack/providers/inline/inference/meta_reference/model_parallel.py +96 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/parallel_utils.py +56 -73
- llama_stack/providers/inline/inference/sentence_transformers/__init__.py +22 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/inference/sentence_transformers}/config.py +6 -4
- llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +83 -0
- llama_stack/providers/inline/post_training/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/utils.py +35 -0
- llama_stack/providers/inline/post_training/common/validator.py +36 -0
- llama_stack/providers/inline/post_training/huggingface/__init__.py +27 -0
- llama_stack/providers/inline/post_training/huggingface/config.py +83 -0
- llama_stack/providers/inline/post_training/huggingface/post_training.py +208 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py +519 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py +485 -0
- llama_stack/providers/inline/post_training/huggingface/utils.py +269 -0
- llama_stack/providers/inline/post_training/torchtune/__init__.py +27 -0
- llama_stack/providers/inline/post_training/torchtune/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +240 -0
- llama_stack/providers/inline/post_training/torchtune/common/utils.py +99 -0
- llama_stack/providers/inline/post_training/torchtune/config.py +20 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +57 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/sft.py +78 -0
- llama_stack/providers/inline/post_training/torchtune/post_training.py +178 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +588 -0
- llama_stack/providers/inline/safety/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/codeshield → inline/safety/code_scanner}/__init__.py +4 -2
- llama_stack/providers/inline/safety/code_scanner/code_scanner.py +128 -0
- llama_stack/providers/{impls/meta_reference/memory → inline/safety/code_scanner}/config.py +5 -3
- llama_stack/providers/inline/safety/llama_guard/__init__.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/config.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/llama_guard.py +489 -0
- llama_stack/providers/{adapters/memory/sample → inline/safety/prompt_guard}/__init__.py +4 -4
- llama_stack/providers/inline/safety/prompt_guard/config.py +32 -0
- llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py +131 -0
- llama_stack/providers/inline/scoring/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/__init__.py +25 -0
- llama_stack/providers/{adapters/memory/weaviate → inline/scoring/basic}/config.py +5 -7
- llama_stack/providers/inline/scoring/basic/scoring.py +126 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py +240 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py +41 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/docvqa.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/equality.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/ifeval.py +23 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_math_response.py +27 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py +71 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/subset_of.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py +80 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py +66 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py +58 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py +38 -0
- llama_stack/providers/inline/scoring/basic/utils/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py +3319 -0
- llama_stack/providers/inline/scoring/basic/utils/math_utils.py +330 -0
- llama_stack/providers/inline/scoring/braintrust/__init__.py +27 -0
- llama_stack/providers/inline/scoring/braintrust/braintrust.py +230 -0
- llama_stack/providers/inline/scoring/braintrust/config.py +21 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_correctness.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_relevancy.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_similarity.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_entity_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_precision.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_relevancy.py +23 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/factuality.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/faithfulness.py +24 -0
- llama_stack/providers/inline/scoring/llm_as_judge/__init__.py +21 -0
- llama_stack/providers/inline/scoring/llm_as_judge/config.py +14 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring.py +113 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +96 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +20 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +81 -0
- llama_stack/providers/inline/telemetry/__init__.py +5 -0
- llama_stack/providers/inline/telemetry/meta_reference/__init__.py +21 -0
- llama_stack/providers/inline/telemetry/meta_reference/config.py +47 -0
- llama_stack/providers/inline/telemetry/meta_reference/telemetry.py +252 -0
- llama_stack/providers/inline/tool_runtime/__init__.py +5 -0
- llama_stack/providers/inline/tool_runtime/rag/__init__.py +19 -0
- llama_stack/providers/{impls/meta_reference/telemetry → inline/tool_runtime/rag}/config.py +5 -3
- llama_stack/providers/inline/tool_runtime/rag/context_retriever.py +77 -0
- llama_stack/providers/inline/tool_runtime/rag/memory.py +332 -0
- llama_stack/providers/inline/vector_io/__init__.py +5 -0
- llama_stack/providers/inline/vector_io/chroma/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/chroma/config.py +30 -0
- llama_stack/providers/inline/vector_io/faiss/__init__.py +21 -0
- llama_stack/providers/inline/vector_io/faiss/config.py +26 -0
- llama_stack/providers/inline/vector_io/faiss/faiss.py +293 -0
- llama_stack/providers/inline/vector_io/milvus/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/milvus/config.py +29 -0
- llama_stack/providers/inline/vector_io/qdrant/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/qdrant/config.py +29 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/config.py +26 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +483 -0
- llama_stack/providers/registry/agents.py +16 -18
- llama_stack/providers/registry/batches.py +26 -0
- llama_stack/providers/registry/datasetio.py +49 -0
- llama_stack/providers/registry/eval.py +46 -0
- llama_stack/providers/registry/files.py +31 -0
- llama_stack/providers/registry/inference.py +273 -118
- llama_stack/providers/registry/post_training.py +69 -0
- llama_stack/providers/registry/safety.py +46 -41
- llama_stack/providers/registry/scoring.py +51 -0
- llama_stack/providers/registry/tool_runtime.py +87 -0
- llama_stack/providers/registry/vector_io.py +828 -0
- llama_stack/providers/remote/__init__.py +5 -0
- llama_stack/providers/remote/agents/__init__.py +5 -0
- llama_stack/providers/remote/datasetio/__init__.py +5 -0
- llama_stack/providers/{adapters/memory/chroma → remote/datasetio/huggingface}/__init__.py +7 -4
- llama_stack/providers/remote/datasetio/huggingface/config.py +23 -0
- llama_stack/providers/remote/datasetio/huggingface/huggingface.py +99 -0
- llama_stack/providers/remote/datasetio/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/datasetio/nvidia/config.py +61 -0
- llama_stack/providers/remote/datasetio/nvidia/datasetio.py +116 -0
- llama_stack/providers/remote/eval/__init__.py +5 -0
- llama_stack/providers/remote/eval/nvidia/__init__.py +31 -0
- llama_stack/providers/remote/eval/nvidia/config.py +29 -0
- llama_stack/providers/remote/eval/nvidia/eval.py +162 -0
- llama_stack/providers/remote/files/s3/__init__.py +19 -0
- llama_stack/providers/remote/files/s3/config.py +42 -0
- llama_stack/providers/remote/files/s3/files.py +313 -0
- llama_stack/providers/remote/inference/__init__.py +5 -0
- llama_stack/providers/{adapters/safety/sample → remote/inference/anthropic}/__init__.py +4 -6
- llama_stack/providers/remote/inference/anthropic/anthropic.py +36 -0
- llama_stack/providers/remote/inference/anthropic/config.py +28 -0
- llama_stack/providers/{impls/meta_reference/telemetry → remote/inference/azure}/__init__.py +4 -4
- llama_stack/providers/remote/inference/azure/azure.py +25 -0
- llama_stack/providers/remote/inference/azure/config.py +61 -0
- llama_stack/providers/{adapters → remote}/inference/bedrock/__init__.py +18 -17
- llama_stack/providers/remote/inference/bedrock/bedrock.py +142 -0
- llama_stack/providers/{adapters/inference/sample → remote/inference/bedrock}/config.py +3 -4
- llama_stack/providers/remote/inference/bedrock/models.py +29 -0
- llama_stack/providers/remote/inference/cerebras/__init__.py +19 -0
- llama_stack/providers/remote/inference/cerebras/cerebras.py +28 -0
- llama_stack/providers/remote/inference/cerebras/config.py +30 -0
- llama_stack/providers/{adapters → remote}/inference/databricks/__init__.py +4 -5
- llama_stack/providers/remote/inference/databricks/config.py +37 -0
- llama_stack/providers/remote/inference/databricks/databricks.py +44 -0
- llama_stack/providers/{adapters → remote}/inference/fireworks/__init__.py +8 -4
- llama_stack/providers/remote/inference/fireworks/config.py +27 -0
- llama_stack/providers/remote/inference/fireworks/fireworks.py +27 -0
- llama_stack/providers/{adapters/memory/pgvector → remote/inference/gemini}/__init__.py +4 -4
- llama_stack/providers/remote/inference/gemini/config.py +28 -0
- llama_stack/providers/remote/inference/gemini/gemini.py +82 -0
- llama_stack/providers/remote/inference/groq/__init__.py +15 -0
- llama_stack/providers/remote/inference/groq/config.py +34 -0
- llama_stack/providers/remote/inference/groq/groq.py +18 -0
- llama_stack/providers/remote/inference/llama_openai_compat/__init__.py +15 -0
- llama_stack/providers/remote/inference/llama_openai_compat/config.py +34 -0
- llama_stack/providers/remote/inference/llama_openai_compat/llama.py +46 -0
- llama_stack/providers/remote/inference/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/inference/nvidia/config.py +64 -0
- llama_stack/providers/remote/inference/nvidia/nvidia.py +61 -0
- llama_stack/providers/{adapters/safety/sample/config.py → remote/inference/nvidia/utils.py} +3 -4
- llama_stack/providers/{impls/vllm → remote/inference/ollama}/__init__.py +4 -6
- llama_stack/providers/remote/inference/ollama/config.py +25 -0
- llama_stack/providers/remote/inference/ollama/ollama.py +102 -0
- llama_stack/providers/{adapters/telemetry/opentelemetry → remote/inference/openai}/__init__.py +4 -4
- llama_stack/providers/remote/inference/openai/config.py +39 -0
- llama_stack/providers/remote/inference/openai/openai.py +38 -0
- llama_stack/providers/remote/inference/passthrough/__init__.py +23 -0
- llama_stack/providers/remote/inference/passthrough/config.py +34 -0
- llama_stack/providers/remote/inference/passthrough/passthrough.py +122 -0
- llama_stack/providers/remote/inference/runpod/__init__.py +16 -0
- llama_stack/providers/remote/inference/runpod/config.py +32 -0
- llama_stack/providers/remote/inference/runpod/runpod.py +42 -0
- llama_stack/providers/remote/inference/sambanova/__init__.py +16 -0
- llama_stack/providers/remote/inference/sambanova/config.py +34 -0
- llama_stack/providers/remote/inference/sambanova/sambanova.py +28 -0
- llama_stack/providers/{adapters → remote}/inference/tgi/__init__.py +3 -4
- llama_stack/providers/remote/inference/tgi/config.py +76 -0
- llama_stack/providers/remote/inference/tgi/tgi.py +85 -0
- llama_stack/providers/{adapters → remote}/inference/together/__init__.py +8 -4
- llama_stack/providers/remote/inference/together/config.py +27 -0
- llama_stack/providers/remote/inference/together/together.py +102 -0
- llama_stack/providers/remote/inference/vertexai/__init__.py +15 -0
- llama_stack/providers/remote/inference/vertexai/config.py +48 -0
- llama_stack/providers/remote/inference/vertexai/vertexai.py +54 -0
- llama_stack/providers/remote/inference/vllm/__init__.py +22 -0
- llama_stack/providers/remote/inference/vllm/config.py +59 -0
- llama_stack/providers/remote/inference/vllm/vllm.py +111 -0
- llama_stack/providers/remote/inference/watsonx/__init__.py +15 -0
- llama_stack/providers/remote/inference/watsonx/config.py +45 -0
- llama_stack/providers/remote/inference/watsonx/watsonx.py +336 -0
- llama_stack/providers/remote/post_training/__init__.py +5 -0
- llama_stack/providers/remote/post_training/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/post_training/nvidia/config.py +113 -0
- llama_stack/providers/remote/post_training/nvidia/models.py +27 -0
- llama_stack/providers/remote/post_training/nvidia/post_training.py +430 -0
- llama_stack/providers/remote/post_training/nvidia/utils.py +63 -0
- llama_stack/providers/remote/safety/__init__.py +5 -0
- llama_stack/providers/remote/safety/bedrock/bedrock.py +111 -0
- llama_stack/providers/remote/safety/bedrock/config.py +14 -0
- llama_stack/providers/{adapters/inference/sample → remote/safety/nvidia}/__init__.py +5 -4
- llama_stack/providers/remote/safety/nvidia/config.py +40 -0
- llama_stack/providers/remote/safety/nvidia/nvidia.py +161 -0
- llama_stack/providers/{adapters/agents/sample → remote/safety/sambanova}/__init__.py +5 -4
- llama_stack/providers/remote/safety/sambanova/config.py +37 -0
- llama_stack/providers/remote/safety/sambanova/sambanova.py +98 -0
- llama_stack/providers/remote/tool_runtime/__init__.py +5 -0
- llama_stack/providers/remote/tool_runtime/bing_search/__init__.py +21 -0
- llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +112 -0
- llama_stack/providers/remote/tool_runtime/bing_search/config.py +22 -0
- llama_stack/providers/remote/tool_runtime/brave_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +148 -0
- llama_stack/providers/remote/tool_runtime/brave_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py +15 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py +20 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +73 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +84 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/__init__.py +22 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py +21 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +140 -0
- llama_stack/providers/remote/vector_io/__init__.py +5 -0
- llama_stack/providers/remote/vector_io/chroma/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/chroma/chroma.py +215 -0
- llama_stack/providers/remote/vector_io/chroma/config.py +28 -0
- llama_stack/providers/remote/vector_io/milvus/__init__.py +18 -0
- llama_stack/providers/remote/vector_io/milvus/config.py +35 -0
- llama_stack/providers/remote/vector_io/milvus/milvus.py +375 -0
- llama_stack/providers/remote/vector_io/pgvector/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/pgvector/config.py +47 -0
- llama_stack/providers/remote/vector_io/pgvector/pgvector.py +460 -0
- llama_stack/providers/remote/vector_io/qdrant/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/qdrant/config.py +37 -0
- llama_stack/providers/remote/vector_io/qdrant/qdrant.py +265 -0
- llama_stack/providers/remote/vector_io/weaviate/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/weaviate/config.py +32 -0
- llama_stack/providers/remote/vector_io/weaviate/weaviate.py +393 -0
- llama_stack/providers/utils/bedrock/__init__.py +5 -0
- llama_stack/providers/utils/bedrock/client.py +74 -0
- llama_stack/providers/utils/bedrock/config.py +64 -0
- llama_stack/providers/utils/bedrock/refreshable_boto_session.py +112 -0
- llama_stack/providers/utils/common/__init__.py +5 -0
- llama_stack/providers/utils/common/data_schema_validator.py +103 -0
- llama_stack/providers/utils/datasetio/__init__.py +5 -0
- llama_stack/providers/utils/datasetio/url_utils.py +47 -0
- llama_stack/providers/utils/files/__init__.py +5 -0
- llama_stack/providers/utils/files/form_data.py +69 -0
- llama_stack/providers/utils/inference/__init__.py +8 -7
- llama_stack/providers/utils/inference/embedding_mixin.py +101 -0
- llama_stack/providers/utils/inference/inference_store.py +264 -0
- llama_stack/providers/utils/inference/litellm_openai_mixin.py +336 -0
- llama_stack/providers/utils/inference/model_registry.py +173 -23
- llama_stack/providers/utils/inference/openai_compat.py +1261 -49
- llama_stack/providers/utils/inference/openai_mixin.py +506 -0
- llama_stack/providers/utils/inference/prompt_adapter.py +365 -67
- llama_stack/providers/utils/kvstore/api.py +6 -6
- llama_stack/providers/utils/kvstore/config.py +28 -48
- llama_stack/providers/utils/kvstore/kvstore.py +61 -15
- llama_stack/providers/utils/kvstore/mongodb/__init__.py +9 -0
- llama_stack/providers/utils/kvstore/mongodb/mongodb.py +82 -0
- llama_stack/providers/utils/kvstore/postgres/__init__.py +7 -0
- llama_stack/providers/utils/kvstore/postgres/postgres.py +114 -0
- llama_stack/providers/utils/kvstore/redis/redis.py +33 -9
- llama_stack/providers/utils/kvstore/sqlite/config.py +2 -1
- llama_stack/providers/utils/kvstore/sqlite/sqlite.py +123 -22
- llama_stack/providers/utils/memory/file_utils.py +1 -1
- llama_stack/providers/utils/memory/openai_vector_store_mixin.py +1304 -0
- llama_stack/providers/utils/memory/vector_store.py +220 -82
- llama_stack/providers/utils/pagination.py +43 -0
- llama_stack/providers/utils/responses/__init__.py +5 -0
- llama_stack/providers/utils/responses/responses_store.py +292 -0
- llama_stack/providers/utils/scheduler.py +270 -0
- llama_stack/providers/utils/scoring/__init__.py +5 -0
- llama_stack/providers/utils/scoring/aggregation_utils.py +75 -0
- llama_stack/providers/utils/scoring/base_scoring_fn.py +114 -0
- llama_stack/providers/utils/scoring/basic_scoring_utils.py +26 -0
- llama_stack/providers/utils/sqlstore/__init__.py +5 -0
- llama_stack/providers/utils/sqlstore/api.py +128 -0
- llama_stack/providers/utils/sqlstore/authorized_sqlstore.py +319 -0
- llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py +343 -0
- llama_stack/providers/utils/sqlstore/sqlstore.py +70 -0
- llama_stack/providers/utils/telemetry/trace_protocol.py +142 -0
- llama_stack/providers/utils/telemetry/tracing.py +192 -53
- llama_stack/providers/utils/tools/__init__.py +5 -0
- llama_stack/providers/utils/tools/mcp.py +148 -0
- llama_stack/providers/utils/tools/ttl_dict.py +70 -0
- llama_stack/providers/utils/vector_io/__init__.py +5 -0
- llama_stack/providers/utils/vector_io/vector_utils.py +156 -0
- llama_stack/schema_utils.py +118 -0
- llama_stack/strong_typing/__init__.py +19 -0
- llama_stack/strong_typing/auxiliary.py +228 -0
- llama_stack/strong_typing/classdef.py +440 -0
- llama_stack/strong_typing/core.py +46 -0
- llama_stack/strong_typing/deserializer.py +877 -0
- llama_stack/strong_typing/docstring.py +409 -0
- llama_stack/strong_typing/exception.py +23 -0
- llama_stack/strong_typing/inspection.py +1085 -0
- llama_stack/strong_typing/mapping.py +40 -0
- llama_stack/strong_typing/name.py +182 -0
- llama_stack/strong_typing/py.typed +0 -0
- llama_stack/strong_typing/schema.py +792 -0
- llama_stack/strong_typing/serialization.py +97 -0
- llama_stack/strong_typing/serializer.py +500 -0
- llama_stack/strong_typing/slots.py +27 -0
- llama_stack/strong_typing/topological.py +89 -0
- llama_stack/testing/__init__.py +5 -0
- llama_stack/testing/api_recorder.py +956 -0
- llama_stack/ui/node_modules/flatted/python/flatted.py +149 -0
- llama_stack-0.3.4.dist-info/METADATA +261 -0
- llama_stack-0.3.4.dist-info/RECORD +625 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/WHEEL +1 -1
- llama_stack/apis/agents/client.py +0 -292
- llama_stack/apis/agents/event_logger.py +0 -184
- llama_stack/apis/batch_inference/batch_inference.py +0 -72
- llama_stack/apis/common/deployment_types.py +0 -31
- llama_stack/apis/dataset/dataset.py +0 -63
- llama_stack/apis/evals/evals.py +0 -122
- llama_stack/apis/inference/client.py +0 -197
- llama_stack/apis/inspect/client.py +0 -82
- llama_stack/apis/memory/client.py +0 -155
- llama_stack/apis/memory/memory.py +0 -65
- llama_stack/apis/memory_banks/__init__.py +0 -7
- llama_stack/apis/memory_banks/client.py +0 -101
- llama_stack/apis/memory_banks/memory_banks.py +0 -78
- llama_stack/apis/models/client.py +0 -83
- llama_stack/apis/reward_scoring/__init__.py +0 -7
- llama_stack/apis/reward_scoring/reward_scoring.py +0 -55
- llama_stack/apis/safety/client.py +0 -105
- llama_stack/apis/shields/client.py +0 -79
- llama_stack/cli/download.py +0 -340
- llama_stack/cli/model/describe.py +0 -82
- llama_stack/cli/model/download.py +0 -24
- llama_stack/cli/model/list.py +0 -62
- llama_stack/cli/model/model.py +0 -34
- llama_stack/cli/model/prompt_format.py +0 -112
- llama_stack/cli/model/safety_models.py +0 -52
- llama_stack/cli/stack/build.py +0 -299
- llama_stack/cli/stack/configure.py +0 -178
- llama_stack/distribution/build.py +0 -123
- llama_stack/distribution/build_conda_env.sh +0 -136
- llama_stack/distribution/build_container.sh +0 -142
- llama_stack/distribution/common.sh +0 -40
- llama_stack/distribution/configure_container.sh +0 -47
- llama_stack/distribution/datatypes.py +0 -139
- llama_stack/distribution/distribution.py +0 -58
- llama_stack/distribution/inspect.py +0 -67
- llama_stack/distribution/request_headers.py +0 -57
- llama_stack/distribution/resolver.py +0 -323
- llama_stack/distribution/routers/__init__.py +0 -48
- llama_stack/distribution/routers/routers.py +0 -158
- llama_stack/distribution/routers/routing_tables.py +0 -173
- llama_stack/distribution/server/endpoints.py +0 -48
- llama_stack/distribution/server/server.py +0 -343
- llama_stack/distribution/start_conda_env.sh +0 -42
- llama_stack/distribution/start_container.sh +0 -64
- llama_stack/distribution/templates/local-bedrock-conda-example-build.yaml +0 -10
- llama_stack/distribution/templates/local-build.yaml +0 -10
- llama_stack/distribution/templates/local-databricks-build.yaml +0 -10
- llama_stack/distribution/templates/local-fireworks-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-endpoint-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-serverless-build.yaml +0 -10
- llama_stack/distribution/templates/local-ollama-build.yaml +0 -10
- llama_stack/distribution/templates/local-tgi-build.yaml +0 -10
- llama_stack/distribution/templates/local-together-build.yaml +0 -10
- llama_stack/distribution/templates/local-vllm-build.yaml +0 -10
- llama_stack/distribution/utils/exec.py +0 -105
- llama_stack/providers/adapters/agents/sample/sample.py +0 -18
- llama_stack/providers/adapters/inference/bedrock/bedrock.py +0 -451
- llama_stack/providers/adapters/inference/bedrock/config.py +0 -55
- llama_stack/providers/adapters/inference/databricks/config.py +0 -21
- llama_stack/providers/adapters/inference/databricks/databricks.py +0 -125
- llama_stack/providers/adapters/inference/fireworks/config.py +0 -20
- llama_stack/providers/adapters/inference/fireworks/fireworks.py +0 -130
- llama_stack/providers/adapters/inference/ollama/__init__.py +0 -19
- llama_stack/providers/adapters/inference/ollama/ollama.py +0 -175
- llama_stack/providers/adapters/inference/sample/sample.py +0 -23
- llama_stack/providers/adapters/inference/tgi/config.py +0 -43
- llama_stack/providers/adapters/inference/tgi/tgi.py +0 -200
- llama_stack/providers/adapters/inference/together/config.py +0 -22
- llama_stack/providers/adapters/inference/together/together.py +0 -143
- llama_stack/providers/adapters/memory/chroma/chroma.py +0 -157
- llama_stack/providers/adapters/memory/pgvector/config.py +0 -17
- llama_stack/providers/adapters/memory/pgvector/pgvector.py +0 -211
- llama_stack/providers/adapters/memory/sample/sample.py +0 -23
- llama_stack/providers/adapters/memory/weaviate/__init__.py +0 -15
- llama_stack/providers/adapters/memory/weaviate/weaviate.py +0 -190
- llama_stack/providers/adapters/safety/bedrock/bedrock.py +0 -113
- llama_stack/providers/adapters/safety/bedrock/config.py +0 -16
- llama_stack/providers/adapters/safety/sample/sample.py +0 -23
- llama_stack/providers/adapters/safety/together/__init__.py +0 -18
- llama_stack/providers/adapters/safety/together/config.py +0 -26
- llama_stack/providers/adapters/safety/together/together.py +0 -101
- llama_stack/providers/adapters/telemetry/opentelemetry/config.py +0 -12
- llama_stack/providers/adapters/telemetry/opentelemetry/opentelemetry.py +0 -201
- llama_stack/providers/adapters/telemetry/sample/__init__.py +0 -17
- llama_stack/providers/adapters/telemetry/sample/config.py +0 -12
- llama_stack/providers/adapters/telemetry/sample/sample.py +0 -18
- llama_stack/providers/impls/meta_reference/agents/agent_instance.py +0 -844
- llama_stack/providers/impls/meta_reference/agents/agents.py +0 -161
- llama_stack/providers/impls/meta_reference/agents/persistence.py +0 -84
- llama_stack/providers/impls/meta_reference/agents/rag/context_retriever.py +0 -74
- llama_stack/providers/impls/meta_reference/agents/safety.py +0 -57
- llama_stack/providers/impls/meta_reference/agents/tests/code_execution.py +0 -93
- llama_stack/providers/impls/meta_reference/agents/tests/test_chat_agent.py +0 -305
- llama_stack/providers/impls/meta_reference/agents/tools/base.py +0 -20
- llama_stack/providers/impls/meta_reference/agents/tools/builtin.py +0 -375
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_env_prefix.py +0 -133
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_execution.py +0 -256
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/matplotlib_custom_backend.py +0 -87
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/utils.py +0 -21
- llama_stack/providers/impls/meta_reference/agents/tools/safety.py +0 -43
- llama_stack/providers/impls/meta_reference/codeshield/code_scanner.py +0 -58
- llama_stack/providers/impls/meta_reference/inference/config.py +0 -45
- llama_stack/providers/impls/meta_reference/inference/generation.py +0 -376
- llama_stack/providers/impls/meta_reference/inference/inference.py +0 -280
- llama_stack/providers/impls/meta_reference/inference/model_parallel.py +0 -99
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_impls.py +0 -184
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_txest_disabled.py +0 -76
- llama_stack/providers/impls/meta_reference/inference/quantization/loader.py +0 -97
- llama_stack/providers/impls/meta_reference/inference/quantization/scripts/quantize_checkpoint.py +0 -161
- llama_stack/providers/impls/meta_reference/memory/__init__.py +0 -19
- llama_stack/providers/impls/meta_reference/memory/faiss.py +0 -113
- llama_stack/providers/impls/meta_reference/safety/__init__.py +0 -17
- llama_stack/providers/impls/meta_reference/safety/base.py +0 -57
- llama_stack/providers/impls/meta_reference/safety/config.py +0 -48
- llama_stack/providers/impls/meta_reference/safety/llama_guard.py +0 -268
- llama_stack/providers/impls/meta_reference/safety/prompt_guard.py +0 -145
- llama_stack/providers/impls/meta_reference/safety/safety.py +0 -112
- llama_stack/providers/impls/meta_reference/telemetry/console.py +0 -89
- llama_stack/providers/impls/vllm/config.py +0 -35
- llama_stack/providers/impls/vllm/vllm.py +0 -241
- llama_stack/providers/registry/memory.py +0 -78
- llama_stack/providers/registry/telemetry.py +0 -44
- llama_stack/providers/tests/agents/test_agents.py +0 -210
- llama_stack/providers/tests/inference/test_inference.py +0 -257
- llama_stack/providers/tests/inference/test_prompt_adapter.py +0 -126
- llama_stack/providers/tests/memory/test_memory.py +0 -136
- llama_stack/providers/tests/resolver.py +0 -100
- llama_stack/providers/tests/safety/test_safety.py +0 -77
- llama_stack-0.0.42.dist-info/METADATA +0 -137
- llama_stack-0.0.42.dist-info/RECORD +0 -256
- /llama_stack/{distribution → core}/__init__.py +0 -0
- /llama_stack/{distribution/server → core/access_control}/__init__.py +0 -0
- /llama_stack/{distribution/utils → core/conversations}/__init__.py +0 -0
- /llama_stack/{providers/adapters → core/prompts}/__init__.py +0 -0
- /llama_stack/{providers/adapters/agents → core/routing_tables}/__init__.py +0 -0
- /llama_stack/{providers/adapters/inference → core/server}/__init__.py +0 -0
- /llama_stack/{providers/adapters/memory → core/storage}/__init__.py +0 -0
- /llama_stack/{providers/adapters/safety → core/ui}/__init__.py +0 -0
- /llama_stack/{providers/adapters/telemetry → core/ui/modules}/__init__.py +0 -0
- /llama_stack/{providers/impls → core/ui/page}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference → core/ui/page/distribution}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/rag → core/ui/page/evaluations}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tests → core/ui/page/playground}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools → core/utils}/__init__.py +0 -0
- /llama_stack/{distribution → core}/utils/dynamic.py +0 -0
- /llama_stack/{distribution → core}/utils/serialize.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools/ipython_tool → distributions}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization → models}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization/scripts → models/llama}/__init__.py +0 -0
- /llama_stack/{providers/tests → models/llama/llama3}/__init__.py +0 -0
- /llama_stack/{providers/tests/agents → models/llama/llama3/quantization}/__init__.py +0 -0
- /llama_stack/{providers/tests/inference → models/llama/llama3_2}/__init__.py +0 -0
- /llama_stack/{providers/tests/memory → models/llama/llama3_3}/__init__.py +0 -0
- /llama_stack/{providers/tests/safety → models/llama/llama4}/__init__.py +0 -0
- /llama_stack/{scripts → models/llama/llama4/prompt_templates}/__init__.py +0 -0
- /llama_stack/providers/{adapters → remote}/safety/bedrock/__init__.py +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/entry_points.txt +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info/licenses}/LICENSE +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/top_level.txt +0 -0
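
Entries written with braces above are renames. Two moves dominate: the `distribution` package became `core`, and provider code was reorganized from `adapters`/`impls` into `remote`/`inline`. Below is a minimal import-level sketch of what those moves imply; the file moves themselves are taken from the listing, but whether each module kept its public symbols unchanged is an assumption:

```python
# Import paths implied by the file moves in this diff (0.0.42 -> 0.3.4).
# Assumption: the moved modules keep their names; the listing records paths only.

# llama_stack/{distribution -> core}/configure.py
from llama_stack.core import configure  # was: from llama_stack.distribution import configure

# llama_stack/providers/{adapters -> remote}/inference/together/__init__.py
from llama_stack.providers.remote.inference import together  # was: llama_stack.providers.adapters.inference.together
```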

The hunk below is the beginning of llama_stack/core/routers/inference.py, a file new in 0.3.4 (+645 lines per the listing above; the hunk shown here covers only the first 289 of those lines).

@@ -0,0 +1,645 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import asyncio
+import time
+from collections.abc import AsyncGenerator, AsyncIterator
+from datetime import UTC, datetime
+from typing import Annotated, Any
+
+from fastapi import Body
+from openai.types.chat import ChatCompletionToolChoiceOptionParam as OpenAIChatCompletionToolChoiceOptionParam
+from openai.types.chat import ChatCompletionToolParam as OpenAIChatCompletionToolParam
+from pydantic import TypeAdapter
+
+from llama_stack.apis.common.content_types import (
+    InterleavedContent,
+)
+from llama_stack.apis.common.errors import ModelNotFoundError, ModelTypeError
+from llama_stack.apis.inference import (
+    ChatCompletionResponse,
+    ChatCompletionResponseEventType,
+    ChatCompletionResponseStreamChunk,
+    CompletionMessage,
+    CompletionResponse,
+    CompletionResponseStreamChunk,
+    Inference,
+    ListOpenAIChatCompletionResponse,
+    Message,
+    OpenAIAssistantMessageParam,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAIChatCompletionRequestWithExtraBody,
+    OpenAIChatCompletionToolCall,
+    OpenAIChatCompletionToolCallFunction,
+    OpenAIChoice,
+    OpenAIChoiceLogprobs,
+    OpenAICompletion,
+    OpenAICompletionRequestWithExtraBody,
+    OpenAICompletionWithInputMessages,
+    OpenAIEmbeddingsRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+    OpenAIMessageParam,
+    Order,
+    StopReason,
+    ToolPromptFormat,
+)
+from llama_stack.apis.models import Model, ModelType
+from llama_stack.apis.telemetry import MetricEvent, MetricInResponse, Telemetry
+from llama_stack.core.access_control.access_control import is_action_allowed
+from llama_stack.core.datatypes import ModelWithOwner
+from llama_stack.core.request_headers import get_authenticated_user
+from llama_stack.log import get_logger
+from llama_stack.models.llama.llama3.chat_format import ChatFormat
+from llama_stack.models.llama.llama3.tokenizer import Tokenizer
+from llama_stack.providers.datatypes import (
+    HealthResponse,
+    HealthStatus,
+    RoutingTable,
+)
+from llama_stack.providers.utils.inference.inference_store import InferenceStore
+from llama_stack.providers.utils.telemetry.tracing import enqueue_event, get_current_span
+
+logger = get_logger(name=__name__, category="core::routers")
+
+
+class InferenceRouter(Inference):
+    """Routes to a provider based on the model"""
+
+    def __init__(
+        self,
+        routing_table: RoutingTable,
+        telemetry: Telemetry | None = None,
+        store: InferenceStore | None = None,
+    ) -> None:
+        logger.debug("Initializing InferenceRouter")
+        self.routing_table = routing_table
+        self.telemetry = telemetry
+        self.store = store
+        if self.telemetry:
+            self.tokenizer = Tokenizer.get_instance()
+            self.formatter = ChatFormat(self.tokenizer)
+
+    async def initialize(self) -> None:
+        logger.debug("InferenceRouter.initialize")
+
+    async def shutdown(self) -> None:
+        logger.debug("InferenceRouter.shutdown")
+        if self.store:
+            try:
+                await self.store.shutdown()
+            except Exception as e:
+                logger.warning(f"Error during InferenceStore shutdown: {e}")
+
+    async def register_model(
+        self,
+        model_id: str,
+        provider_model_id: str | None = None,
+        provider_id: str | None = None,
+        metadata: dict[str, Any] | None = None,
+        model_type: ModelType | None = None,
+    ) -> None:
+        logger.debug(
+            f"InferenceRouter.register_model: {model_id=} {provider_model_id=} {provider_id=} {metadata=} {model_type=}",
+        )
+        await self.routing_table.register_model(model_id, provider_model_id, provider_id, metadata, model_type)
+
+    def _construct_metrics(
+        self,
+        prompt_tokens: int,
+        completion_tokens: int,
+        total_tokens: int,
+        fully_qualified_model_id: str,
+        provider_id: str,
+    ) -> list[MetricEvent]:
+        """Constructs a list of MetricEvent objects containing token usage metrics.
+
+        Args:
+            prompt_tokens: Number of tokens in the prompt
+            completion_tokens: Number of tokens in the completion
+            total_tokens: Total number of tokens used
+            fully_qualified_model_id: The fully qualified model identifier
+            provider_id: The provider identifier
+
+        Returns:
+            List of MetricEvent objects with token usage metrics
+        """
+        span = get_current_span()
+        if span is None:
+            logger.warning("No span found for token usage metrics")
+            return []
+
+        metrics = [
+            ("prompt_tokens", prompt_tokens),
+            ("completion_tokens", completion_tokens),
+            ("total_tokens", total_tokens),
+        ]
+        metric_events = []
+        for metric_name, value in metrics:
+            metric_events.append(
+                MetricEvent(
+                    trace_id=span.trace_id,
+                    span_id=span.span_id,
+                    metric=metric_name,
+                    value=value,
+                    timestamp=datetime.now(UTC),
+                    unit="tokens",
+                    attributes={
+                        "model_id": fully_qualified_model_id,
+                        "provider_id": provider_id,
+                    },
+                )
+            )
+        return metric_events
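
For one non-streaming completion, `_construct_metrics` yields three sibling events that share the surrounding span's identity. A minimal sketch (not part of the diff) of the resulting objects, with illustrative ids and counts; the field names are taken from the constructor above:

```python
from datetime import UTC, datetime

from llama_stack.apis.telemetry import MetricEvent

# Illustrative values; trace/span ids normally come from the active span.
events = [
    MetricEvent(
        trace_id="trace-1",
        span_id="span-1",
        metric=name,
        value=value,
        timestamp=datetime.now(UTC),
        unit="tokens",
        attributes={"model_id": "together/meta-llama/Llama-3-8b", "provider_id": "together"},
    )
    for name, value in [("prompt_tokens", 12), ("completion_tokens", 48), ("total_tokens", 60)]
]
```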
158
|
+
async def _compute_and_log_token_usage(
|
|
159
|
+
self,
|
|
160
|
+
prompt_tokens: int,
|
|
161
|
+
completion_tokens: int,
|
|
162
|
+
total_tokens: int,
|
|
163
|
+
model: Model,
|
|
164
|
+
) -> list[MetricInResponse]:
|
|
165
|
+
metrics = self._construct_metrics(
|
|
166
|
+
prompt_tokens, completion_tokens, total_tokens, model.model_id, model.provider_id
|
|
167
|
+
)
|
|
168
|
+
if self.telemetry:
|
|
169
|
+
for metric in metrics:
|
|
170
|
+
enqueue_event(metric)
|
|
171
|
+
return [MetricInResponse(metric=metric.metric, value=metric.value) for metric in metrics]
|
|
172
|
+
|
|
173
|
+
async def _count_tokens(
|
|
174
|
+
self,
|
|
175
|
+
messages: list[Message] | InterleavedContent,
|
|
176
|
+
tool_prompt_format: ToolPromptFormat | None = None,
|
|
177
|
+
) -> int | None:
|
|
178
|
+
if not hasattr(self, "formatter") or self.formatter is None:
|
|
179
|
+
return None
|
|
180
|
+
|
|
181
|
+
if isinstance(messages, list):
|
|
182
|
+
encoded = self.formatter.encode_dialog_prompt(messages, tool_prompt_format)
|
|
183
|
+
else:
|
|
184
|
+
encoded = self.formatter.encode_content(messages)
|
|
185
|
+
return len(encoded.tokens) if encoded and encoded.tokens else 0
|
|
186
|
+
|
|
+    async def _get_model_provider(self, model_id: str, expected_model_type: str) -> tuple[Inference, str]:
+        model = await self.routing_table.get_object_by_identifier("model", model_id)
+        if model:
+            if model.model_type != expected_model_type:
+                raise ModelTypeError(model_id, model.model_type, expected_model_type)
+
+            provider = await self.routing_table.get_provider_impl(model.identifier)
+            return provider, model.provider_resource_id
+
+        # Handles cases where clients use the provider format directly
+        return await self._get_provider_by_fallback(model_id, expected_model_type)
+
+    async def _get_provider_by_fallback(self, model_id: str, expected_model_type: str) -> tuple[Inference, str]:
+        """
+        Handle fallback case where model_id is in provider_id/provider_resource_id format.
+        """
+        splits = model_id.split("/", maxsplit=1)
+        if len(splits) != 2:
+            raise ModelNotFoundError(model_id)
+
+        provider_id, provider_resource_id = splits
+
+        # Check if provider exists
+        if provider_id not in self.routing_table.impls_by_provider_id:
+            logger.warning(f"Provider {provider_id} not found for model {model_id}")
+            raise ModelNotFoundError(model_id)
+
+        # Create a temporary model object for RBAC check
+        temp_model = ModelWithOwner(
+            identifier=model_id,
+            provider_id=provider_id,
+            provider_resource_id=provider_resource_id,
+            model_type=expected_model_type,
+            metadata={},  # Empty metadata for temporary object
+        )
+
+        # Perform RBAC check
+        user = get_authenticated_user()
+        if not is_action_allowed(self.routing_table.policy, "read", temp_model, user):
+            logger.debug(
+                f"Access denied to model '{model_id}' via fallback path for user {user.principal if user else 'anonymous'}"
+            )
+            raise ModelNotFoundError(model_id)
+
+        return self.routing_table.impls_by_provider_id[provider_id], provider_resource_id
+
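Because the split uses `maxsplit=1`, only the first `/` separates the provider id, so nested model names keep their own slashes. For example (ids illustrative):

    "vllm/meta-llama/Llama-3.1-8B-Instruct".split("/", maxsplit=1)
    # -> ["vllm", "meta-llama/Llama-3.1-8B-Instruct"]
    #    provider_id = "vllm", provider_resource_id = "meta-llama/Llama-3.1-8B-Instruct"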
+    async def openai_completion(
+        self,
+        params: Annotated[OpenAICompletionRequestWithExtraBody, Body(...)],
+    ) -> OpenAICompletion:
+        logger.debug(
+            f"InferenceRouter.openai_completion: model={params.model}, stream={params.stream}, prompt={params.prompt}",
+        )
+        request_model_id = params.model
+        provider, provider_resource_id = await self._get_model_provider(params.model, ModelType.llm)
+        params.model = provider_resource_id
+
+        if params.stream:
+            return await provider.openai_completion(params)
+            # TODO: Metrics do NOT work with openai_completion stream=True because we return the
+            # provider's stream directly and currently have no way to intercept its chunks.
+
+        response = await provider.openai_completion(params)
+        response.model = request_model_id
+        if self.telemetry:
+            metrics = self._construct_metrics(
+                prompt_tokens=response.usage.prompt_tokens,
+                completion_tokens=response.usage.completion_tokens,
+                total_tokens=response.usage.total_tokens,
+                fully_qualified_model_id=request_model_id,
+                provider_id=provider.__provider_id__,
+            )
+            for metric in metrics:
+                enqueue_event(metric)
+
+            # these metrics will show up in the client response.
+            response.metrics = (
+                metrics if not hasattr(response, "metrics") or response.metrics is None else response.metrics + metrics
+            )
+        return response
+
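The router rewrites `params.model` to the provider's resource id before the call, then restores the client-facing id on the response. A usage sketch (constructor fields and `router` are assumptions, not part of this diff):

    params = OpenAICompletionRequestWithExtraBody(model="openai/gpt-4o-mini", prompt="Say hi")
    completion = await router.openai_completion(params)
    assert completion.model == "openai/gpt-4o-mini"  # the id the client asked for, not the provider's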
+    async def openai_chat_completion(
+        self,
+        params: Annotated[OpenAIChatCompletionRequestWithExtraBody, Body(...)],
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
+        logger.debug(
+            f"InferenceRouter.openai_chat_completion: model={params.model}, stream={params.stream}, messages={params.messages}",
+        )
+        request_model_id = params.model
+        provider, provider_resource_id = await self._get_model_provider(params.model, ModelType.llm)
+        params.model = provider_resource_id
+
+        # Use the OpenAI client for a bit of extra input validation without
+        # exposing the OpenAI client itself as part of our API surface
+        if params.tool_choice:
+            TypeAdapter(OpenAIChatCompletionToolChoiceOptionParam).validate_python(params.tool_choice)
+            if params.tools is None:
+                raise ValueError("'tool_choice' is only allowed when 'tools' is also provided")
+        if params.tools:
+            for tool in params.tools:
+                TypeAdapter(OpenAIChatCompletionToolParam).validate_python(tool)
+
+        # Some providers make tool calls even when tool_choice is "none"
+        # so just clear them both out to avoid unexpected tool calls
+        if params.tool_choice == "none" and params.tools is not None:
+            params.tool_choice = None
+            params.tools = None
+
+        if params.stream:
+            response_stream = await provider.openai_chat_completion(params)
+
+            # For streaming, the provider returns AsyncIterator[OpenAIChatCompletionChunk]
+            # We need to add metrics to each chunk and store the final completion
+            return self.stream_tokens_and_compute_metrics_openai_chat(
+                response=response_stream,
+                fully_qualified_model_id=request_model_id,
+                provider_id=provider.__provider_id__,
+                messages=params.messages,
+            )
+
+        response = await self._nonstream_openai_chat_completion(provider, params)
+        response.model = request_model_id
+
+        # Store the response with the ID that will be returned to the client
+        if self.store:
+            asyncio.create_task(self.store.store_chat_completion(response, params.messages))
+
+        if self.telemetry:
+            metrics = self._construct_metrics(
+                prompt_tokens=response.usage.prompt_tokens,
+                completion_tokens=response.usage.completion_tokens,
+                total_tokens=response.usage.total_tokens,
+                fully_qualified_model_id=request_model_id,
+                provider_id=provider.__provider_id__,
+            )
+            for metric in metrics:
+                enqueue_event(metric)
+            # these metrics will show up in the client response.
+            response.metrics = (
+                metrics if not hasattr(response, "metrics") or response.metrics is None else response.metrics + metrics
+            )
+        return response
+
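The validation above enforces two rules before the provider sees the request: `tool_choice` without `tools` is rejected, and `tool_choice="none"` clears both fields so providers cannot emit stray tool calls. Sketch (request values illustrative, not part of this diff):

    params = OpenAIChatCompletionRequestWithExtraBody(
        model="openai/gpt-4o-mini",
        messages=[{"role": "user", "content": "hi"}],
        tool_choice="auto",  # but no tools=...
    )
    await router.openai_chat_completion(params)  # raises ValueError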
+    async def openai_embeddings(
+        self,
+        params: Annotated[OpenAIEmbeddingsRequestWithExtraBody, Body(...)],
+    ) -> OpenAIEmbeddingsResponse:
+        logger.debug(
+            f"InferenceRouter.openai_embeddings: model={params.model}, input_type={type(params.input)}, encoding_format={params.encoding_format}, dimensions={params.dimensions}",
+        )
+        request_model_id = params.model
+        provider, provider_resource_id = await self._get_model_provider(params.model, ModelType.embedding)
+        params.model = provider_resource_id
+
+        response = await provider.openai_embeddings(params)
+        response.model = request_model_id
+        return response
+
+    async def list_chat_completions(
+        self,
+        after: str | None = None,
+        limit: int | None = 20,
+        model: str | None = None,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIChatCompletionResponse:
+        if self.store:
+            return await self.store.list_chat_completions(after, limit, model, order)
+        raise NotImplementedError("List chat completions is not supported: inference store is not configured.")
+
+    async def get_chat_completion(self, completion_id: str) -> OpenAICompletionWithInputMessages:
+        if self.store:
+            return await self.store.get_chat_completion(completion_id)
+        raise NotImplementedError("Get chat completion is not supported: inference store is not configured.")
+
+    async def _nonstream_openai_chat_completion(
+        self, provider: Inference, params: OpenAIChatCompletionRequestWithExtraBody
+    ) -> OpenAIChatCompletion:
+        response = await provider.openai_chat_completion(params)
+        for choice in response.choices:
+            # some providers return an empty list for no tool calls in non-streaming responses
+            # but the OpenAI API returns None. So, set tool_calls to None if it's empty
+            if choice.message and choice.message.tool_calls is not None and len(choice.message.tool_calls) == 0:
+                choice.message.tool_calls = None
+        return response
+
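This normalization matters because some providers return `tool_calls=[]` where the OpenAI API would return `None`; after the loop both cases look the same to clients:

    # provider response:  choice.message.tool_calls == []
    # after the router:   choice.message.tool_calls is None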
+    async def health(self) -> dict[str, HealthResponse]:
+        health_statuses = {}
+        timeout = 1  # increasing the timeout to 1 second for health checks
+        for provider_id, impl in self.routing_table.impls_by_provider_id.items():
+            try:
+                # check if the provider has a health method
+                if not hasattr(impl, "health"):
+                    continue
+                health = await asyncio.wait_for(impl.health(), timeout=timeout)
+                health_statuses[provider_id] = health
+            except TimeoutError:
+                health_statuses[provider_id] = HealthResponse(
+                    status=HealthStatus.ERROR,
+                    message=f"Health check timed out after {timeout} seconds",
+                )
+            except NotImplementedError:
+                health_statuses[provider_id] = HealthResponse(status=HealthStatus.NOT_IMPLEMENTED)
+            except Exception as e:
+                health_statuses[provider_id] = HealthResponse(
+                    status=HealthStatus.ERROR, message=f"Health check failed: {str(e)}"
+                )
+        return health_statuses
+
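`health()` fans out to every registered provider with a one-second budget each and never raises; failures are folded into the result. An illustrative result shape (provider ids and the OK status are assumptions, not part of this diff):

    statuses = await router.health()
    # {"vllm": HealthResponse(status=HealthStatus.OK),
    #  "slow-provider": HealthResponse(status=HealthStatus.ERROR,
    #                                  message="Health check timed out after 1 seconds")}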
+    async def stream_tokens_and_compute_metrics(
+        self,
+        response,
+        prompt_tokens,
+        fully_qualified_model_id: str,
+        provider_id: str,
+        tool_prompt_format: ToolPromptFormat | None = None,
+    ) -> AsyncGenerator[ChatCompletionResponseStreamChunk, None] | AsyncGenerator[CompletionResponseStreamChunk, None]:
+        completion_text = ""
+        async for chunk in response:
+            complete = False
+            if hasattr(chunk, "event"):  # only ChatCompletions have .event
+                if chunk.event.event_type == ChatCompletionResponseEventType.progress:
+                    if chunk.event.delta.type == "text":
+                        completion_text += chunk.event.delta.text
+                if chunk.event.event_type == ChatCompletionResponseEventType.complete:
+                    complete = True
+                    completion_tokens = await self._count_tokens(
+                        [
+                            CompletionMessage(
+                                content=completion_text,
+                                stop_reason=StopReason.end_of_turn,
+                            )
+                        ],
+                        tool_prompt_format=tool_prompt_format,
+                    )
+            else:
+                if hasattr(chunk, "delta"):
+                    completion_text += chunk.delta
+                if hasattr(chunk, "stop_reason") and chunk.stop_reason and self.telemetry:
+                    complete = True
+                    completion_tokens = await self._count_tokens(completion_text)
+            # if we are done receiving tokens
+            if complete:
+                total_tokens = (prompt_tokens or 0) + (completion_tokens or 0)
+
+                # Create a separate span for streaming completion metrics
+                if self.telemetry:
+                    # Log metrics in the new span context
+                    completion_metrics = self._construct_metrics(
+                        prompt_tokens=prompt_tokens,
+                        completion_tokens=completion_tokens,
+                        total_tokens=total_tokens,
+                        fully_qualified_model_id=fully_qualified_model_id,
+                        provider_id=provider_id,
+                    )
+                    for metric in completion_metrics:
+                        if metric.metric in [
+                            "completion_tokens",
+                            "total_tokens",
+                        ]:  # Only log completion and total tokens
+                            enqueue_event(metric)
+
+                    # Return metrics in response
+                    async_metrics = [
+                        MetricInResponse(metric=metric.metric, value=metric.value) for metric in completion_metrics
+                    ]
+                    chunk.metrics = async_metrics if chunk.metrics is None else chunk.metrics + async_metrics
+                else:
+                    # Fallback if no telemetry
+                    completion_metrics = self._construct_metrics(
+                        prompt_tokens or 0,
+                        completion_tokens or 0,
+                        total_tokens,
+                        fully_qualified_model_id=fully_qualified_model_id,
+                        provider_id=provider_id,
+                    )
+                    async_metrics = [
+                        MetricInResponse(metric=metric.metric, value=metric.value) for metric in completion_metrics
+                    ]
+                    chunk.metrics = async_metrics if chunk.metrics is None else chunk.metrics + async_metrics
+            yield chunk
+
+    async def count_tokens_and_compute_metrics(
+        self,
+        response: ChatCompletionResponse | CompletionResponse,
+        prompt_tokens,
+        fully_qualified_model_id: str,
+        provider_id: str,
+        tool_prompt_format: ToolPromptFormat | None = None,
+    ):
+        if isinstance(response, ChatCompletionResponse):
+            content = [response.completion_message]
+        else:
+            content = response.content
+        completion_tokens = await self._count_tokens(messages=content, tool_prompt_format=tool_prompt_format)
+        total_tokens = (prompt_tokens or 0) + (completion_tokens or 0)
+
+        # Create a separate span for completion metrics
+        if self.telemetry:
+            # Log metrics in the new span context
+            completion_metrics = self._construct_metrics(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=total_tokens,
+                fully_qualified_model_id=fully_qualified_model_id,
+                provider_id=provider_id,
+            )
+            for metric in completion_metrics:
+                if metric.metric in ["completion_tokens", "total_tokens"]:  # Only log completion and total tokens
+                    enqueue_event(metric)
+
+            # Return metrics in response
+            return [MetricInResponse(metric=metric.metric, value=metric.value) for metric in completion_metrics]
+
+        # Fallback if no telemetry
+        metrics = self._construct_metrics(
+            prompt_tokens or 0,
+            completion_tokens or 0,
+            total_tokens,
+            fully_qualified_model_id=fully_qualified_model_id,
+            provider_id=provider_id,
+        )
+        return [MetricInResponse(metric=metric.metric, value=metric.value) for metric in metrics]
+
+    async def stream_tokens_and_compute_metrics_openai_chat(
+        self,
+        response: AsyncIterator[OpenAIChatCompletionChunk],
+        fully_qualified_model_id: str,
+        provider_id: str,
+        messages: list[OpenAIMessageParam] | None = None,
+    ) -> AsyncIterator[OpenAIChatCompletionChunk]:
+        """Stream OpenAI chat completion chunks, compute metrics, and store the final completion."""
+        id = None
+        created = None
+        choices_data: dict[int, dict[str, Any]] = {}
+
+        try:
+            async for chunk in response:
+                # Skip None chunks
+                if chunk is None:
+                    continue
+
+                # Capture ID and created timestamp from first chunk
+                if id is None and chunk.id:
+                    id = chunk.id
+                if created is None and chunk.created:
+                    created = chunk.created
+
+                chunk.model = fully_qualified_model_id
+
+                # Accumulate choice data for final assembly
+                if chunk.choices:
+                    for choice_delta in chunk.choices:
+                        idx = choice_delta.index
+                        if idx not in choices_data:
+                            choices_data[idx] = {
+                                "content_parts": [],
+                                "tool_calls_builder": {},
+                                "finish_reason": "stop",
+                                "logprobs_content_parts": [],
+                            }
+                        current_choice_data = choices_data[idx]
+
+                        if choice_delta.delta:
+                            delta = choice_delta.delta
+                            if delta.content:
+                                current_choice_data["content_parts"].append(delta.content)
+                            if delta.tool_calls:
+                                for tool_call_delta in delta.tool_calls:
+                                    tc_idx = tool_call_delta.index
+                                    if tc_idx not in current_choice_data["tool_calls_builder"]:
+                                        current_choice_data["tool_calls_builder"][tc_idx] = {
+                                            "id": None,
+                                            "type": "function",
+                                            "function_name_parts": [],
+                                            "function_arguments_parts": [],
+                                        }
+                                    builder = current_choice_data["tool_calls_builder"][tc_idx]
+                                    if tool_call_delta.id:
+                                        builder["id"] = tool_call_delta.id
+                                    if tool_call_delta.type:
+                                        builder["type"] = tool_call_delta.type
+                                    if tool_call_delta.function:
+                                        if tool_call_delta.function.name:
+                                            builder["function_name_parts"].append(tool_call_delta.function.name)
+                                        if tool_call_delta.function.arguments:
+                                            builder["function_arguments_parts"].append(
+                                                tool_call_delta.function.arguments
+                                            )
+                        if choice_delta.finish_reason:
+                            current_choice_data["finish_reason"] = choice_delta.finish_reason
+                        if choice_delta.logprobs and choice_delta.logprobs.content:
+                            current_choice_data["logprobs_content_parts"].extend(choice_delta.logprobs.content)
+
+                # Compute metrics on final chunk
+                if chunk.choices and chunk.choices[0].finish_reason:
+                    completion_text = ""
+                    for choice_data in choices_data.values():
+                        completion_text += "".join(choice_data["content_parts"])
+
+                    # Add metrics to the chunk
+                    if self.telemetry and hasattr(chunk, "usage") and chunk.usage:
+                        metrics = self._construct_metrics(
+                            prompt_tokens=chunk.usage.prompt_tokens,
+                            completion_tokens=chunk.usage.completion_tokens,
+                            total_tokens=chunk.usage.total_tokens,
+                            fully_qualified_model_id=fully_qualified_model_id,
+                            provider_id=provider_id,
+                        )
+                        for metric in metrics:
+                            enqueue_event(metric)
+
+                yield chunk
+        finally:
+            # Store the final assembled completion
+            if id and self.store and messages:
+                assembled_choices: list[OpenAIChoice] = []
+                for choice_idx, choice_data in choices_data.items():
+                    content_str = "".join(choice_data["content_parts"])
+                    assembled_tool_calls: list[OpenAIChatCompletionToolCall] = []
+                    if choice_data["tool_calls_builder"]:
+                        for tc_build_data in choice_data["tool_calls_builder"].values():
+                            if tc_build_data["id"]:
+                                func_name = "".join(tc_build_data["function_name_parts"])
+                                func_args = "".join(tc_build_data["function_arguments_parts"])
+                                assembled_tool_calls.append(
+                                    OpenAIChatCompletionToolCall(
+                                        id=tc_build_data["id"],
+                                        type=tc_build_data["type"],
+                                        function=OpenAIChatCompletionToolCallFunction(
+                                            name=func_name, arguments=func_args
+                                        ),
+                                    )
+                                )
+                    message = OpenAIAssistantMessageParam(
+                        role="assistant",
+                        content=content_str if content_str else None,
+                        tool_calls=assembled_tool_calls if assembled_tool_calls else None,
+                    )
+                    logprobs_content = choice_data["logprobs_content_parts"]
+                    final_logprobs = OpenAIChoiceLogprobs(content=logprobs_content) if logprobs_content else None
+
+                    assembled_choices.append(
+                        OpenAIChoice(
+                            finish_reason=choice_data["finish_reason"],
+                            index=choice_idx,
+                            message=message,
+                            logprobs=final_logprobs,
+                        )
+                    )
+
+                final_response = OpenAIChatCompletion(
+                    id=id,
+                    choices=assembled_choices,
+                    created=created or int(time.time()),
+                    model=fully_qualified_model_id,
+                    object="chat.completion",
+                )
+                logger.debug(f"InferenceRouter.completion_response: {final_response}")
+                asyncio.create_task(self.store.store_chat_completion(final_response, messages))
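Note: the metrics call in the final-chunk branch above passes fully_qualified_model_id= to match the keyword every other call site in this file uses; the raw diff had model_id= here, which does not match the `_construct_metrics` parameter name. The `finally` block then reassembles the streamed deltas into a complete `OpenAIChatCompletion` for storage, concatenating content parts and tool-call name/argument fragments in arrival order. An illustrative reassembly (delta values are assumptions, not part of this diff):

    # streamed fragments for one tool call:
    #   function_name_parts      = ["get_", "weather"]
    #   function_arguments_parts = ['{"city": ', '"Paris"}']
    # assembled for storage:
    #   OpenAIChatCompletionToolCall(
    #       id="call_123", type="function",
    #       function=OpenAIChatCompletionToolCallFunction(
    #           name="get_weather", arguments='{"city": "Paris"}'))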