llama-stack 0.0.42__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack/__init__.py +5 -0
- llama_stack/apis/agents/__init__.py +1 -1
- llama_stack/apis/agents/agents.py +700 -281
- llama_stack/apis/agents/openai_responses.py +1311 -0
- llama_stack/{providers/adapters/memory/sample/config.py → apis/batches/__init__.py} +2 -5
- llama_stack/apis/batches/batches.py +100 -0
- llama_stack/apis/benchmarks/__init__.py +7 -0
- llama_stack/apis/benchmarks/benchmarks.py +108 -0
- llama_stack/apis/common/content_types.py +143 -0
- llama_stack/apis/common/errors.py +103 -0
- llama_stack/apis/common/job_types.py +38 -0
- llama_stack/apis/common/responses.py +36 -0
- llama_stack/apis/common/training_types.py +36 -5
- llama_stack/apis/common/type_system.py +158 -0
- llama_stack/apis/conversations/__init__.py +31 -0
- llama_stack/apis/conversations/conversations.py +286 -0
- llama_stack/apis/datasetio/__init__.py +7 -0
- llama_stack/apis/datasetio/datasetio.py +59 -0
- llama_stack/apis/datasets/__init__.py +7 -0
- llama_stack/apis/datasets/datasets.py +251 -0
- llama_stack/apis/datatypes.py +160 -0
- llama_stack/apis/eval/__init__.py +7 -0
- llama_stack/apis/eval/eval.py +169 -0
- llama_stack/apis/files/__init__.py +7 -0
- llama_stack/apis/files/files.py +199 -0
- llama_stack/apis/inference/__init__.py +1 -1
- llama_stack/apis/inference/inference.py +1169 -113
- llama_stack/apis/inspect/__init__.py +1 -1
- llama_stack/apis/inspect/inspect.py +69 -16
- llama_stack/apis/models/__init__.py +1 -1
- llama_stack/apis/models/models.py +148 -21
- llama_stack/apis/post_training/__init__.py +1 -1
- llama_stack/apis/post_training/post_training.py +265 -120
- llama_stack/{providers/adapters/agents/sample/config.py → apis/prompts/__init__.py} +2 -5
- llama_stack/apis/prompts/prompts.py +204 -0
- llama_stack/apis/providers/__init__.py +7 -0
- llama_stack/apis/providers/providers.py +69 -0
- llama_stack/apis/resource.py +37 -0
- llama_stack/apis/safety/__init__.py +1 -1
- llama_stack/apis/safety/safety.py +95 -12
- llama_stack/apis/scoring/__init__.py +7 -0
- llama_stack/apis/scoring/scoring.py +93 -0
- llama_stack/apis/scoring_functions/__init__.py +7 -0
- llama_stack/apis/scoring_functions/scoring_functions.py +208 -0
- llama_stack/apis/shields/__init__.py +1 -1
- llama_stack/apis/shields/shields.py +76 -33
- llama_stack/apis/synthetic_data_generation/__init__.py +1 -1
- llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +40 -17
- llama_stack/apis/telemetry/__init__.py +1 -1
- llama_stack/apis/telemetry/telemetry.py +322 -31
- llama_stack/apis/{dataset → tools}/__init__.py +2 -1
- llama_stack/apis/tools/rag_tool.py +218 -0
- llama_stack/apis/tools/tools.py +221 -0
- llama_stack/apis/vector_io/__init__.py +7 -0
- llama_stack/apis/vector_io/vector_io.py +960 -0
- llama_stack/apis/vector_stores/__init__.py +7 -0
- llama_stack/apis/vector_stores/vector_stores.py +51 -0
- llama_stack/apis/version.py +9 -0
- llama_stack/cli/llama.py +13 -5
- llama_stack/cli/stack/_list_deps.py +182 -0
- llama_stack/cli/stack/list_apis.py +1 -1
- llama_stack/cli/stack/list_deps.py +55 -0
- llama_stack/cli/stack/list_providers.py +24 -10
- llama_stack/cli/stack/list_stacks.py +56 -0
- llama_stack/cli/stack/remove.py +115 -0
- llama_stack/cli/stack/run.py +169 -56
- llama_stack/cli/stack/stack.py +18 -4
- llama_stack/cli/stack/utils.py +151 -0
- llama_stack/cli/table.py +23 -61
- llama_stack/cli/utils.py +29 -0
- llama_stack/core/access_control/access_control.py +131 -0
- llama_stack/core/access_control/conditions.py +129 -0
- llama_stack/core/access_control/datatypes.py +107 -0
- llama_stack/core/build.py +164 -0
- llama_stack/core/client.py +205 -0
- llama_stack/core/common.sh +37 -0
- llama_stack/{distribution → core}/configure.py +74 -55
- llama_stack/core/conversations/conversations.py +309 -0
- llama_stack/core/datatypes.py +625 -0
- llama_stack/core/distribution.py +276 -0
- llama_stack/core/external.py +54 -0
- llama_stack/core/id_generation.py +42 -0
- llama_stack/core/inspect.py +86 -0
- llama_stack/core/library_client.py +539 -0
- llama_stack/core/prompts/prompts.py +234 -0
- llama_stack/core/providers.py +137 -0
- llama_stack/core/request_headers.py +115 -0
- llama_stack/core/resolver.py +506 -0
- llama_stack/core/routers/__init__.py +101 -0
- llama_stack/core/routers/datasets.py +73 -0
- llama_stack/core/routers/eval_scoring.py +155 -0
- llama_stack/core/routers/inference.py +645 -0
- llama_stack/core/routers/safety.py +85 -0
- llama_stack/core/routers/tool_runtime.py +91 -0
- llama_stack/core/routers/vector_io.py +442 -0
- llama_stack/core/routing_tables/benchmarks.py +62 -0
- llama_stack/core/routing_tables/common.py +254 -0
- llama_stack/core/routing_tables/datasets.py +91 -0
- llama_stack/core/routing_tables/models.py +163 -0
- llama_stack/core/routing_tables/scoring_functions.py +66 -0
- llama_stack/core/routing_tables/shields.py +61 -0
- llama_stack/core/routing_tables/toolgroups.py +129 -0
- llama_stack/core/routing_tables/vector_stores.py +292 -0
- llama_stack/core/server/auth.py +187 -0
- llama_stack/core/server/auth_providers.py +494 -0
- llama_stack/core/server/quota.py +110 -0
- llama_stack/core/server/routes.py +141 -0
- llama_stack/core/server/server.py +542 -0
- llama_stack/core/server/tracing.py +80 -0
- llama_stack/core/stack.py +546 -0
- llama_stack/core/start_stack.sh +117 -0
- llama_stack/core/storage/datatypes.py +283 -0
- llama_stack/{cli/model → core/store}/__init__.py +1 -1
- llama_stack/core/store/registry.py +199 -0
- llama_stack/core/testing_context.py +49 -0
- llama_stack/core/ui/app.py +55 -0
- llama_stack/core/ui/modules/api.py +32 -0
- llama_stack/core/ui/modules/utils.py +42 -0
- llama_stack/core/ui/page/distribution/datasets.py +18 -0
- llama_stack/core/ui/page/distribution/eval_tasks.py +20 -0
- llama_stack/core/ui/page/distribution/models.py +18 -0
- llama_stack/core/ui/page/distribution/providers.py +27 -0
- llama_stack/core/ui/page/distribution/resources.py +48 -0
- llama_stack/core/ui/page/distribution/scoring_functions.py +18 -0
- llama_stack/core/ui/page/distribution/shields.py +19 -0
- llama_stack/core/ui/page/evaluations/app_eval.py +143 -0
- llama_stack/core/ui/page/evaluations/native_eval.py +253 -0
- llama_stack/core/ui/page/playground/chat.py +130 -0
- llama_stack/core/ui/page/playground/tools.py +352 -0
- llama_stack/core/utils/config.py +30 -0
- llama_stack/{distribution → core}/utils/config_dirs.py +3 -6
- llama_stack/core/utils/config_resolution.py +125 -0
- llama_stack/core/utils/context.py +84 -0
- llama_stack/core/utils/exec.py +96 -0
- llama_stack/{providers/impls/meta_reference/codeshield/config.py → core/utils/image_types.py} +4 -3
- llama_stack/{distribution → core}/utils/model_utils.py +2 -2
- llama_stack/{distribution → core}/utils/prompt_for_config.py +30 -63
- llama_stack/{apis/batch_inference → distributions/dell}/__init__.py +1 -1
- llama_stack/distributions/dell/build.yaml +33 -0
- llama_stack/distributions/dell/dell.py +158 -0
- llama_stack/distributions/dell/run-with-safety.yaml +141 -0
- llama_stack/distributions/dell/run.yaml +132 -0
- llama_stack/distributions/meta-reference-gpu/__init__.py +7 -0
- llama_stack/distributions/meta-reference-gpu/build.yaml +32 -0
- llama_stack/distributions/meta-reference-gpu/meta_reference.py +163 -0
- llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +154 -0
- llama_stack/distributions/meta-reference-gpu/run.yaml +139 -0
- llama_stack/{apis/evals → distributions/nvidia}/__init__.py +1 -1
- llama_stack/distributions/nvidia/build.yaml +29 -0
- llama_stack/distributions/nvidia/nvidia.py +154 -0
- llama_stack/distributions/nvidia/run-with-safety.yaml +137 -0
- llama_stack/distributions/nvidia/run.yaml +116 -0
- llama_stack/distributions/open-benchmark/__init__.py +7 -0
- llama_stack/distributions/open-benchmark/build.yaml +36 -0
- llama_stack/distributions/open-benchmark/open_benchmark.py +303 -0
- llama_stack/distributions/open-benchmark/run.yaml +252 -0
- llama_stack/distributions/postgres-demo/__init__.py +7 -0
- llama_stack/distributions/postgres-demo/build.yaml +23 -0
- llama_stack/distributions/postgres-demo/postgres_demo.py +125 -0
- llama_stack/distributions/postgres-demo/run.yaml +115 -0
- llama_stack/{apis/memory → distributions/starter}/__init__.py +1 -1
- llama_stack/distributions/starter/build.yaml +61 -0
- llama_stack/distributions/starter/run-with-postgres-store.yaml +285 -0
- llama_stack/distributions/starter/run.yaml +276 -0
- llama_stack/distributions/starter/starter.py +345 -0
- llama_stack/distributions/starter-gpu/__init__.py +7 -0
- llama_stack/distributions/starter-gpu/build.yaml +61 -0
- llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +288 -0
- llama_stack/distributions/starter-gpu/run.yaml +279 -0
- llama_stack/distributions/starter-gpu/starter_gpu.py +20 -0
- llama_stack/distributions/template.py +456 -0
- llama_stack/distributions/watsonx/__init__.py +7 -0
- llama_stack/distributions/watsonx/build.yaml +33 -0
- llama_stack/distributions/watsonx/run.yaml +133 -0
- llama_stack/distributions/watsonx/watsonx.py +95 -0
- llama_stack/env.py +24 -0
- llama_stack/log.py +314 -0
- llama_stack/models/llama/checkpoint.py +164 -0
- llama_stack/models/llama/datatypes.py +164 -0
- llama_stack/models/llama/hadamard_utils.py +86 -0
- llama_stack/models/llama/llama3/args.py +74 -0
- llama_stack/models/llama/llama3/chat_format.py +286 -0
- llama_stack/models/llama/llama3/generation.py +376 -0
- llama_stack/models/llama/llama3/interface.py +255 -0
- llama_stack/models/llama/llama3/model.py +304 -0
- llama_stack/models/llama/llama3/multimodal/__init__.py +12 -0
- llama_stack/models/llama/llama3/multimodal/encoder_utils.py +180 -0
- llama_stack/models/llama/llama3/multimodal/image_transform.py +409 -0
- llama_stack/models/llama/llama3/multimodal/model.py +1430 -0
- llama_stack/models/llama/llama3/multimodal/utils.py +26 -0
- llama_stack/models/llama/llama3/prompt_templates/__init__.py +22 -0
- llama_stack/models/llama/llama3/prompt_templates/base.py +39 -0
- llama_stack/models/llama/llama3/prompt_templates/system_prompts.py +319 -0
- llama_stack/models/llama/llama3/prompt_templates/tool_response.py +62 -0
- llama_stack/models/llama/llama3/quantization/loader.py +316 -0
- llama_stack/models/llama/llama3/template_data.py +116 -0
- llama_stack/models/llama/llama3/tokenizer.model +128000 -0
- llama_stack/models/llama/llama3/tokenizer.py +198 -0
- llama_stack/models/llama/llama3/tool_utils.py +266 -0
- llama_stack/models/llama/llama3_1/__init__.py +12 -0
- llama_stack/models/llama/llama3_1/prompt_format.md +358 -0
- llama_stack/models/llama/llama3_1/prompts.py +258 -0
- llama_stack/models/llama/llama3_2/prompts_text.py +229 -0
- llama_stack/models/llama/llama3_2/prompts_vision.py +126 -0
- llama_stack/models/llama/llama3_2/text_prompt_format.md +286 -0
- llama_stack/models/llama/llama3_2/vision_prompt_format.md +141 -0
- llama_stack/models/llama/llama3_3/prompts.py +259 -0
- llama_stack/models/llama/llama4/args.py +107 -0
- llama_stack/models/llama/llama4/chat_format.py +317 -0
- llama_stack/models/llama/llama4/datatypes.py +56 -0
- llama_stack/models/llama/llama4/ffn.py +58 -0
- llama_stack/models/llama/llama4/generation.py +313 -0
- llama_stack/models/llama/llama4/model.py +437 -0
- llama_stack/models/llama/llama4/moe.py +214 -0
- llama_stack/models/llama/llama4/preprocess.py +435 -0
- llama_stack/models/llama/llama4/prompt_format.md +304 -0
- llama_stack/models/llama/llama4/prompt_templates/system_prompts.py +136 -0
- llama_stack/models/llama/llama4/prompts.py +279 -0
- llama_stack/models/llama/llama4/quantization/__init__.py +5 -0
- llama_stack/models/llama/llama4/quantization/loader.py +226 -0
- llama_stack/models/llama/llama4/tokenizer.model +200000 -0
- llama_stack/models/llama/llama4/tokenizer.py +263 -0
- llama_stack/models/llama/llama4/vision/__init__.py +5 -0
- llama_stack/models/llama/llama4/vision/embedding.py +210 -0
- llama_stack/models/llama/llama4/vision/encoder.py +412 -0
- llama_stack/models/llama/prompt_format.py +191 -0
- llama_stack/models/llama/quantize_impls.py +316 -0
- llama_stack/models/llama/sku_list.py +1029 -0
- llama_stack/models/llama/sku_types.py +233 -0
- llama_stack/models/llama/tokenizer_utils.py +40 -0
- llama_stack/providers/datatypes.py +136 -107
- llama_stack/providers/inline/__init__.py +5 -0
- llama_stack/providers/inline/agents/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/agents/meta_reference}/__init__.py +12 -5
- llama_stack/providers/inline/agents/meta_reference/agent_instance.py +1024 -0
- llama_stack/providers/inline/agents/meta_reference/agents.py +383 -0
- llama_stack/providers/inline/agents/meta_reference/config.py +37 -0
- llama_stack/providers/inline/agents/meta_reference/persistence.py +228 -0
- llama_stack/providers/inline/agents/meta_reference/responses/__init__.py +5 -0
- llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +423 -0
- llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +1226 -0
- llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py +449 -0
- llama_stack/providers/inline/agents/meta_reference/responses/types.py +194 -0
- llama_stack/providers/inline/agents/meta_reference/responses/utils.py +365 -0
- llama_stack/providers/inline/agents/meta_reference/safety.py +52 -0
- llama_stack/providers/inline/batches/__init__.py +5 -0
- llama_stack/providers/inline/batches/reference/__init__.py +36 -0
- llama_stack/providers/inline/batches/reference/batches.py +679 -0
- llama_stack/providers/inline/batches/reference/config.py +40 -0
- llama_stack/providers/inline/datasetio/__init__.py +5 -0
- llama_stack/providers/inline/datasetio/localfs/__init__.py +20 -0
- llama_stack/providers/inline/datasetio/localfs/config.py +23 -0
- llama_stack/providers/inline/datasetio/localfs/datasetio.py +113 -0
- llama_stack/providers/inline/eval/__init__.py +5 -0
- llama_stack/providers/inline/eval/meta_reference/__init__.py +28 -0
- llama_stack/providers/inline/eval/meta_reference/config.py +23 -0
- llama_stack/providers/inline/eval/meta_reference/eval.py +259 -0
- llama_stack/providers/inline/files/localfs/__init__.py +20 -0
- llama_stack/providers/inline/files/localfs/config.py +31 -0
- llama_stack/providers/inline/files/localfs/files.py +219 -0
- llama_stack/providers/inline/inference/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/__init__.py +4 -4
- llama_stack/providers/inline/inference/meta_reference/common.py +24 -0
- llama_stack/providers/inline/inference/meta_reference/config.py +68 -0
- llama_stack/providers/inline/inference/meta_reference/generators.py +211 -0
- llama_stack/providers/inline/inference/meta_reference/inference.py +158 -0
- llama_stack/providers/inline/inference/meta_reference/model_parallel.py +96 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/parallel_utils.py +56 -73
- llama_stack/providers/inline/inference/sentence_transformers/__init__.py +22 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/inference/sentence_transformers}/config.py +6 -4
- llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +83 -0
- llama_stack/providers/inline/post_training/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/utils.py +35 -0
- llama_stack/providers/inline/post_training/common/validator.py +36 -0
- llama_stack/providers/inline/post_training/huggingface/__init__.py +27 -0
- llama_stack/providers/inline/post_training/huggingface/config.py +83 -0
- llama_stack/providers/inline/post_training/huggingface/post_training.py +208 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py +519 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py +485 -0
- llama_stack/providers/inline/post_training/huggingface/utils.py +269 -0
- llama_stack/providers/inline/post_training/torchtune/__init__.py +27 -0
- llama_stack/providers/inline/post_training/torchtune/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +240 -0
- llama_stack/providers/inline/post_training/torchtune/common/utils.py +99 -0
- llama_stack/providers/inline/post_training/torchtune/config.py +20 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +57 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/sft.py +78 -0
- llama_stack/providers/inline/post_training/torchtune/post_training.py +178 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +588 -0
- llama_stack/providers/inline/safety/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/codeshield → inline/safety/code_scanner}/__init__.py +4 -2
- llama_stack/providers/inline/safety/code_scanner/code_scanner.py +128 -0
- llama_stack/providers/{impls/meta_reference/memory → inline/safety/code_scanner}/config.py +5 -3
- llama_stack/providers/inline/safety/llama_guard/__init__.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/config.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/llama_guard.py +489 -0
- llama_stack/providers/{adapters/memory/sample → inline/safety/prompt_guard}/__init__.py +4 -4
- llama_stack/providers/inline/safety/prompt_guard/config.py +32 -0
- llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py +131 -0
- llama_stack/providers/inline/scoring/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/__init__.py +25 -0
- llama_stack/providers/{adapters/memory/weaviate → inline/scoring/basic}/config.py +5 -7
- llama_stack/providers/inline/scoring/basic/scoring.py +126 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py +240 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py +41 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/docvqa.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/equality.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/ifeval.py +23 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_math_response.py +27 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py +71 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/subset_of.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py +80 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py +66 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py +58 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py +38 -0
- llama_stack/providers/inline/scoring/basic/utils/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py +3319 -0
- llama_stack/providers/inline/scoring/basic/utils/math_utils.py +330 -0
- llama_stack/providers/inline/scoring/braintrust/__init__.py +27 -0
- llama_stack/providers/inline/scoring/braintrust/braintrust.py +230 -0
- llama_stack/providers/inline/scoring/braintrust/config.py +21 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_correctness.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_relevancy.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_similarity.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_entity_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_precision.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_relevancy.py +23 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/factuality.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/faithfulness.py +24 -0
- llama_stack/providers/inline/scoring/llm_as_judge/__init__.py +21 -0
- llama_stack/providers/inline/scoring/llm_as_judge/config.py +14 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring.py +113 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +96 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +20 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +81 -0
- llama_stack/providers/inline/telemetry/__init__.py +5 -0
- llama_stack/providers/inline/telemetry/meta_reference/__init__.py +21 -0
- llama_stack/providers/inline/telemetry/meta_reference/config.py +47 -0
- llama_stack/providers/inline/telemetry/meta_reference/telemetry.py +252 -0
- llama_stack/providers/inline/tool_runtime/__init__.py +5 -0
- llama_stack/providers/inline/tool_runtime/rag/__init__.py +19 -0
- llama_stack/providers/{impls/meta_reference/telemetry → inline/tool_runtime/rag}/config.py +5 -3
- llama_stack/providers/inline/tool_runtime/rag/context_retriever.py +77 -0
- llama_stack/providers/inline/tool_runtime/rag/memory.py +332 -0
- llama_stack/providers/inline/vector_io/__init__.py +5 -0
- llama_stack/providers/inline/vector_io/chroma/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/chroma/config.py +30 -0
- llama_stack/providers/inline/vector_io/faiss/__init__.py +21 -0
- llama_stack/providers/inline/vector_io/faiss/config.py +26 -0
- llama_stack/providers/inline/vector_io/faiss/faiss.py +293 -0
- llama_stack/providers/inline/vector_io/milvus/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/milvus/config.py +29 -0
- llama_stack/providers/inline/vector_io/qdrant/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/qdrant/config.py +29 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/config.py +26 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +483 -0
- llama_stack/providers/registry/agents.py +16 -18
- llama_stack/providers/registry/batches.py +26 -0
- llama_stack/providers/registry/datasetio.py +49 -0
- llama_stack/providers/registry/eval.py +46 -0
- llama_stack/providers/registry/files.py +31 -0
- llama_stack/providers/registry/inference.py +273 -118
- llama_stack/providers/registry/post_training.py +69 -0
- llama_stack/providers/registry/safety.py +46 -41
- llama_stack/providers/registry/scoring.py +51 -0
- llama_stack/providers/registry/tool_runtime.py +87 -0
- llama_stack/providers/registry/vector_io.py +828 -0
- llama_stack/providers/remote/__init__.py +5 -0
- llama_stack/providers/remote/agents/__init__.py +5 -0
- llama_stack/providers/remote/datasetio/__init__.py +5 -0
- llama_stack/providers/{adapters/memory/chroma → remote/datasetio/huggingface}/__init__.py +7 -4
- llama_stack/providers/remote/datasetio/huggingface/config.py +23 -0
- llama_stack/providers/remote/datasetio/huggingface/huggingface.py +99 -0
- llama_stack/providers/remote/datasetio/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/datasetio/nvidia/config.py +61 -0
- llama_stack/providers/remote/datasetio/nvidia/datasetio.py +116 -0
- llama_stack/providers/remote/eval/__init__.py +5 -0
- llama_stack/providers/remote/eval/nvidia/__init__.py +31 -0
- llama_stack/providers/remote/eval/nvidia/config.py +29 -0
- llama_stack/providers/remote/eval/nvidia/eval.py +162 -0
- llama_stack/providers/remote/files/s3/__init__.py +19 -0
- llama_stack/providers/remote/files/s3/config.py +42 -0
- llama_stack/providers/remote/files/s3/files.py +313 -0
- llama_stack/providers/remote/inference/__init__.py +5 -0
- llama_stack/providers/{adapters/safety/sample → remote/inference/anthropic}/__init__.py +4 -6
- llama_stack/providers/remote/inference/anthropic/anthropic.py +36 -0
- llama_stack/providers/remote/inference/anthropic/config.py +28 -0
- llama_stack/providers/{impls/meta_reference/telemetry → remote/inference/azure}/__init__.py +4 -4
- llama_stack/providers/remote/inference/azure/azure.py +25 -0
- llama_stack/providers/remote/inference/azure/config.py +61 -0
- llama_stack/providers/{adapters → remote}/inference/bedrock/__init__.py +18 -17
- llama_stack/providers/remote/inference/bedrock/bedrock.py +142 -0
- llama_stack/providers/{adapters/inference/sample → remote/inference/bedrock}/config.py +3 -4
- llama_stack/providers/remote/inference/bedrock/models.py +29 -0
- llama_stack/providers/remote/inference/cerebras/__init__.py +19 -0
- llama_stack/providers/remote/inference/cerebras/cerebras.py +28 -0
- llama_stack/providers/remote/inference/cerebras/config.py +30 -0
- llama_stack/providers/{adapters → remote}/inference/databricks/__init__.py +4 -5
- llama_stack/providers/remote/inference/databricks/config.py +37 -0
- llama_stack/providers/remote/inference/databricks/databricks.py +44 -0
- llama_stack/providers/{adapters → remote}/inference/fireworks/__init__.py +8 -4
- llama_stack/providers/remote/inference/fireworks/config.py +27 -0
- llama_stack/providers/remote/inference/fireworks/fireworks.py +27 -0
- llama_stack/providers/{adapters/memory/pgvector → remote/inference/gemini}/__init__.py +4 -4
- llama_stack/providers/remote/inference/gemini/config.py +28 -0
- llama_stack/providers/remote/inference/gemini/gemini.py +82 -0
- llama_stack/providers/remote/inference/groq/__init__.py +15 -0
- llama_stack/providers/remote/inference/groq/config.py +34 -0
- llama_stack/providers/remote/inference/groq/groq.py +18 -0
- llama_stack/providers/remote/inference/llama_openai_compat/__init__.py +15 -0
- llama_stack/providers/remote/inference/llama_openai_compat/config.py +34 -0
- llama_stack/providers/remote/inference/llama_openai_compat/llama.py +46 -0
- llama_stack/providers/remote/inference/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/inference/nvidia/config.py +64 -0
- llama_stack/providers/remote/inference/nvidia/nvidia.py +61 -0
- llama_stack/providers/{adapters/safety/sample/config.py → remote/inference/nvidia/utils.py} +3 -4
- llama_stack/providers/{impls/vllm → remote/inference/ollama}/__init__.py +4 -6
- llama_stack/providers/remote/inference/ollama/config.py +25 -0
- llama_stack/providers/remote/inference/ollama/ollama.py +102 -0
- llama_stack/providers/{adapters/telemetry/opentelemetry → remote/inference/openai}/__init__.py +4 -4
- llama_stack/providers/remote/inference/openai/config.py +39 -0
- llama_stack/providers/remote/inference/openai/openai.py +38 -0
- llama_stack/providers/remote/inference/passthrough/__init__.py +23 -0
- llama_stack/providers/remote/inference/passthrough/config.py +34 -0
- llama_stack/providers/remote/inference/passthrough/passthrough.py +122 -0
- llama_stack/providers/remote/inference/runpod/__init__.py +16 -0
- llama_stack/providers/remote/inference/runpod/config.py +32 -0
- llama_stack/providers/remote/inference/runpod/runpod.py +42 -0
- llama_stack/providers/remote/inference/sambanova/__init__.py +16 -0
- llama_stack/providers/remote/inference/sambanova/config.py +34 -0
- llama_stack/providers/remote/inference/sambanova/sambanova.py +28 -0
- llama_stack/providers/{adapters → remote}/inference/tgi/__init__.py +3 -4
- llama_stack/providers/remote/inference/tgi/config.py +76 -0
- llama_stack/providers/remote/inference/tgi/tgi.py +85 -0
- llama_stack/providers/{adapters → remote}/inference/together/__init__.py +8 -4
- llama_stack/providers/remote/inference/together/config.py +27 -0
- llama_stack/providers/remote/inference/together/together.py +102 -0
- llama_stack/providers/remote/inference/vertexai/__init__.py +15 -0
- llama_stack/providers/remote/inference/vertexai/config.py +48 -0
- llama_stack/providers/remote/inference/vertexai/vertexai.py +54 -0
- llama_stack/providers/remote/inference/vllm/__init__.py +22 -0
- llama_stack/providers/remote/inference/vllm/config.py +59 -0
- llama_stack/providers/remote/inference/vllm/vllm.py +111 -0
- llama_stack/providers/remote/inference/watsonx/__init__.py +15 -0
- llama_stack/providers/remote/inference/watsonx/config.py +45 -0
- llama_stack/providers/remote/inference/watsonx/watsonx.py +336 -0
- llama_stack/providers/remote/post_training/__init__.py +5 -0
- llama_stack/providers/remote/post_training/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/post_training/nvidia/config.py +113 -0
- llama_stack/providers/remote/post_training/nvidia/models.py +27 -0
- llama_stack/providers/remote/post_training/nvidia/post_training.py +430 -0
- llama_stack/providers/remote/post_training/nvidia/utils.py +63 -0
- llama_stack/providers/remote/safety/__init__.py +5 -0
- llama_stack/providers/remote/safety/bedrock/bedrock.py +111 -0
- llama_stack/providers/remote/safety/bedrock/config.py +14 -0
- llama_stack/providers/{adapters/inference/sample → remote/safety/nvidia}/__init__.py +5 -4
- llama_stack/providers/remote/safety/nvidia/config.py +40 -0
- llama_stack/providers/remote/safety/nvidia/nvidia.py +161 -0
- llama_stack/providers/{adapters/agents/sample → remote/safety/sambanova}/__init__.py +5 -4
- llama_stack/providers/remote/safety/sambanova/config.py +37 -0
- llama_stack/providers/remote/safety/sambanova/sambanova.py +98 -0
- llama_stack/providers/remote/tool_runtime/__init__.py +5 -0
- llama_stack/providers/remote/tool_runtime/bing_search/__init__.py +21 -0
- llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +112 -0
- llama_stack/providers/remote/tool_runtime/bing_search/config.py +22 -0
- llama_stack/providers/remote/tool_runtime/brave_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +148 -0
- llama_stack/providers/remote/tool_runtime/brave_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py +15 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py +20 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +73 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +84 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/__init__.py +22 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py +21 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +140 -0
- llama_stack/providers/remote/vector_io/__init__.py +5 -0
- llama_stack/providers/remote/vector_io/chroma/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/chroma/chroma.py +215 -0
- llama_stack/providers/remote/vector_io/chroma/config.py +28 -0
- llama_stack/providers/remote/vector_io/milvus/__init__.py +18 -0
- llama_stack/providers/remote/vector_io/milvus/config.py +35 -0
- llama_stack/providers/remote/vector_io/milvus/milvus.py +375 -0
- llama_stack/providers/remote/vector_io/pgvector/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/pgvector/config.py +47 -0
- llama_stack/providers/remote/vector_io/pgvector/pgvector.py +460 -0
- llama_stack/providers/remote/vector_io/qdrant/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/qdrant/config.py +37 -0
- llama_stack/providers/remote/vector_io/qdrant/qdrant.py +265 -0
- llama_stack/providers/remote/vector_io/weaviate/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/weaviate/config.py +32 -0
- llama_stack/providers/remote/vector_io/weaviate/weaviate.py +393 -0
- llama_stack/providers/utils/bedrock/__init__.py +5 -0
- llama_stack/providers/utils/bedrock/client.py +74 -0
- llama_stack/providers/utils/bedrock/config.py +64 -0
- llama_stack/providers/utils/bedrock/refreshable_boto_session.py +112 -0
- llama_stack/providers/utils/common/__init__.py +5 -0
- llama_stack/providers/utils/common/data_schema_validator.py +103 -0
- llama_stack/providers/utils/datasetio/__init__.py +5 -0
- llama_stack/providers/utils/datasetio/url_utils.py +47 -0
- llama_stack/providers/utils/files/__init__.py +5 -0
- llama_stack/providers/utils/files/form_data.py +69 -0
- llama_stack/providers/utils/inference/__init__.py +8 -7
- llama_stack/providers/utils/inference/embedding_mixin.py +101 -0
- llama_stack/providers/utils/inference/inference_store.py +264 -0
- llama_stack/providers/utils/inference/litellm_openai_mixin.py +336 -0
- llama_stack/providers/utils/inference/model_registry.py +173 -23
- llama_stack/providers/utils/inference/openai_compat.py +1261 -49
- llama_stack/providers/utils/inference/openai_mixin.py +506 -0
- llama_stack/providers/utils/inference/prompt_adapter.py +365 -67
- llama_stack/providers/utils/kvstore/api.py +6 -6
- llama_stack/providers/utils/kvstore/config.py +28 -48
- llama_stack/providers/utils/kvstore/kvstore.py +61 -15
- llama_stack/providers/utils/kvstore/mongodb/__init__.py +9 -0
- llama_stack/providers/utils/kvstore/mongodb/mongodb.py +82 -0
- llama_stack/providers/utils/kvstore/postgres/__init__.py +7 -0
- llama_stack/providers/utils/kvstore/postgres/postgres.py +114 -0
- llama_stack/providers/utils/kvstore/redis/redis.py +33 -9
- llama_stack/providers/utils/kvstore/sqlite/config.py +2 -1
- llama_stack/providers/utils/kvstore/sqlite/sqlite.py +123 -22
- llama_stack/providers/utils/memory/file_utils.py +1 -1
- llama_stack/providers/utils/memory/openai_vector_store_mixin.py +1304 -0
- llama_stack/providers/utils/memory/vector_store.py +220 -82
- llama_stack/providers/utils/pagination.py +43 -0
- llama_stack/providers/utils/responses/__init__.py +5 -0
- llama_stack/providers/utils/responses/responses_store.py +292 -0
- llama_stack/providers/utils/scheduler.py +270 -0
- llama_stack/providers/utils/scoring/__init__.py +5 -0
- llama_stack/providers/utils/scoring/aggregation_utils.py +75 -0
- llama_stack/providers/utils/scoring/base_scoring_fn.py +114 -0
- llama_stack/providers/utils/scoring/basic_scoring_utils.py +26 -0
- llama_stack/providers/utils/sqlstore/__init__.py +5 -0
- llama_stack/providers/utils/sqlstore/api.py +128 -0
- llama_stack/providers/utils/sqlstore/authorized_sqlstore.py +319 -0
- llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py +343 -0
- llama_stack/providers/utils/sqlstore/sqlstore.py +70 -0
- llama_stack/providers/utils/telemetry/trace_protocol.py +142 -0
- llama_stack/providers/utils/telemetry/tracing.py +192 -53
- llama_stack/providers/utils/tools/__init__.py +5 -0
- llama_stack/providers/utils/tools/mcp.py +148 -0
- llama_stack/providers/utils/tools/ttl_dict.py +70 -0
- llama_stack/providers/utils/vector_io/__init__.py +5 -0
- llama_stack/providers/utils/vector_io/vector_utils.py +156 -0
- llama_stack/schema_utils.py +118 -0
- llama_stack/strong_typing/__init__.py +19 -0
- llama_stack/strong_typing/auxiliary.py +228 -0
- llama_stack/strong_typing/classdef.py +440 -0
- llama_stack/strong_typing/core.py +46 -0
- llama_stack/strong_typing/deserializer.py +877 -0
- llama_stack/strong_typing/docstring.py +409 -0
- llama_stack/strong_typing/exception.py +23 -0
- llama_stack/strong_typing/inspection.py +1085 -0
- llama_stack/strong_typing/mapping.py +40 -0
- llama_stack/strong_typing/name.py +182 -0
- llama_stack/strong_typing/py.typed +0 -0
- llama_stack/strong_typing/schema.py +792 -0
- llama_stack/strong_typing/serialization.py +97 -0
- llama_stack/strong_typing/serializer.py +500 -0
- llama_stack/strong_typing/slots.py +27 -0
- llama_stack/strong_typing/topological.py +89 -0
- llama_stack/testing/__init__.py +5 -0
- llama_stack/testing/api_recorder.py +956 -0
- llama_stack/ui/node_modules/flatted/python/flatted.py +149 -0
- llama_stack-0.3.4.dist-info/METADATA +261 -0
- llama_stack-0.3.4.dist-info/RECORD +625 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/WHEEL +1 -1
- llama_stack/apis/agents/client.py +0 -292
- llama_stack/apis/agents/event_logger.py +0 -184
- llama_stack/apis/batch_inference/batch_inference.py +0 -72
- llama_stack/apis/common/deployment_types.py +0 -31
- llama_stack/apis/dataset/dataset.py +0 -63
- llama_stack/apis/evals/evals.py +0 -122
- llama_stack/apis/inference/client.py +0 -197
- llama_stack/apis/inspect/client.py +0 -82
- llama_stack/apis/memory/client.py +0 -155
- llama_stack/apis/memory/memory.py +0 -65
- llama_stack/apis/memory_banks/__init__.py +0 -7
- llama_stack/apis/memory_banks/client.py +0 -101
- llama_stack/apis/memory_banks/memory_banks.py +0 -78
- llama_stack/apis/models/client.py +0 -83
- llama_stack/apis/reward_scoring/__init__.py +0 -7
- llama_stack/apis/reward_scoring/reward_scoring.py +0 -55
- llama_stack/apis/safety/client.py +0 -105
- llama_stack/apis/shields/client.py +0 -79
- llama_stack/cli/download.py +0 -340
- llama_stack/cli/model/describe.py +0 -82
- llama_stack/cli/model/download.py +0 -24
- llama_stack/cli/model/list.py +0 -62
- llama_stack/cli/model/model.py +0 -34
- llama_stack/cli/model/prompt_format.py +0 -112
- llama_stack/cli/model/safety_models.py +0 -52
- llama_stack/cli/stack/build.py +0 -299
- llama_stack/cli/stack/configure.py +0 -178
- llama_stack/distribution/build.py +0 -123
- llama_stack/distribution/build_conda_env.sh +0 -136
- llama_stack/distribution/build_container.sh +0 -142
- llama_stack/distribution/common.sh +0 -40
- llama_stack/distribution/configure_container.sh +0 -47
- llama_stack/distribution/datatypes.py +0 -139
- llama_stack/distribution/distribution.py +0 -58
- llama_stack/distribution/inspect.py +0 -67
- llama_stack/distribution/request_headers.py +0 -57
- llama_stack/distribution/resolver.py +0 -323
- llama_stack/distribution/routers/__init__.py +0 -48
- llama_stack/distribution/routers/routers.py +0 -158
- llama_stack/distribution/routers/routing_tables.py +0 -173
- llama_stack/distribution/server/endpoints.py +0 -48
- llama_stack/distribution/server/server.py +0 -343
- llama_stack/distribution/start_conda_env.sh +0 -42
- llama_stack/distribution/start_container.sh +0 -64
- llama_stack/distribution/templates/local-bedrock-conda-example-build.yaml +0 -10
- llama_stack/distribution/templates/local-build.yaml +0 -10
- llama_stack/distribution/templates/local-databricks-build.yaml +0 -10
- llama_stack/distribution/templates/local-fireworks-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-endpoint-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-serverless-build.yaml +0 -10
- llama_stack/distribution/templates/local-ollama-build.yaml +0 -10
- llama_stack/distribution/templates/local-tgi-build.yaml +0 -10
- llama_stack/distribution/templates/local-together-build.yaml +0 -10
- llama_stack/distribution/templates/local-vllm-build.yaml +0 -10
- llama_stack/distribution/utils/exec.py +0 -105
- llama_stack/providers/adapters/agents/sample/sample.py +0 -18
- llama_stack/providers/adapters/inference/bedrock/bedrock.py +0 -451
- llama_stack/providers/adapters/inference/bedrock/config.py +0 -55
- llama_stack/providers/adapters/inference/databricks/config.py +0 -21
- llama_stack/providers/adapters/inference/databricks/databricks.py +0 -125
- llama_stack/providers/adapters/inference/fireworks/config.py +0 -20
- llama_stack/providers/adapters/inference/fireworks/fireworks.py +0 -130
- llama_stack/providers/adapters/inference/ollama/__init__.py +0 -19
- llama_stack/providers/adapters/inference/ollama/ollama.py +0 -175
- llama_stack/providers/adapters/inference/sample/sample.py +0 -23
- llama_stack/providers/adapters/inference/tgi/config.py +0 -43
- llama_stack/providers/adapters/inference/tgi/tgi.py +0 -200
- llama_stack/providers/adapters/inference/together/config.py +0 -22
- llama_stack/providers/adapters/inference/together/together.py +0 -143
- llama_stack/providers/adapters/memory/chroma/chroma.py +0 -157
- llama_stack/providers/adapters/memory/pgvector/config.py +0 -17
- llama_stack/providers/adapters/memory/pgvector/pgvector.py +0 -211
- llama_stack/providers/adapters/memory/sample/sample.py +0 -23
- llama_stack/providers/adapters/memory/weaviate/__init__.py +0 -15
- llama_stack/providers/adapters/memory/weaviate/weaviate.py +0 -190
- llama_stack/providers/adapters/safety/bedrock/bedrock.py +0 -113
- llama_stack/providers/adapters/safety/bedrock/config.py +0 -16
- llama_stack/providers/adapters/safety/sample/sample.py +0 -23
- llama_stack/providers/adapters/safety/together/__init__.py +0 -18
- llama_stack/providers/adapters/safety/together/config.py +0 -26
- llama_stack/providers/adapters/safety/together/together.py +0 -101
- llama_stack/providers/adapters/telemetry/opentelemetry/config.py +0 -12
- llama_stack/providers/adapters/telemetry/opentelemetry/opentelemetry.py +0 -201
- llama_stack/providers/adapters/telemetry/sample/__init__.py +0 -17
- llama_stack/providers/adapters/telemetry/sample/config.py +0 -12
- llama_stack/providers/adapters/telemetry/sample/sample.py +0 -18
- llama_stack/providers/impls/meta_reference/agents/agent_instance.py +0 -844
- llama_stack/providers/impls/meta_reference/agents/agents.py +0 -161
- llama_stack/providers/impls/meta_reference/agents/persistence.py +0 -84
- llama_stack/providers/impls/meta_reference/agents/rag/context_retriever.py +0 -74
- llama_stack/providers/impls/meta_reference/agents/safety.py +0 -57
- llama_stack/providers/impls/meta_reference/agents/tests/code_execution.py +0 -93
- llama_stack/providers/impls/meta_reference/agents/tests/test_chat_agent.py +0 -305
- llama_stack/providers/impls/meta_reference/agents/tools/base.py +0 -20
- llama_stack/providers/impls/meta_reference/agents/tools/builtin.py +0 -375
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_env_prefix.py +0 -133
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_execution.py +0 -256
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/matplotlib_custom_backend.py +0 -87
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/utils.py +0 -21
- llama_stack/providers/impls/meta_reference/agents/tools/safety.py +0 -43
- llama_stack/providers/impls/meta_reference/codeshield/code_scanner.py +0 -58
- llama_stack/providers/impls/meta_reference/inference/config.py +0 -45
- llama_stack/providers/impls/meta_reference/inference/generation.py +0 -376
- llama_stack/providers/impls/meta_reference/inference/inference.py +0 -280
- llama_stack/providers/impls/meta_reference/inference/model_parallel.py +0 -99
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_impls.py +0 -184
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_txest_disabled.py +0 -76
- llama_stack/providers/impls/meta_reference/inference/quantization/loader.py +0 -97
- llama_stack/providers/impls/meta_reference/inference/quantization/scripts/quantize_checkpoint.py +0 -161
- llama_stack/providers/impls/meta_reference/memory/__init__.py +0 -19
- llama_stack/providers/impls/meta_reference/memory/faiss.py +0 -113
- llama_stack/providers/impls/meta_reference/safety/__init__.py +0 -17
- llama_stack/providers/impls/meta_reference/safety/base.py +0 -57
- llama_stack/providers/impls/meta_reference/safety/config.py +0 -48
- llama_stack/providers/impls/meta_reference/safety/llama_guard.py +0 -268
- llama_stack/providers/impls/meta_reference/safety/prompt_guard.py +0 -145
- llama_stack/providers/impls/meta_reference/safety/safety.py +0 -112
- llama_stack/providers/impls/meta_reference/telemetry/console.py +0 -89
- llama_stack/providers/impls/vllm/config.py +0 -35
- llama_stack/providers/impls/vllm/vllm.py +0 -241
- llama_stack/providers/registry/memory.py +0 -78
- llama_stack/providers/registry/telemetry.py +0 -44
- llama_stack/providers/tests/agents/test_agents.py +0 -210
- llama_stack/providers/tests/inference/test_inference.py +0 -257
- llama_stack/providers/tests/inference/test_prompt_adapter.py +0 -126
- llama_stack/providers/tests/memory/test_memory.py +0 -136
- llama_stack/providers/tests/resolver.py +0 -100
- llama_stack/providers/tests/safety/test_safety.py +0 -77
- llama_stack-0.0.42.dist-info/METADATA +0 -137
- llama_stack-0.0.42.dist-info/RECORD +0 -256
- /llama_stack/{distribution → core}/__init__.py +0 -0
- /llama_stack/{distribution/server → core/access_control}/__init__.py +0 -0
- /llama_stack/{distribution/utils → core/conversations}/__init__.py +0 -0
- /llama_stack/{providers/adapters → core/prompts}/__init__.py +0 -0
- /llama_stack/{providers/adapters/agents → core/routing_tables}/__init__.py +0 -0
- /llama_stack/{providers/adapters/inference → core/server}/__init__.py +0 -0
- /llama_stack/{providers/adapters/memory → core/storage}/__init__.py +0 -0
- /llama_stack/{providers/adapters/safety → core/ui}/__init__.py +0 -0
- /llama_stack/{providers/adapters/telemetry → core/ui/modules}/__init__.py +0 -0
- /llama_stack/{providers/impls → core/ui/page}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference → core/ui/page/distribution}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/rag → core/ui/page/evaluations}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tests → core/ui/page/playground}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools → core/utils}/__init__.py +0 -0
- /llama_stack/{distribution → core}/utils/dynamic.py +0 -0
- /llama_stack/{distribution → core}/utils/serialize.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools/ipython_tool → distributions}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization → models}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization/scripts → models/llama}/__init__.py +0 -0
- /llama_stack/{providers/tests → models/llama/llama3}/__init__.py +0 -0
- /llama_stack/{providers/tests/agents → models/llama/llama3/quantization}/__init__.py +0 -0
- /llama_stack/{providers/tests/inference → models/llama/llama3_2}/__init__.py +0 -0
- /llama_stack/{providers/tests/memory → models/llama/llama3_3}/__init__.py +0 -0
- /llama_stack/{providers/tests/safety → models/llama/llama4}/__init__.py +0 -0
- /llama_stack/{scripts → models/llama/llama4/prompt_templates}/__init__.py +0 -0
- /llama_stack/providers/{adapters → remote}/safety/bedrock/__init__.py +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/entry_points.txt +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info/licenses}/LICENSE +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,679 @@
|
|
|
1
|
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
+
# All rights reserved.
|
|
3
|
+
#
|
|
4
|
+
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
+
# the root directory of this source tree.
|
|
6
|
+
|
|
7
|
+
import asyncio
|
|
8
|
+
import hashlib
|
|
9
|
+
import itertools
|
|
10
|
+
import json
|
|
11
|
+
import time
|
|
12
|
+
import uuid
|
|
13
|
+
from io import BytesIO
|
|
14
|
+
from typing import Any, Literal
|
|
15
|
+
|
|
16
|
+
from openai.types.batch import BatchError, Errors
|
|
17
|
+
from pydantic import BaseModel
|
|
18
|
+
|
|
19
|
+
from llama_stack.apis.batches import Batches, BatchObject, ListBatchesResponse
|
|
20
|
+
from llama_stack.apis.common.errors import ConflictError, ResourceNotFoundError
|
|
21
|
+
from llama_stack.apis.files import Files, OpenAIFilePurpose
|
|
22
|
+
from llama_stack.apis.inference import (
|
|
23
|
+
Inference,
|
|
24
|
+
OpenAIAssistantMessageParam,
|
|
25
|
+
OpenAIChatCompletionRequestWithExtraBody,
|
|
26
|
+
OpenAICompletionRequestWithExtraBody,
|
|
27
|
+
OpenAIDeveloperMessageParam,
|
|
28
|
+
OpenAIEmbeddingsRequestWithExtraBody,
|
|
29
|
+
OpenAIMessageParam,
|
|
30
|
+
OpenAISystemMessageParam,
|
|
31
|
+
OpenAIToolMessageParam,
|
|
32
|
+
OpenAIUserMessageParam,
|
|
33
|
+
)
|
|
34
|
+
from llama_stack.apis.models import Models
|
|
35
|
+
from llama_stack.log import get_logger
|
|
36
|
+
from llama_stack.providers.utils.kvstore import KVStore
|
|
37
|
+
|
|
38
|
+
from .config import ReferenceBatchesImplConfig
|
|
39
|
+
|
|
40
|
+
BATCH_PREFIX = "batch:"
|
|
41
|
+
|
|
42
|
+
logger = get_logger(__name__)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class AsyncBytesIO:
|
|
46
|
+
"""
|
|
47
|
+
Async-compatible BytesIO wrapper to allow async file-like operations.
|
|
48
|
+
|
|
49
|
+
We use this when uploading files to the Files API, as it expects an
|
|
50
|
+
async file-like object.
|
|
51
|
+
"""
|
|
52
|
+
|
|
53
|
+
def __init__(self, data: bytes):
|
|
54
|
+
self._buffer = BytesIO(data)
|
|
55
|
+
|
|
56
|
+
async def read(self, n=-1):
|
|
57
|
+
return self._buffer.read(n)
|
|
58
|
+
|
|
59
|
+
async def seek(self, pos, whence=0):
|
|
60
|
+
return self._buffer.seek(pos, whence)
|
|
61
|
+
|
|
62
|
+
def __enter__(self):
|
|
63
|
+
return self
|
|
64
|
+
|
|
65
|
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
66
|
+
self._buffer.close()
|
|
67
|
+
|
|
68
|
+
def __getattr__(self, name):
|
|
69
|
+
return getattr(self._buffer, name)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class BatchRequest(BaseModel):
|
|
73
|
+
line_num: int
|
|
74
|
+
custom_id: str
|
|
75
|
+
method: str
|
|
76
|
+
url: str
|
|
77
|
+
body: dict[str, Any]
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def convert_to_openai_message_param(msg: dict[str, Any]) -> OpenAIMessageParam:
|
|
81
|
+
"""Convert a message dictionary to OpenAIMessageParam based on role."""
|
|
82
|
+
role = msg.get("role")
|
|
83
|
+
|
|
84
|
+
if role == "user":
|
|
85
|
+
return OpenAIUserMessageParam(**msg)
|
|
86
|
+
elif role == "system":
|
|
87
|
+
return OpenAISystemMessageParam(**msg)
|
|
88
|
+
elif role == "assistant":
|
|
89
|
+
return OpenAIAssistantMessageParam(**msg)
|
|
90
|
+
elif role == "tool":
|
|
91
|
+
return OpenAIToolMessageParam(**msg)
|
|
92
|
+
elif role == "developer":
|
|
93
|
+
return OpenAIDeveloperMessageParam(**msg)
|
|
94
|
+
else:
|
|
95
|
+
raise ValueError(f"Unknown message role: {role}")
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
class ReferenceBatchesImpl(Batches):
|
|
99
|
+
"""Reference implementation of the Batches API.
|
|
100
|
+
|
|
101
|
+
This implementation processes batch files by making individual requests
|
|
102
|
+
to the inference API and generates output files with results.
|
|
103
|
+
"""
|
|
104
|
+
|
|
105
|
+
def __init__(
|
|
106
|
+
self,
|
|
107
|
+
config: ReferenceBatchesImplConfig,
|
|
108
|
+
inference_api: Inference,
|
|
109
|
+
files_api: Files,
|
|
110
|
+
models_api: Models,
|
|
111
|
+
kvstore: KVStore,
|
|
112
|
+
) -> None:
|
|
113
|
+
self.config = config
|
|
114
|
+
self.kvstore = kvstore
|
|
115
|
+
self.inference_api = inference_api
|
|
116
|
+
self.files_api = files_api
|
|
117
|
+
self.models_api = models_api
|
|
118
|
+
self._processing_tasks: dict[str, asyncio.Task] = {}
|
|
119
|
+
self._batch_semaphore = asyncio.Semaphore(config.max_concurrent_batches)
|
|
120
|
+
self._update_batch_lock = asyncio.Lock()
|
|
121
|
+
|
|
122
|
+
# this is to allow tests to disable background processing
|
|
123
|
+
self.process_batches = True
|
|
124
|
+
|
|
125
|
+
async def initialize(self) -> None:
|
|
126
|
+
# TODO: start background processing of existing tasks
|
|
127
|
+
pass
|
|
128
|
+
|
|
129
|
+
async def shutdown(self) -> None:
|
|
130
|
+
"""Shutdown the batches provider."""
|
|
131
|
+
if self._processing_tasks:
|
|
132
|
+
# don't cancel tasks - just let them stop naturally on shutdown
|
|
133
|
+
# cancelling would mark batches as "cancelled" in the database
|
|
134
|
+
logger.info(f"Shutdown initiated with {len(self._processing_tasks)} active batch processing tasks")
|
|
135
|
+
|
|
136
|
+
# TODO (SECURITY): this currently works w/ configured api keys, not with x-llamastack-provider-data or with user policy restrictions
|
|
137
|
+
async def create_batch(
|
|
138
|
+
self,
|
|
139
|
+
input_file_id: str,
|
|
140
|
+
endpoint: str,
|
|
141
|
+
completion_window: Literal["24h"],
|
|
142
|
+
metadata: dict[str, str] | None = None,
|
|
143
|
+
idempotency_key: str | None = None,
|
|
144
|
+
) -> BatchObject:
|
|
145
|
+
"""
|
|
146
|
+
Create a new batch for processing multiple API requests.
|
|
147
|
+
|
|
148
|
+
This implementation provides optional idempotency: when an idempotency key
|
|
149
|
+
(idempotency_key) is provided, a deterministic ID is generated based on the input
|
|
150
|
+
parameters. If a batch with the same parameters already exists, it will be
|
|
151
|
+
returned instead of creating a duplicate. Without an idempotency key,
|
|
152
|
+
each request creates a new batch with a unique ID.
|
|
153
|
+
|
|
154
|
+
Args:
|
|
155
|
+
input_file_id: The ID of an uploaded file containing requests for the batch.
|
|
156
|
+
endpoint: The endpoint to be used for all requests in the batch.
|
|
157
|
+
completion_window: The time window within which the batch should be processed.
|
|
158
|
+
metadata: Optional metadata for the batch.
|
|
159
|
+
idempotency_key: Optional idempotency key for enabling idempotent behavior.
|
|
160
|
+
|
|
161
|
+
Returns:
|
|
162
|
+
The created or existing batch object.
|
|
163
|
+
"""
|
|
164
|
+
|
|
165
|
+
# Error handling by levels -
|
|
166
|
+
# 0. Input param handling, results in 40x errors before processing, e.g.
|
|
167
|
+
# - Wrong completion_window
|
|
168
|
+
# - Invalid metadata types
|
|
169
|
+
# - Unknown endpoint
|
|
170
|
+
# -> no batch created
|
|
171
|
+
# 1. Errors preventing processing, result in BatchErrors aggregated in process_batch, e.g.
|
|
172
|
+
# - input_file_id missing
|
|
173
|
+
# - invalid json in file
|
|
174
|
+
# - missing custom_id, method, url, body
|
|
175
|
+
# - invalid model
|
|
176
|
+
# - streaming
|
|
177
|
+
# -> batch created, validation sends to failed status
|
|
178
|
+
# 2. Processing errors, result in error_file_id entries, e.g.
|
|
179
|
+
# - Any error returned from inference endpoint
|
|
180
|
+
# -> batch created, goes to completed status
|
|
181
|
+
|
|
182
|
+
# TODO: set expiration time for garbage collection
|
|
183
|
+
|
|
184
|
+
if endpoint not in ["/v1/chat/completions", "/v1/completions", "/v1/embeddings"]:
|
|
185
|
+
raise ValueError(
|
|
186
|
+
f"Invalid endpoint: {endpoint}. Supported values: /v1/chat/completions, /v1/completions, /v1/embeddings. Code: invalid_value. Param: endpoint",
|
|
187
|
+
)
|
|
188
|
+
|
|
189
|
+
if completion_window != "24h":
|
|
190
|
+
raise ValueError(
|
|
191
|
+
f"Invalid completion_window: {completion_window}. Supported values are: 24h. Code: invalid_value. Param: completion_window",
|
|
192
|
+
)
|
|
193
|
+
|
|
194
|
+
batch_id = f"batch_{uuid.uuid4().hex[:16]}"
|
|
195
|
+
|
|
196
|
+
# For idempotent requests, use the idempotency key for the batch ID
|
|
197
|
+
# This ensures the same key always maps to the same batch ID,
|
|
198
|
+
# allowing us to detect parameter conflicts
|
|
199
|
+
if idempotency_key is not None:
|
|
200
|
+
hash_input = idempotency_key.encode("utf-8")
|
|
201
|
+
hash_digest = hashlib.sha256(hash_input).hexdigest()[:24]
|
|
202
|
+
batch_id = f"batch_{hash_digest}"

            try:
                existing_batch = await self.retrieve_batch(batch_id)

                if (
                    existing_batch.input_file_id != input_file_id
                    or existing_batch.endpoint != endpoint
                    or existing_batch.completion_window != completion_window
                    or existing_batch.metadata != metadata
                ):
                    raise ConflictError(
                        f"Idempotency key '{idempotency_key}' was previously used with different parameters. "
                        "Either use a new idempotency key or ensure all parameters match the original request."
                    )

                logger.info(f"Returning existing batch with ID: {batch_id}")
                return existing_batch
            except ResourceNotFoundError:
                # Batch doesn't exist, continue with creation
                pass

        current_time = int(time.time())

        batch = BatchObject(
            id=batch_id,
            object="batch",
            endpoint=endpoint,
            input_file_id=input_file_id,
            completion_window=completion_window,
            status="validating",
            created_at=current_time,
            metadata=metadata,
        )

        await self.kvstore.set(f"batch:{batch_id}", batch.to_json())
        logger.info(f"Created new batch with ID: {batch_id}")

        if self.process_batches:
            task = asyncio.create_task(self._process_batch(batch_id))
            self._processing_tasks[batch_id] = task

        return batch

    async def cancel_batch(self, batch_id: str) -> BatchObject:
        """Cancel a batch that is in progress."""
        batch = await self.retrieve_batch(batch_id)

        if batch.status in ["cancelled", "cancelling"]:
            return batch

        if batch.status in ["completed", "failed", "expired"]:
            raise ConflictError(f"Cannot cancel batch '{batch_id}' with status '{batch.status}'")

        await self._update_batch(batch_id, status="cancelling", cancelling_at=int(time.time()))

        if batch_id in self._processing_tasks:
            self._processing_tasks[batch_id].cancel()
            # note: task removal and status="cancelled" handled in finally block of _process_batch

        return await self.retrieve_batch(batch_id)
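
    # Hypothetical caller-side view of the cancellation flow above (the
    # `batches_impl` handle is assumed for illustration):
    #
    #   batch = await batches_impl.cancel_batch(batch_id)
    #   batch.status  # "cancelling" right away; the background _process_batch task
    #                 # flips it to "cancelled" once the CancelledError is handled.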

    async def list_batches(
        self,
        after: str | None = None,
        limit: int = 20,
    ) -> ListBatchesResponse:
        """
        List all batches, eventually only for the current user.

        With no notion of user, we return all batches.
        """
        batch_values = await self.kvstore.values_in_range("batch:", "batch:\xff")

        batches = []
        for batch_data in batch_values:
            if batch_data:
                batches.append(BatchObject.model_validate_json(batch_data))

        batches.sort(key=lambda b: b.created_at, reverse=True)

        start_idx = 0
        if after:
            for i, batch in enumerate(batches):
                if batch.id == after:
                    start_idx = i + 1
                    break

        page_batches = batches[start_idx : start_idx + limit]
        has_more = (start_idx + limit) < len(batches)

        first_id = page_batches[0].id if page_batches else None
        last_id = page_batches[-1].id if page_batches else None

        return ListBatchesResponse(
            data=page_batches,
            first_id=first_id,
            last_id=last_id,
            has_more=has_more,
        )
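
    # Cursor-style pagination sketch for the listing above (illustrative caller
    # loop; `batches_impl` is an assumed handle to this implementation):
    #
    #   cursor = None
    #   while True:
    #       page = await batches_impl.list_batches(after=cursor, limit=20)
    #       for b in page.data:
    #           ...  # newest batches first, per the created_at sort above
    #       if not page.has_more:
    #           break
    #       cursor = page.last_id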

    async def retrieve_batch(self, batch_id: str) -> BatchObject:
        """Retrieve information about a specific batch."""
        batch_data = await self.kvstore.get(f"batch:{batch_id}")
        if not batch_data:
            raise ResourceNotFoundError(batch_id, "Batch", "batches.list()")

        return BatchObject.model_validate_json(batch_data)

    async def _update_batch(self, batch_id: str, **updates) -> None:
        """Update batch fields in kvstore."""
        async with self._update_batch_lock:
            try:
                batch = await self.retrieve_batch(batch_id)

                # batch processing is async. once cancelling, only allow "cancelled" status updates
                if batch.status == "cancelling" and updates.get("status") != "cancelled":
                    logger.info(
                        f"Skipping status update for cancelled batch {batch_id}: attempted {updates.get('status')}"
                    )
                    return

                if "errors" in updates:
                    updates["errors"] = updates["errors"].model_dump()

                batch_dict = batch.model_dump()
                batch_dict.update(updates)

                await self.kvstore.set(f"batch:{batch_id}", json.dumps(batch_dict))
            except Exception as e:
                logger.error(f"Failed to update batch {batch_id}: {e}")

    async def _validate_input(self, batch: BatchObject) -> tuple[list[BatchError], list[BatchRequest]]:
        """
        Read & validate input, return errors and valid input.

        Validation of
        - input_file_id existence
        - valid json
        - custom_id, method, url, body presence and valid
        - no streaming
        """
        requests: list[BatchRequest] = []
        errors: list[BatchError] = []
        try:
            await self.files_api.openai_retrieve_file(batch.input_file_id)
        except Exception:
            errors.append(
                BatchError(
                    code="invalid_request",
                    line=None,
                    message=f"Cannot find file {batch.input_file_id}.",
                    param="input_file_id",
                )
            )
            return errors, requests

        # TODO(SECURITY): do something about large files
        file_content_response = await self.files_api.openai_retrieve_file_content(batch.input_file_id)
        file_content = file_content_response.body.decode("utf-8")
        for line_num, line in enumerate(file_content.strip().split("\n"), 1):
            if line.strip():  # skip empty lines
                try:
                    request = json.loads(line)

                    if not isinstance(request, dict):
                        errors.append(
                            BatchError(
                                code="invalid_request",
                                line=line_num,
                                message="Each line must be a JSON dictionary object",
                            )
                        )
                        continue

                    valid = True

                    for param, expected_type, type_string in [
                        ("custom_id", str, "string"),
                        ("method", str, "string"),
                        ("url", str, "string"),
                        ("body", dict, "JSON dictionary object"),
                    ]:
                        if param not in request:
                            errors.append(
                                BatchError(
                                    code="missing_required_parameter",
                                    line=line_num,
                                    message=f"Missing required parameter: {param}",
                                    param=param,
                                )
                            )
                            valid = False
                        elif not isinstance(request[param], expected_type):
                            param_name = "URL" if param == "url" else param.capitalize()
                            errors.append(
                                BatchError(
                                    code="invalid_request",
                                    line=line_num,
                                    message=f"{param_name} must be a {type_string}",
                                    param=param,
                                )
                            )
                            valid = False

                    if (url := request.get("url")) and isinstance(url, str) and url != batch.endpoint:
                        errors.append(
                            BatchError(
                                code="invalid_url",
                                line=line_num,
                                message="URL provided for this request does not match the batch endpoint",
                                param="url",
                            )
                        )
                        valid = False

                    if (body := request.get("body")) and isinstance(body, dict):
                        if body.get("stream", False):
                            errors.append(
                                BatchError(
                                    code="streaming_unsupported",
                                    line=line_num,
                                    message="Streaming is not supported in batch processing",
                                    param="body.stream",
                                )
                            )
                            valid = False

                        if batch.endpoint == "/v1/chat/completions":
                            required_params: list[tuple[str, Any, str]] = [
                                ("model", str, "a string"),
                                # messages is specific to /v1/chat/completions
                                # we could skip validating messages here and let inference fail. however,
                                # that would be a very expensive way to find out messages is wrong.
                                ("messages", list, "an array"),  # TODO: allow messages to be a string?
                            ]
                        elif batch.endpoint == "/v1/completions":
                            required_params = [
                                ("model", str, "a string"),
                                ("prompt", str, "a string"),  # TODO: allow prompt to be a list of strings??
                            ]
                        else:  # /v1/embeddings
                            required_params = [
                                ("model", str, "a string"),
                                ("input", (str, list), "a string or array of strings"),
                            ]

                        for param, expected_type, type_string in required_params:
                            if param not in body:
                                errors.append(
                                    BatchError(
                                        code="invalid_request",
                                        line=line_num,
                                        message=f"{param.capitalize()} parameter is required",
                                        param=f"body.{param}",
                                    )
                                )
                                valid = False
                            elif not isinstance(body[param], expected_type):
                                errors.append(
                                    BatchError(
                                        code="invalid_request",
                                        line=line_num,
                                        message=f"{param.capitalize()} must be {type_string}",
                                        param=f"body.{param}",
                                    )
                                )
                                valid = False

                        if "model" in body and isinstance(body["model"], str):
                            try:
                                await self.models_api.get_model(body["model"])
                            except Exception:
                                errors.append(
                                    BatchError(
                                        code="model_not_found",
                                        line=line_num,
                                        message=f"Model '{body['model']}' does not exist or is not supported",
                                        param="body.model",
                                    )
                                )
                                valid = False

                    if valid:
                        assert isinstance(url, str), "URL must be a string"  # for mypy
                        assert isinstance(body, dict), "Body must be a dictionary"  # for mypy
                        requests.append(
                            BatchRequest(
                                line_num=line_num,
                                url=url,
                                method=request["method"],
                                custom_id=request["custom_id"],
                                body=body,
                            ),
                        )
                except json.JSONDecodeError:
                    errors.append(
                        BatchError(
                            code="invalid_json_line",
                            line=line_num,
                            message="This line is not parseable as valid JSON.",
                        )
                    )

        return errors, requests

    async def _process_batch(self, batch_id: str) -> None:
        """Background task to process a batch of requests."""
        try:
            logger.info(f"Starting batch processing for {batch_id}")
            async with self._batch_semaphore:  # semaphore to limit concurrency
                logger.info(f"Acquired semaphore for batch {batch_id}")
                await self._process_batch_impl(batch_id)
        except asyncio.CancelledError:
            logger.info(f"Batch processing cancelled for {batch_id}")
            await self._update_batch(batch_id, status="cancelled", cancelled_at=int(time.time()))
        except Exception as e:
            logger.error(f"Batch processing failed for {batch_id}: {e}")
            await self._update_batch(
                batch_id,
                status="failed",
                failed_at=int(time.time()),
                errors=Errors(data=[BatchError(code="internal_error", message=str(e))]),
            )
        finally:
            self._processing_tasks.pop(batch_id, None)

    async def _process_batch_impl(self, batch_id: str) -> None:
        """Implementation of batch processing logic."""
        errors: list[BatchError] = []
        batch = await self.retrieve_batch(batch_id)

        errors, requests = await self._validate_input(batch)
        if errors:
            await self._update_batch(batch_id, status="failed", failed_at=int(time.time()), errors=Errors(data=errors))
            logger.info(f"Batch validation failed for {batch_id} with {len(errors)} errors")
            return

        logger.info(f"Processing {len(requests)} requests for batch {batch_id}")

        total_requests = len(requests)
        await self._update_batch(
            batch_id,
            status="in_progress",
            request_counts={"total": total_requests, "completed": 0, "failed": 0},
        )

        error_results = []
        success_results = []
        completed_count = 0
        failed_count = 0

        for chunk in itertools.batched(requests, self.config.max_concurrent_requests_per_batch):
            # we use a TaskGroup to ensure all process-single-request tasks are canceled when process-batch is cancelled
            async with asyncio.TaskGroup() as tg:
                chunk_tasks = [tg.create_task(self._process_single_request(batch_id, request)) for request in chunk]

                chunk_results = await asyncio.gather(*chunk_tasks, return_exceptions=True)

                for result in chunk_results:
                    if isinstance(result, dict) and result.get("error") is not None:  # error response from inference
                        failed_count += 1
                        error_results.append(result)
                    elif isinstance(result, dict) and result.get("response") is not None:  # successful inference
                        completed_count += 1
                        success_results.append(result)
                    else:  # unexpected result
                        failed_count += 1
                        errors.append(BatchError(code="internal_error", message=f"Unexpected result: {result}"))

                await self._update_batch(
                    batch_id,
                    request_counts={"total": total_requests, "completed": completed_count, "failed": failed_count},
                )

        if errors:
            await self._update_batch(
                batch_id, status="failed", failed_at=int(time.time()), errors=Errors(data=errors)
            )
            return

        try:
            output_file_id = await self._create_output_file(batch_id, success_results, "success")
            await self._update_batch(batch_id, output_file_id=output_file_id)

            error_file_id = await self._create_output_file(batch_id, error_results, "error")
            await self._update_batch(batch_id, error_file_id=error_file_id)

            await self._update_batch(batch_id, status="completed", completed_at=int(time.time()))

            logger.info(
                f"Batch processing completed for {batch_id}: {completed_count} completed, {failed_count} failed"
            )
        except Exception as e:
            # note: errors is empty at this point, so we don't lose anything by ignoring it
            await self._update_batch(
                batch_id,
                status="failed",
                failed_at=int(time.time()),
                errors=Errors(data=[BatchError(code="output_failed", message=str(e))]),
            )
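
    # Chunking behavior relied on above (itertools.batched requires Python 3.12+;
    # values are illustrative):
    #
    #   list(itertools.batched(range(5), 2))  # -> [(0, 1), (2, 3), (4,)]
    #
    # so at most max_concurrent_requests_per_batch requests run concurrently per
    # chunk, and each chunk's tasks share one asyncio.TaskGroup for clean cancellation.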

    async def _process_single_request(self, batch_id: str, request: BatchRequest) -> dict:
        """Process a single request from the batch."""
        request_id = f"batch_req_{batch_id}_{request.line_num}"

        try:
            # TODO(SECURITY): review body for security issues
            if request.url == "/v1/chat/completions":
                request.body["messages"] = [convert_to_openai_message_param(msg) for msg in request.body["messages"]]
                chat_params = OpenAIChatCompletionRequestWithExtraBody(**request.body)
                chat_response = await self.inference_api.openai_chat_completion(chat_params)

                # this is for mypy, we don't allow streaming so we'll get the right type
                assert hasattr(chat_response, "model_dump_json"), "Chat response must have model_dump_json method"
                return {
                    "id": request_id,
                    "custom_id": request.custom_id,
                    "response": {
                        "status_code": 200,
                        "request_id": request_id,  # TODO: should this be different?
                        "body": chat_response.model_dump_json(),
                    },
                }
            elif request.url == "/v1/completions":
                completion_params = OpenAICompletionRequestWithExtraBody(**request.body)
                completion_response = await self.inference_api.openai_completion(completion_params)

                # this is for mypy, we don't allow streaming so we'll get the right type
                assert hasattr(completion_response, "model_dump_json"), (
                    "Completion response must have model_dump_json method"
                )
                return {
                    "id": request_id,
                    "custom_id": request.custom_id,
                    "response": {
                        "status_code": 200,
                        "request_id": request_id,
                        "body": completion_response.model_dump_json(),
                    },
                }
            else:  # /v1/embeddings
                embeddings_response = await self.inference_api.openai_embeddings(
                    OpenAIEmbeddingsRequestWithExtraBody(**request.body)
                )
                assert hasattr(embeddings_response, "model_dump_json"), (
                    "Embeddings response must have model_dump_json method"
                )
                return {
                    "id": request_id,
                    "custom_id": request.custom_id,
                    "response": {
                        "status_code": 200,
                        "request_id": request_id,  # TODO: should this be different?
                        "body": embeddings_response.model_dump_json(),
                    },
                }
        except Exception as e:
            logger.info(f"Error processing request {request.custom_id} in batch {batch_id}: {e}")
            return {
                "id": request_id,
                "custom_id": request.custom_id,
                "error": {"type": "request_failed", "message": str(e)},
            }

    async def _create_output_file(self, batch_id: str, results: list[dict], file_type: str) -> str:
        """
        Create an output file with batch results.

        This function filters results based on the specified file_type
        and uploads the file to the Files API.
        """
        output_lines = [json.dumps(result) for result in results]

        with AsyncBytesIO("\n".join(output_lines).encode("utf-8")) as file_buffer:
            file_buffer.filename = f"{batch_id}_{file_type}.jsonl"
            uploaded_file = await self.files_api.openai_upload_file(file=file_buffer, purpose=OpenAIFilePurpose.BATCH)
            return uploaded_file.id