llama-stack 0.0.42__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
- llama_stack/__init__.py +5 -0
- llama_stack/apis/agents/__init__.py +1 -1
- llama_stack/apis/agents/agents.py +700 -281
- llama_stack/apis/agents/openai_responses.py +1311 -0
- llama_stack/{providers/adapters/memory/sample/config.py → apis/batches/__init__.py} +2 -5
- llama_stack/apis/batches/batches.py +100 -0
- llama_stack/apis/benchmarks/__init__.py +7 -0
- llama_stack/apis/benchmarks/benchmarks.py +108 -0
- llama_stack/apis/common/content_types.py +143 -0
- llama_stack/apis/common/errors.py +103 -0
- llama_stack/apis/common/job_types.py +38 -0
- llama_stack/apis/common/responses.py +36 -0
- llama_stack/apis/common/training_types.py +36 -5
- llama_stack/apis/common/type_system.py +158 -0
- llama_stack/apis/conversations/__init__.py +31 -0
- llama_stack/apis/conversations/conversations.py +286 -0
- llama_stack/apis/datasetio/__init__.py +7 -0
- llama_stack/apis/datasetio/datasetio.py +59 -0
- llama_stack/apis/datasets/__init__.py +7 -0
- llama_stack/apis/datasets/datasets.py +251 -0
- llama_stack/apis/datatypes.py +160 -0
- llama_stack/apis/eval/__init__.py +7 -0
- llama_stack/apis/eval/eval.py +169 -0
- llama_stack/apis/files/__init__.py +7 -0
- llama_stack/apis/files/files.py +199 -0
- llama_stack/apis/inference/__init__.py +1 -1
- llama_stack/apis/inference/inference.py +1169 -113
- llama_stack/apis/inspect/__init__.py +1 -1
- llama_stack/apis/inspect/inspect.py +69 -16
- llama_stack/apis/models/__init__.py +1 -1
- llama_stack/apis/models/models.py +148 -21
- llama_stack/apis/post_training/__init__.py +1 -1
- llama_stack/apis/post_training/post_training.py +265 -120
- llama_stack/{providers/adapters/agents/sample/config.py → apis/prompts/__init__.py} +2 -5
- llama_stack/apis/prompts/prompts.py +204 -0
- llama_stack/apis/providers/__init__.py +7 -0
- llama_stack/apis/providers/providers.py +69 -0
- llama_stack/apis/resource.py +37 -0
- llama_stack/apis/safety/__init__.py +1 -1
- llama_stack/apis/safety/safety.py +95 -12
- llama_stack/apis/scoring/__init__.py +7 -0
- llama_stack/apis/scoring/scoring.py +93 -0
- llama_stack/apis/scoring_functions/__init__.py +7 -0
- llama_stack/apis/scoring_functions/scoring_functions.py +208 -0
- llama_stack/apis/shields/__init__.py +1 -1
- llama_stack/apis/shields/shields.py +76 -33
- llama_stack/apis/synthetic_data_generation/__init__.py +1 -1
- llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +40 -17
- llama_stack/apis/telemetry/__init__.py +1 -1
- llama_stack/apis/telemetry/telemetry.py +322 -31
- llama_stack/apis/{dataset → tools}/__init__.py +2 -1
- llama_stack/apis/tools/rag_tool.py +218 -0
- llama_stack/apis/tools/tools.py +221 -0
- llama_stack/apis/vector_io/__init__.py +7 -0
- llama_stack/apis/vector_io/vector_io.py +960 -0
- llama_stack/apis/vector_stores/__init__.py +7 -0
- llama_stack/apis/vector_stores/vector_stores.py +51 -0
- llama_stack/apis/version.py +9 -0
- llama_stack/cli/llama.py +13 -5
- llama_stack/cli/stack/_list_deps.py +182 -0
- llama_stack/cli/stack/list_apis.py +1 -1
- llama_stack/cli/stack/list_deps.py +55 -0
- llama_stack/cli/stack/list_providers.py +24 -10
- llama_stack/cli/stack/list_stacks.py +56 -0
- llama_stack/cli/stack/remove.py +115 -0
- llama_stack/cli/stack/run.py +169 -56
- llama_stack/cli/stack/stack.py +18 -4
- llama_stack/cli/stack/utils.py +151 -0
- llama_stack/cli/table.py +23 -61
- llama_stack/cli/utils.py +29 -0
- llama_stack/core/access_control/access_control.py +131 -0
- llama_stack/core/access_control/conditions.py +129 -0
- llama_stack/core/access_control/datatypes.py +107 -0
- llama_stack/core/build.py +164 -0
- llama_stack/core/client.py +205 -0
- llama_stack/core/common.sh +37 -0
- llama_stack/{distribution → core}/configure.py +74 -55
- llama_stack/core/conversations/conversations.py +309 -0
- llama_stack/core/datatypes.py +625 -0
- llama_stack/core/distribution.py +276 -0
- llama_stack/core/external.py +54 -0
- llama_stack/core/id_generation.py +42 -0
- llama_stack/core/inspect.py +86 -0
- llama_stack/core/library_client.py +539 -0
- llama_stack/core/prompts/prompts.py +234 -0
- llama_stack/core/providers.py +137 -0
- llama_stack/core/request_headers.py +115 -0
- llama_stack/core/resolver.py +506 -0
- llama_stack/core/routers/__init__.py +101 -0
- llama_stack/core/routers/datasets.py +73 -0
- llama_stack/core/routers/eval_scoring.py +155 -0
- llama_stack/core/routers/inference.py +645 -0
- llama_stack/core/routers/safety.py +85 -0
- llama_stack/core/routers/tool_runtime.py +91 -0
- llama_stack/core/routers/vector_io.py +442 -0
- llama_stack/core/routing_tables/benchmarks.py +62 -0
- llama_stack/core/routing_tables/common.py +254 -0
- llama_stack/core/routing_tables/datasets.py +91 -0
- llama_stack/core/routing_tables/models.py +163 -0
- llama_stack/core/routing_tables/scoring_functions.py +66 -0
- llama_stack/core/routing_tables/shields.py +61 -0
- llama_stack/core/routing_tables/toolgroups.py +129 -0
- llama_stack/core/routing_tables/vector_stores.py +292 -0
- llama_stack/core/server/auth.py +187 -0
- llama_stack/core/server/auth_providers.py +494 -0
- llama_stack/core/server/quota.py +110 -0
- llama_stack/core/server/routes.py +141 -0
- llama_stack/core/server/server.py +542 -0
- llama_stack/core/server/tracing.py +80 -0
- llama_stack/core/stack.py +546 -0
- llama_stack/core/start_stack.sh +117 -0
- llama_stack/core/storage/datatypes.py +283 -0
- llama_stack/{cli/model → core/store}/__init__.py +1 -1
- llama_stack/core/store/registry.py +199 -0
- llama_stack/core/testing_context.py +49 -0
- llama_stack/core/ui/app.py +55 -0
- llama_stack/core/ui/modules/api.py +32 -0
- llama_stack/core/ui/modules/utils.py +42 -0
- llama_stack/core/ui/page/distribution/datasets.py +18 -0
- llama_stack/core/ui/page/distribution/eval_tasks.py +20 -0
- llama_stack/core/ui/page/distribution/models.py +18 -0
- llama_stack/core/ui/page/distribution/providers.py +27 -0
- llama_stack/core/ui/page/distribution/resources.py +48 -0
- llama_stack/core/ui/page/distribution/scoring_functions.py +18 -0
- llama_stack/core/ui/page/distribution/shields.py +19 -0
- llama_stack/core/ui/page/evaluations/app_eval.py +143 -0
- llama_stack/core/ui/page/evaluations/native_eval.py +253 -0
- llama_stack/core/ui/page/playground/chat.py +130 -0
- llama_stack/core/ui/page/playground/tools.py +352 -0
- llama_stack/core/utils/config.py +30 -0
- llama_stack/{distribution → core}/utils/config_dirs.py +3 -6
- llama_stack/core/utils/config_resolution.py +125 -0
- llama_stack/core/utils/context.py +84 -0
- llama_stack/core/utils/exec.py +96 -0
- llama_stack/{providers/impls/meta_reference/codeshield/config.py → core/utils/image_types.py} +4 -3
- llama_stack/{distribution → core}/utils/model_utils.py +2 -2
- llama_stack/{distribution → core}/utils/prompt_for_config.py +30 -63
- llama_stack/{apis/batch_inference → distributions/dell}/__init__.py +1 -1
- llama_stack/distributions/dell/build.yaml +33 -0
- llama_stack/distributions/dell/dell.py +158 -0
- llama_stack/distributions/dell/run-with-safety.yaml +141 -0
- llama_stack/distributions/dell/run.yaml +132 -0
- llama_stack/distributions/meta-reference-gpu/__init__.py +7 -0
- llama_stack/distributions/meta-reference-gpu/build.yaml +32 -0
- llama_stack/distributions/meta-reference-gpu/meta_reference.py +163 -0
- llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +154 -0
- llama_stack/distributions/meta-reference-gpu/run.yaml +139 -0
- llama_stack/{apis/evals → distributions/nvidia}/__init__.py +1 -1
- llama_stack/distributions/nvidia/build.yaml +29 -0
- llama_stack/distributions/nvidia/nvidia.py +154 -0
- llama_stack/distributions/nvidia/run-with-safety.yaml +137 -0
- llama_stack/distributions/nvidia/run.yaml +116 -0
- llama_stack/distributions/open-benchmark/__init__.py +7 -0
- llama_stack/distributions/open-benchmark/build.yaml +36 -0
- llama_stack/distributions/open-benchmark/open_benchmark.py +303 -0
- llama_stack/distributions/open-benchmark/run.yaml +252 -0
- llama_stack/distributions/postgres-demo/__init__.py +7 -0
- llama_stack/distributions/postgres-demo/build.yaml +23 -0
- llama_stack/distributions/postgres-demo/postgres_demo.py +125 -0
- llama_stack/distributions/postgres-demo/run.yaml +115 -0
- llama_stack/{apis/memory → distributions/starter}/__init__.py +1 -1
- llama_stack/distributions/starter/build.yaml +61 -0
- llama_stack/distributions/starter/run-with-postgres-store.yaml +285 -0
- llama_stack/distributions/starter/run.yaml +276 -0
- llama_stack/distributions/starter/starter.py +345 -0
- llama_stack/distributions/starter-gpu/__init__.py +7 -0
- llama_stack/distributions/starter-gpu/build.yaml +61 -0
- llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +288 -0
- llama_stack/distributions/starter-gpu/run.yaml +279 -0
- llama_stack/distributions/starter-gpu/starter_gpu.py +20 -0
- llama_stack/distributions/template.py +456 -0
- llama_stack/distributions/watsonx/__init__.py +7 -0
- llama_stack/distributions/watsonx/build.yaml +33 -0
- llama_stack/distributions/watsonx/run.yaml +133 -0
- llama_stack/distributions/watsonx/watsonx.py +95 -0
- llama_stack/env.py +24 -0
- llama_stack/log.py +314 -0
- llama_stack/models/llama/checkpoint.py +164 -0
- llama_stack/models/llama/datatypes.py +164 -0
- llama_stack/models/llama/hadamard_utils.py +86 -0
- llama_stack/models/llama/llama3/args.py +74 -0
- llama_stack/models/llama/llama3/chat_format.py +286 -0
- llama_stack/models/llama/llama3/generation.py +376 -0
- llama_stack/models/llama/llama3/interface.py +255 -0
- llama_stack/models/llama/llama3/model.py +304 -0
- llama_stack/models/llama/llama3/multimodal/__init__.py +12 -0
- llama_stack/models/llama/llama3/multimodal/encoder_utils.py +180 -0
- llama_stack/models/llama/llama3/multimodal/image_transform.py +409 -0
- llama_stack/models/llama/llama3/multimodal/model.py +1430 -0
- llama_stack/models/llama/llama3/multimodal/utils.py +26 -0
- llama_stack/models/llama/llama3/prompt_templates/__init__.py +22 -0
- llama_stack/models/llama/llama3/prompt_templates/base.py +39 -0
- llama_stack/models/llama/llama3/prompt_templates/system_prompts.py +319 -0
- llama_stack/models/llama/llama3/prompt_templates/tool_response.py +62 -0
- llama_stack/models/llama/llama3/quantization/loader.py +316 -0
- llama_stack/models/llama/llama3/template_data.py +116 -0
- llama_stack/models/llama/llama3/tokenizer.model +128000 -0
- llama_stack/models/llama/llama3/tokenizer.py +198 -0
- llama_stack/models/llama/llama3/tool_utils.py +266 -0
- llama_stack/models/llama/llama3_1/__init__.py +12 -0
- llama_stack/models/llama/llama3_1/prompt_format.md +358 -0
- llama_stack/models/llama/llama3_1/prompts.py +258 -0
- llama_stack/models/llama/llama3_2/prompts_text.py +229 -0
- llama_stack/models/llama/llama3_2/prompts_vision.py +126 -0
- llama_stack/models/llama/llama3_2/text_prompt_format.md +286 -0
- llama_stack/models/llama/llama3_2/vision_prompt_format.md +141 -0
- llama_stack/models/llama/llama3_3/prompts.py +259 -0
- llama_stack/models/llama/llama4/args.py +107 -0
- llama_stack/models/llama/llama4/chat_format.py +317 -0
- llama_stack/models/llama/llama4/datatypes.py +56 -0
- llama_stack/models/llama/llama4/ffn.py +58 -0
- llama_stack/models/llama/llama4/generation.py +313 -0
- llama_stack/models/llama/llama4/model.py +437 -0
- llama_stack/models/llama/llama4/moe.py +214 -0
- llama_stack/models/llama/llama4/preprocess.py +435 -0
- llama_stack/models/llama/llama4/prompt_format.md +304 -0
- llama_stack/models/llama/llama4/prompt_templates/system_prompts.py +136 -0
- llama_stack/models/llama/llama4/prompts.py +279 -0
- llama_stack/models/llama/llama4/quantization/__init__.py +5 -0
- llama_stack/models/llama/llama4/quantization/loader.py +226 -0
- llama_stack/models/llama/llama4/tokenizer.model +200000 -0
- llama_stack/models/llama/llama4/tokenizer.py +263 -0
- llama_stack/models/llama/llama4/vision/__init__.py +5 -0
- llama_stack/models/llama/llama4/vision/embedding.py +210 -0
- llama_stack/models/llama/llama4/vision/encoder.py +412 -0
- llama_stack/models/llama/prompt_format.py +191 -0
- llama_stack/models/llama/quantize_impls.py +316 -0
- llama_stack/models/llama/sku_list.py +1029 -0
- llama_stack/models/llama/sku_types.py +233 -0
- llama_stack/models/llama/tokenizer_utils.py +40 -0
- llama_stack/providers/datatypes.py +136 -107
- llama_stack/providers/inline/__init__.py +5 -0
- llama_stack/providers/inline/agents/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/agents/meta_reference}/__init__.py +12 -5
- llama_stack/providers/inline/agents/meta_reference/agent_instance.py +1024 -0
- llama_stack/providers/inline/agents/meta_reference/agents.py +383 -0
- llama_stack/providers/inline/agents/meta_reference/config.py +37 -0
- llama_stack/providers/inline/agents/meta_reference/persistence.py +228 -0
- llama_stack/providers/inline/agents/meta_reference/responses/__init__.py +5 -0
- llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +423 -0
- llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +1226 -0
- llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py +449 -0
- llama_stack/providers/inline/agents/meta_reference/responses/types.py +194 -0
- llama_stack/providers/inline/agents/meta_reference/responses/utils.py +365 -0
- llama_stack/providers/inline/agents/meta_reference/safety.py +52 -0
- llama_stack/providers/inline/batches/__init__.py +5 -0
- llama_stack/providers/inline/batches/reference/__init__.py +36 -0
- llama_stack/providers/inline/batches/reference/batches.py +679 -0
- llama_stack/providers/inline/batches/reference/config.py +40 -0
- llama_stack/providers/inline/datasetio/__init__.py +5 -0
- llama_stack/providers/inline/datasetio/localfs/__init__.py +20 -0
- llama_stack/providers/inline/datasetio/localfs/config.py +23 -0
- llama_stack/providers/inline/datasetio/localfs/datasetio.py +113 -0
- llama_stack/providers/inline/eval/__init__.py +5 -0
- llama_stack/providers/inline/eval/meta_reference/__init__.py +28 -0
- llama_stack/providers/inline/eval/meta_reference/config.py +23 -0
- llama_stack/providers/inline/eval/meta_reference/eval.py +259 -0
- llama_stack/providers/inline/files/localfs/__init__.py +20 -0
- llama_stack/providers/inline/files/localfs/config.py +31 -0
- llama_stack/providers/inline/files/localfs/files.py +219 -0
- llama_stack/providers/inline/inference/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/__init__.py +4 -4
- llama_stack/providers/inline/inference/meta_reference/common.py +24 -0
- llama_stack/providers/inline/inference/meta_reference/config.py +68 -0
- llama_stack/providers/inline/inference/meta_reference/generators.py +211 -0
- llama_stack/providers/inline/inference/meta_reference/inference.py +158 -0
- llama_stack/providers/inline/inference/meta_reference/model_parallel.py +96 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/parallel_utils.py +56 -73
- llama_stack/providers/inline/inference/sentence_transformers/__init__.py +22 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/inference/sentence_transformers}/config.py +6 -4
- llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +83 -0
- llama_stack/providers/inline/post_training/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/utils.py +35 -0
- llama_stack/providers/inline/post_training/common/validator.py +36 -0
- llama_stack/providers/inline/post_training/huggingface/__init__.py +27 -0
- llama_stack/providers/inline/post_training/huggingface/config.py +83 -0
- llama_stack/providers/inline/post_training/huggingface/post_training.py +208 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py +519 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py +485 -0
- llama_stack/providers/inline/post_training/huggingface/utils.py +269 -0
- llama_stack/providers/inline/post_training/torchtune/__init__.py +27 -0
- llama_stack/providers/inline/post_training/torchtune/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +240 -0
- llama_stack/providers/inline/post_training/torchtune/common/utils.py +99 -0
- llama_stack/providers/inline/post_training/torchtune/config.py +20 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +57 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/sft.py +78 -0
- llama_stack/providers/inline/post_training/torchtune/post_training.py +178 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +588 -0
- llama_stack/providers/inline/safety/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/codeshield → inline/safety/code_scanner}/__init__.py +4 -2
- llama_stack/providers/inline/safety/code_scanner/code_scanner.py +128 -0
- llama_stack/providers/{impls/meta_reference/memory → inline/safety/code_scanner}/config.py +5 -3
- llama_stack/providers/inline/safety/llama_guard/__init__.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/config.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/llama_guard.py +489 -0
- llama_stack/providers/{adapters/memory/sample → inline/safety/prompt_guard}/__init__.py +4 -4
- llama_stack/providers/inline/safety/prompt_guard/config.py +32 -0
- llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py +131 -0
- llama_stack/providers/inline/scoring/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/__init__.py +25 -0
- llama_stack/providers/{adapters/memory/weaviate → inline/scoring/basic}/config.py +5 -7
- llama_stack/providers/inline/scoring/basic/scoring.py +126 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py +240 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py +41 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/docvqa.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/equality.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/ifeval.py +23 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_math_response.py +27 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py +71 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/subset_of.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py +80 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py +66 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py +58 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py +38 -0
- llama_stack/providers/inline/scoring/basic/utils/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py +3319 -0
- llama_stack/providers/inline/scoring/basic/utils/math_utils.py +330 -0
- llama_stack/providers/inline/scoring/braintrust/__init__.py +27 -0
- llama_stack/providers/inline/scoring/braintrust/braintrust.py +230 -0
- llama_stack/providers/inline/scoring/braintrust/config.py +21 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_correctness.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_relevancy.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_similarity.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_entity_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_precision.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_relevancy.py +23 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/factuality.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/faithfulness.py +24 -0
- llama_stack/providers/inline/scoring/llm_as_judge/__init__.py +21 -0
- llama_stack/providers/inline/scoring/llm_as_judge/config.py +14 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring.py +113 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +96 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +20 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +81 -0
- llama_stack/providers/inline/telemetry/__init__.py +5 -0
- llama_stack/providers/inline/telemetry/meta_reference/__init__.py +21 -0
- llama_stack/providers/inline/telemetry/meta_reference/config.py +47 -0
- llama_stack/providers/inline/telemetry/meta_reference/telemetry.py +252 -0
- llama_stack/providers/inline/tool_runtime/__init__.py +5 -0
- llama_stack/providers/inline/tool_runtime/rag/__init__.py +19 -0
- llama_stack/providers/{impls/meta_reference/telemetry → inline/tool_runtime/rag}/config.py +5 -3
- llama_stack/providers/inline/tool_runtime/rag/context_retriever.py +77 -0
- llama_stack/providers/inline/tool_runtime/rag/memory.py +332 -0
- llama_stack/providers/inline/vector_io/__init__.py +5 -0
- llama_stack/providers/inline/vector_io/chroma/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/chroma/config.py +30 -0
- llama_stack/providers/inline/vector_io/faiss/__init__.py +21 -0
- llama_stack/providers/inline/vector_io/faiss/config.py +26 -0
- llama_stack/providers/inline/vector_io/faiss/faiss.py +293 -0
- llama_stack/providers/inline/vector_io/milvus/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/milvus/config.py +29 -0
- llama_stack/providers/inline/vector_io/qdrant/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/qdrant/config.py +29 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/config.py +26 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +483 -0
- llama_stack/providers/registry/agents.py +16 -18
- llama_stack/providers/registry/batches.py +26 -0
- llama_stack/providers/registry/datasetio.py +49 -0
- llama_stack/providers/registry/eval.py +46 -0
- llama_stack/providers/registry/files.py +31 -0
- llama_stack/providers/registry/inference.py +273 -118
- llama_stack/providers/registry/post_training.py +69 -0
- llama_stack/providers/registry/safety.py +46 -41
- llama_stack/providers/registry/scoring.py +51 -0
- llama_stack/providers/registry/tool_runtime.py +87 -0
- llama_stack/providers/registry/vector_io.py +828 -0
- llama_stack/providers/remote/__init__.py +5 -0
- llama_stack/providers/remote/agents/__init__.py +5 -0
- llama_stack/providers/remote/datasetio/__init__.py +5 -0
- llama_stack/providers/{adapters/memory/chroma → remote/datasetio/huggingface}/__init__.py +7 -4
- llama_stack/providers/remote/datasetio/huggingface/config.py +23 -0
- llama_stack/providers/remote/datasetio/huggingface/huggingface.py +99 -0
- llama_stack/providers/remote/datasetio/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/datasetio/nvidia/config.py +61 -0
- llama_stack/providers/remote/datasetio/nvidia/datasetio.py +116 -0
- llama_stack/providers/remote/eval/__init__.py +5 -0
- llama_stack/providers/remote/eval/nvidia/__init__.py +31 -0
- llama_stack/providers/remote/eval/nvidia/config.py +29 -0
- llama_stack/providers/remote/eval/nvidia/eval.py +162 -0
- llama_stack/providers/remote/files/s3/__init__.py +19 -0
- llama_stack/providers/remote/files/s3/config.py +42 -0
- llama_stack/providers/remote/files/s3/files.py +313 -0
- llama_stack/providers/remote/inference/__init__.py +5 -0
- llama_stack/providers/{adapters/safety/sample → remote/inference/anthropic}/__init__.py +4 -6
- llama_stack/providers/remote/inference/anthropic/anthropic.py +36 -0
- llama_stack/providers/remote/inference/anthropic/config.py +28 -0
- llama_stack/providers/{impls/meta_reference/telemetry → remote/inference/azure}/__init__.py +4 -4
- llama_stack/providers/remote/inference/azure/azure.py +25 -0
- llama_stack/providers/remote/inference/azure/config.py +61 -0
- llama_stack/providers/{adapters → remote}/inference/bedrock/__init__.py +18 -17
- llama_stack/providers/remote/inference/bedrock/bedrock.py +142 -0
- llama_stack/providers/{adapters/inference/sample → remote/inference/bedrock}/config.py +3 -4
- llama_stack/providers/remote/inference/bedrock/models.py +29 -0
- llama_stack/providers/remote/inference/cerebras/__init__.py +19 -0
- llama_stack/providers/remote/inference/cerebras/cerebras.py +28 -0
- llama_stack/providers/remote/inference/cerebras/config.py +30 -0
- llama_stack/providers/{adapters → remote}/inference/databricks/__init__.py +4 -5
- llama_stack/providers/remote/inference/databricks/config.py +37 -0
- llama_stack/providers/remote/inference/databricks/databricks.py +44 -0
- llama_stack/providers/{adapters → remote}/inference/fireworks/__init__.py +8 -4
- llama_stack/providers/remote/inference/fireworks/config.py +27 -0
- llama_stack/providers/remote/inference/fireworks/fireworks.py +27 -0
- llama_stack/providers/{adapters/memory/pgvector → remote/inference/gemini}/__init__.py +4 -4
- llama_stack/providers/remote/inference/gemini/config.py +28 -0
- llama_stack/providers/remote/inference/gemini/gemini.py +82 -0
- llama_stack/providers/remote/inference/groq/__init__.py +15 -0
- llama_stack/providers/remote/inference/groq/config.py +34 -0
- llama_stack/providers/remote/inference/groq/groq.py +18 -0
- llama_stack/providers/remote/inference/llama_openai_compat/__init__.py +15 -0
- llama_stack/providers/remote/inference/llama_openai_compat/config.py +34 -0
- llama_stack/providers/remote/inference/llama_openai_compat/llama.py +46 -0
- llama_stack/providers/remote/inference/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/inference/nvidia/config.py +64 -0
- llama_stack/providers/remote/inference/nvidia/nvidia.py +61 -0
- llama_stack/providers/{adapters/safety/sample/config.py → remote/inference/nvidia/utils.py} +3 -4
- llama_stack/providers/{impls/vllm → remote/inference/ollama}/__init__.py +4 -6
- llama_stack/providers/remote/inference/ollama/config.py +25 -0
- llama_stack/providers/remote/inference/ollama/ollama.py +102 -0
- llama_stack/providers/{adapters/telemetry/opentelemetry → remote/inference/openai}/__init__.py +4 -4
- llama_stack/providers/remote/inference/openai/config.py +39 -0
- llama_stack/providers/remote/inference/openai/openai.py +38 -0
- llama_stack/providers/remote/inference/passthrough/__init__.py +23 -0
- llama_stack/providers/remote/inference/passthrough/config.py +34 -0
- llama_stack/providers/remote/inference/passthrough/passthrough.py +122 -0
- llama_stack/providers/remote/inference/runpod/__init__.py +16 -0
- llama_stack/providers/remote/inference/runpod/config.py +32 -0
- llama_stack/providers/remote/inference/runpod/runpod.py +42 -0
- llama_stack/providers/remote/inference/sambanova/__init__.py +16 -0
- llama_stack/providers/remote/inference/sambanova/config.py +34 -0
- llama_stack/providers/remote/inference/sambanova/sambanova.py +28 -0
- llama_stack/providers/{adapters → remote}/inference/tgi/__init__.py +3 -4
- llama_stack/providers/remote/inference/tgi/config.py +76 -0
- llama_stack/providers/remote/inference/tgi/tgi.py +85 -0
- llama_stack/providers/{adapters → remote}/inference/together/__init__.py +8 -4
- llama_stack/providers/remote/inference/together/config.py +27 -0
- llama_stack/providers/remote/inference/together/together.py +102 -0
- llama_stack/providers/remote/inference/vertexai/__init__.py +15 -0
- llama_stack/providers/remote/inference/vertexai/config.py +48 -0
- llama_stack/providers/remote/inference/vertexai/vertexai.py +54 -0
- llama_stack/providers/remote/inference/vllm/__init__.py +22 -0
- llama_stack/providers/remote/inference/vllm/config.py +59 -0
- llama_stack/providers/remote/inference/vllm/vllm.py +111 -0
- llama_stack/providers/remote/inference/watsonx/__init__.py +15 -0
- llama_stack/providers/remote/inference/watsonx/config.py +45 -0
- llama_stack/providers/remote/inference/watsonx/watsonx.py +336 -0
- llama_stack/providers/remote/post_training/__init__.py +5 -0
- llama_stack/providers/remote/post_training/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/post_training/nvidia/config.py +113 -0
- llama_stack/providers/remote/post_training/nvidia/models.py +27 -0
- llama_stack/providers/remote/post_training/nvidia/post_training.py +430 -0
- llama_stack/providers/remote/post_training/nvidia/utils.py +63 -0
- llama_stack/providers/remote/safety/__init__.py +5 -0
- llama_stack/providers/remote/safety/bedrock/bedrock.py +111 -0
- llama_stack/providers/remote/safety/bedrock/config.py +14 -0
- llama_stack/providers/{adapters/inference/sample → remote/safety/nvidia}/__init__.py +5 -4
- llama_stack/providers/remote/safety/nvidia/config.py +40 -0
- llama_stack/providers/remote/safety/nvidia/nvidia.py +161 -0
- llama_stack/providers/{adapters/agents/sample → remote/safety/sambanova}/__init__.py +5 -4
- llama_stack/providers/remote/safety/sambanova/config.py +37 -0
- llama_stack/providers/remote/safety/sambanova/sambanova.py +98 -0
- llama_stack/providers/remote/tool_runtime/__init__.py +5 -0
- llama_stack/providers/remote/tool_runtime/bing_search/__init__.py +21 -0
- llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +112 -0
- llama_stack/providers/remote/tool_runtime/bing_search/config.py +22 -0
- llama_stack/providers/remote/tool_runtime/brave_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +148 -0
- llama_stack/providers/remote/tool_runtime/brave_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py +15 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py +20 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +73 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +84 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/__init__.py +22 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py +21 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +140 -0
- llama_stack/providers/remote/vector_io/__init__.py +5 -0
- llama_stack/providers/remote/vector_io/chroma/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/chroma/chroma.py +215 -0
- llama_stack/providers/remote/vector_io/chroma/config.py +28 -0
- llama_stack/providers/remote/vector_io/milvus/__init__.py +18 -0
- llama_stack/providers/remote/vector_io/milvus/config.py +35 -0
- llama_stack/providers/remote/vector_io/milvus/milvus.py +375 -0
- llama_stack/providers/remote/vector_io/pgvector/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/pgvector/config.py +47 -0
- llama_stack/providers/remote/vector_io/pgvector/pgvector.py +460 -0
- llama_stack/providers/remote/vector_io/qdrant/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/qdrant/config.py +37 -0
- llama_stack/providers/remote/vector_io/qdrant/qdrant.py +265 -0
- llama_stack/providers/remote/vector_io/weaviate/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/weaviate/config.py +32 -0
- llama_stack/providers/remote/vector_io/weaviate/weaviate.py +393 -0
- llama_stack/providers/utils/bedrock/__init__.py +5 -0
- llama_stack/providers/utils/bedrock/client.py +74 -0
- llama_stack/providers/utils/bedrock/config.py +64 -0
- llama_stack/providers/utils/bedrock/refreshable_boto_session.py +112 -0
- llama_stack/providers/utils/common/__init__.py +5 -0
- llama_stack/providers/utils/common/data_schema_validator.py +103 -0
- llama_stack/providers/utils/datasetio/__init__.py +5 -0
- llama_stack/providers/utils/datasetio/url_utils.py +47 -0
- llama_stack/providers/utils/files/__init__.py +5 -0
- llama_stack/providers/utils/files/form_data.py +69 -0
- llama_stack/providers/utils/inference/__init__.py +8 -7
- llama_stack/providers/utils/inference/embedding_mixin.py +101 -0
- llama_stack/providers/utils/inference/inference_store.py +264 -0
- llama_stack/providers/utils/inference/litellm_openai_mixin.py +336 -0
- llama_stack/providers/utils/inference/model_registry.py +173 -23
- llama_stack/providers/utils/inference/openai_compat.py +1261 -49
- llama_stack/providers/utils/inference/openai_mixin.py +506 -0
- llama_stack/providers/utils/inference/prompt_adapter.py +365 -67
- llama_stack/providers/utils/kvstore/api.py +6 -6
- llama_stack/providers/utils/kvstore/config.py +28 -48
- llama_stack/providers/utils/kvstore/kvstore.py +61 -15
- llama_stack/providers/utils/kvstore/mongodb/__init__.py +9 -0
- llama_stack/providers/utils/kvstore/mongodb/mongodb.py +82 -0
- llama_stack/providers/utils/kvstore/postgres/__init__.py +7 -0
- llama_stack/providers/utils/kvstore/postgres/postgres.py +114 -0
- llama_stack/providers/utils/kvstore/redis/redis.py +33 -9
- llama_stack/providers/utils/kvstore/sqlite/config.py +2 -1
- llama_stack/providers/utils/kvstore/sqlite/sqlite.py +123 -22
- llama_stack/providers/utils/memory/file_utils.py +1 -1
- llama_stack/providers/utils/memory/openai_vector_store_mixin.py +1304 -0
- llama_stack/providers/utils/memory/vector_store.py +220 -82
- llama_stack/providers/utils/pagination.py +43 -0
- llama_stack/providers/utils/responses/__init__.py +5 -0
- llama_stack/providers/utils/responses/responses_store.py +292 -0
- llama_stack/providers/utils/scheduler.py +270 -0
- llama_stack/providers/utils/scoring/__init__.py +5 -0
- llama_stack/providers/utils/scoring/aggregation_utils.py +75 -0
- llama_stack/providers/utils/scoring/base_scoring_fn.py +114 -0
- llama_stack/providers/utils/scoring/basic_scoring_utils.py +26 -0
- llama_stack/providers/utils/sqlstore/__init__.py +5 -0
- llama_stack/providers/utils/sqlstore/api.py +128 -0
- llama_stack/providers/utils/sqlstore/authorized_sqlstore.py +319 -0
- llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py +343 -0
- llama_stack/providers/utils/sqlstore/sqlstore.py +70 -0
- llama_stack/providers/utils/telemetry/trace_protocol.py +142 -0
- llama_stack/providers/utils/telemetry/tracing.py +192 -53
- llama_stack/providers/utils/tools/__init__.py +5 -0
- llama_stack/providers/utils/tools/mcp.py +148 -0
- llama_stack/providers/utils/tools/ttl_dict.py +70 -0
- llama_stack/providers/utils/vector_io/__init__.py +5 -0
- llama_stack/providers/utils/vector_io/vector_utils.py +156 -0
- llama_stack/schema_utils.py +118 -0
- llama_stack/strong_typing/__init__.py +19 -0
- llama_stack/strong_typing/auxiliary.py +228 -0
- llama_stack/strong_typing/classdef.py +440 -0
- llama_stack/strong_typing/core.py +46 -0
- llama_stack/strong_typing/deserializer.py +877 -0
- llama_stack/strong_typing/docstring.py +409 -0
- llama_stack/strong_typing/exception.py +23 -0
- llama_stack/strong_typing/inspection.py +1085 -0
- llama_stack/strong_typing/mapping.py +40 -0
- llama_stack/strong_typing/name.py +182 -0
- llama_stack/strong_typing/py.typed +0 -0
- llama_stack/strong_typing/schema.py +792 -0
- llama_stack/strong_typing/serialization.py +97 -0
- llama_stack/strong_typing/serializer.py +500 -0
- llama_stack/strong_typing/slots.py +27 -0
- llama_stack/strong_typing/topological.py +89 -0
- llama_stack/testing/__init__.py +5 -0
- llama_stack/testing/api_recorder.py +956 -0
- llama_stack/ui/node_modules/flatted/python/flatted.py +149 -0
- llama_stack-0.3.4.dist-info/METADATA +261 -0
- llama_stack-0.3.4.dist-info/RECORD +625 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/WHEEL +1 -1
- llama_stack/apis/agents/client.py +0 -292
- llama_stack/apis/agents/event_logger.py +0 -184
- llama_stack/apis/batch_inference/batch_inference.py +0 -72
- llama_stack/apis/common/deployment_types.py +0 -31
- llama_stack/apis/dataset/dataset.py +0 -63
- llama_stack/apis/evals/evals.py +0 -122
- llama_stack/apis/inference/client.py +0 -197
- llama_stack/apis/inspect/client.py +0 -82
- llama_stack/apis/memory/client.py +0 -155
- llama_stack/apis/memory/memory.py +0 -65
- llama_stack/apis/memory_banks/__init__.py +0 -7
- llama_stack/apis/memory_banks/client.py +0 -101
- llama_stack/apis/memory_banks/memory_banks.py +0 -78
- llama_stack/apis/models/client.py +0 -83
- llama_stack/apis/reward_scoring/__init__.py +0 -7
- llama_stack/apis/reward_scoring/reward_scoring.py +0 -55
- llama_stack/apis/safety/client.py +0 -105
- llama_stack/apis/shields/client.py +0 -79
- llama_stack/cli/download.py +0 -340
- llama_stack/cli/model/describe.py +0 -82
- llama_stack/cli/model/download.py +0 -24
- llama_stack/cli/model/list.py +0 -62
- llama_stack/cli/model/model.py +0 -34
- llama_stack/cli/model/prompt_format.py +0 -112
- llama_stack/cli/model/safety_models.py +0 -52
- llama_stack/cli/stack/build.py +0 -299
- llama_stack/cli/stack/configure.py +0 -178
- llama_stack/distribution/build.py +0 -123
- llama_stack/distribution/build_conda_env.sh +0 -136
- llama_stack/distribution/build_container.sh +0 -142
- llama_stack/distribution/common.sh +0 -40
- llama_stack/distribution/configure_container.sh +0 -47
- llama_stack/distribution/datatypes.py +0 -139
- llama_stack/distribution/distribution.py +0 -58
- llama_stack/distribution/inspect.py +0 -67
- llama_stack/distribution/request_headers.py +0 -57
- llama_stack/distribution/resolver.py +0 -323
- llama_stack/distribution/routers/__init__.py +0 -48
- llama_stack/distribution/routers/routers.py +0 -158
- llama_stack/distribution/routers/routing_tables.py +0 -173
- llama_stack/distribution/server/endpoints.py +0 -48
- llama_stack/distribution/server/server.py +0 -343
- llama_stack/distribution/start_conda_env.sh +0 -42
- llama_stack/distribution/start_container.sh +0 -64
- llama_stack/distribution/templates/local-bedrock-conda-example-build.yaml +0 -10
- llama_stack/distribution/templates/local-build.yaml +0 -10
- llama_stack/distribution/templates/local-databricks-build.yaml +0 -10
- llama_stack/distribution/templates/local-fireworks-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-endpoint-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-serverless-build.yaml +0 -10
- llama_stack/distribution/templates/local-ollama-build.yaml +0 -10
- llama_stack/distribution/templates/local-tgi-build.yaml +0 -10
- llama_stack/distribution/templates/local-together-build.yaml +0 -10
- llama_stack/distribution/templates/local-vllm-build.yaml +0 -10
- llama_stack/distribution/utils/exec.py +0 -105
- llama_stack/providers/adapters/agents/sample/sample.py +0 -18
- llama_stack/providers/adapters/inference/bedrock/bedrock.py +0 -451
- llama_stack/providers/adapters/inference/bedrock/config.py +0 -55
- llama_stack/providers/adapters/inference/databricks/config.py +0 -21
- llama_stack/providers/adapters/inference/databricks/databricks.py +0 -125
- llama_stack/providers/adapters/inference/fireworks/config.py +0 -20
- llama_stack/providers/adapters/inference/fireworks/fireworks.py +0 -130
- llama_stack/providers/adapters/inference/ollama/__init__.py +0 -19
- llama_stack/providers/adapters/inference/ollama/ollama.py +0 -175
- llama_stack/providers/adapters/inference/sample/sample.py +0 -23
- llama_stack/providers/adapters/inference/tgi/config.py +0 -43
- llama_stack/providers/adapters/inference/tgi/tgi.py +0 -200
- llama_stack/providers/adapters/inference/together/config.py +0 -22
- llama_stack/providers/adapters/inference/together/together.py +0 -143
- llama_stack/providers/adapters/memory/chroma/chroma.py +0 -157
- llama_stack/providers/adapters/memory/pgvector/config.py +0 -17
- llama_stack/providers/adapters/memory/pgvector/pgvector.py +0 -211
- llama_stack/providers/adapters/memory/sample/sample.py +0 -23
- llama_stack/providers/adapters/memory/weaviate/__init__.py +0 -15
- llama_stack/providers/adapters/memory/weaviate/weaviate.py +0 -190
- llama_stack/providers/adapters/safety/bedrock/bedrock.py +0 -113
- llama_stack/providers/adapters/safety/bedrock/config.py +0 -16
- llama_stack/providers/adapters/safety/sample/sample.py +0 -23
- llama_stack/providers/adapters/safety/together/__init__.py +0 -18
- llama_stack/providers/adapters/safety/together/config.py +0 -26
- llama_stack/providers/adapters/safety/together/together.py +0 -101
- llama_stack/providers/adapters/telemetry/opentelemetry/config.py +0 -12
- llama_stack/providers/adapters/telemetry/opentelemetry/opentelemetry.py +0 -201
- llama_stack/providers/adapters/telemetry/sample/__init__.py +0 -17
- llama_stack/providers/adapters/telemetry/sample/config.py +0 -12
- llama_stack/providers/adapters/telemetry/sample/sample.py +0 -18
- llama_stack/providers/impls/meta_reference/agents/agent_instance.py +0 -844
- llama_stack/providers/impls/meta_reference/agents/agents.py +0 -161
- llama_stack/providers/impls/meta_reference/agents/persistence.py +0 -84
- llama_stack/providers/impls/meta_reference/agents/rag/context_retriever.py +0 -74
- llama_stack/providers/impls/meta_reference/agents/safety.py +0 -57
- llama_stack/providers/impls/meta_reference/agents/tests/code_execution.py +0 -93
- llama_stack/providers/impls/meta_reference/agents/tests/test_chat_agent.py +0 -305
- llama_stack/providers/impls/meta_reference/agents/tools/base.py +0 -20
- llama_stack/providers/impls/meta_reference/agents/tools/builtin.py +0 -375
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_env_prefix.py +0 -133
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_execution.py +0 -256
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/matplotlib_custom_backend.py +0 -87
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/utils.py +0 -21
- llama_stack/providers/impls/meta_reference/agents/tools/safety.py +0 -43
- llama_stack/providers/impls/meta_reference/codeshield/code_scanner.py +0 -58
- llama_stack/providers/impls/meta_reference/inference/config.py +0 -45
- llama_stack/providers/impls/meta_reference/inference/generation.py +0 -376
- llama_stack/providers/impls/meta_reference/inference/inference.py +0 -280
- llama_stack/providers/impls/meta_reference/inference/model_parallel.py +0 -99
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_impls.py +0 -184
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_txest_disabled.py +0 -76
- llama_stack/providers/impls/meta_reference/inference/quantization/loader.py +0 -97
- llama_stack/providers/impls/meta_reference/inference/quantization/scripts/quantize_checkpoint.py +0 -161
- llama_stack/providers/impls/meta_reference/memory/__init__.py +0 -19
- llama_stack/providers/impls/meta_reference/memory/faiss.py +0 -113
- llama_stack/providers/impls/meta_reference/safety/__init__.py +0 -17
- llama_stack/providers/impls/meta_reference/safety/base.py +0 -57
- llama_stack/providers/impls/meta_reference/safety/config.py +0 -48
- llama_stack/providers/impls/meta_reference/safety/llama_guard.py +0 -268
- llama_stack/providers/impls/meta_reference/safety/prompt_guard.py +0 -145
- llama_stack/providers/impls/meta_reference/safety/safety.py +0 -112
- llama_stack/providers/impls/meta_reference/telemetry/console.py +0 -89
- llama_stack/providers/impls/vllm/config.py +0 -35
- llama_stack/providers/impls/vllm/vllm.py +0 -241
- llama_stack/providers/registry/memory.py +0 -78
- llama_stack/providers/registry/telemetry.py +0 -44
- llama_stack/providers/tests/agents/test_agents.py +0 -210
- llama_stack/providers/tests/inference/test_inference.py +0 -257
- llama_stack/providers/tests/inference/test_prompt_adapter.py +0 -126
- llama_stack/providers/tests/memory/test_memory.py +0 -136
- llama_stack/providers/tests/resolver.py +0 -100
- llama_stack/providers/tests/safety/test_safety.py +0 -77
- llama_stack-0.0.42.dist-info/METADATA +0 -137
- llama_stack-0.0.42.dist-info/RECORD +0 -256
- /llama_stack/{distribution → core}/__init__.py +0 -0
- /llama_stack/{distribution/server → core/access_control}/__init__.py +0 -0
- /llama_stack/{distribution/utils → core/conversations}/__init__.py +0 -0
- /llama_stack/{providers/adapters → core/prompts}/__init__.py +0 -0
- /llama_stack/{providers/adapters/agents → core/routing_tables}/__init__.py +0 -0
- /llama_stack/{providers/adapters/inference → core/server}/__init__.py +0 -0
- /llama_stack/{providers/adapters/memory → core/storage}/__init__.py +0 -0
- /llama_stack/{providers/adapters/safety → core/ui}/__init__.py +0 -0
- /llama_stack/{providers/adapters/telemetry → core/ui/modules}/__init__.py +0 -0
- /llama_stack/{providers/impls → core/ui/page}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference → core/ui/page/distribution}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/rag → core/ui/page/evaluations}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tests → core/ui/page/playground}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools → core/utils}/__init__.py +0 -0
- /llama_stack/{distribution → core}/utils/dynamic.py +0 -0
- /llama_stack/{distribution → core}/utils/serialize.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools/ipython_tool → distributions}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization → models}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization/scripts → models/llama}/__init__.py +0 -0
- /llama_stack/{providers/tests → models/llama/llama3}/__init__.py +0 -0
- /llama_stack/{providers/tests/agents → models/llama/llama3/quantization}/__init__.py +0 -0
- /llama_stack/{providers/tests/inference → models/llama/llama3_2}/__init__.py +0 -0
- /llama_stack/{providers/tests/memory → models/llama/llama3_3}/__init__.py +0 -0
- /llama_stack/{providers/tests/safety → models/llama/llama4}/__init__.py +0 -0
- /llama_stack/{scripts → models/llama/llama4/prompt_templates}/__init__.py +0 -0
- /llama_stack/providers/{adapters → remote}/safety/bedrock/__init__.py +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/entry_points.txt +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info/licenses}/LICENSE +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/top_level.txt +0 -0
```diff
--- /dev/null
+++ b/llama_stack/providers/inline/safety/code_scanner/code_scanner.py
@@ -0,0 +1,128 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import uuid
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from codeshield.cs import CodeShieldScanResult
+
+from llama_stack.apis.inference import OpenAIMessageParam
+from llama_stack.apis.safety import (
+    RunShieldResponse,
+    Safety,
+    SafetyViolation,
+    ViolationLevel,
+)
+from llama_stack.apis.safety.safety import ModerationObject, ModerationObjectResults
+from llama_stack.apis.shields import Shield
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.prompt_adapter import (
+    interleaved_content_as_str,
+)
+
+from .config import CodeScannerConfig
+
+log = get_logger(name=__name__, category="safety")
+
+ALLOWED_CODE_SCANNER_MODEL_IDS = [
+    "code-scanner",
+    "code-shield",
+]
+
+
+class MetaReferenceCodeScannerSafetyImpl(Safety):
+    def __init__(self, config: CodeScannerConfig, deps) -> None:
+        self.config = config
+
+    async def initialize(self) -> None:
+        pass
+
+    async def shutdown(self) -> None:
+        pass
+
+    async def register_shield(self, shield: Shield) -> None:
+        if shield.provider_resource_id not in ALLOWED_CODE_SCANNER_MODEL_IDS:
+            raise ValueError(
+                f"Unsupported Code Scanner ID: {shield.provider_resource_id}. Allowed IDs: {ALLOWED_CODE_SCANNER_MODEL_IDS}"
+            )
+
+    async def run_shield(
+        self,
+        shield_id: str,
+        messages: list[OpenAIMessageParam],
+        params: dict[str, Any] = None,
+    ) -> RunShieldResponse:
+        shield = await self.shield_store.get_shield(shield_id)
+        if not shield:
+            raise ValueError(f"Shield {shield_id} not found")
+
+        from codeshield.cs import CodeShield
+
+        text = "\n".join([interleaved_content_as_str(m.content) for m in messages])
+        log.info(f"Running CodeScannerShield on {text[50:]}")
+        result = await CodeShield.scan_code(text)
+
+        violation = None
+        if result.is_insecure:
+            violation = SafetyViolation(
+                violation_level=(ViolationLevel.ERROR),
+                user_message="Sorry, I found security concerns in the code.",
+                metadata={"violation_type": ",".join([issue.pattern_id for issue in result.issues_found])},
+            )
+        return RunShieldResponse(violation=violation)
+
+    def get_moderation_object_results(self, scan_result: "CodeShieldScanResult") -> ModerationObjectResults:
+        categories = {}
+        category_scores = {}
+        category_applied_input_types = {}
+
+        flagged = scan_result.is_insecure
+        user_message = None
+        metadata = {}
+
+        if scan_result.is_insecure:
+            pattern_ids = [issue.pattern_id for issue in scan_result.issues_found]
+            categories = dict.fromkeys(pattern_ids, True)
+            category_scores = dict.fromkeys(pattern_ids, 1.0)
+            category_applied_input_types = {key: ["text"] for key in pattern_ids}
+            user_message = f"Security concerns detected in the code. {scan_result.recommended_treatment.name}: {', '.join([issue.description for issue in scan_result.issues_found])}"
+            metadata = {"violation_type": ",".join([issue.pattern_id for issue in scan_result.issues_found])}
+
+        return ModerationObjectResults(
+            flagged=flagged,
+            categories=categories,
+            category_scores=category_scores,
+            category_applied_input_types=category_applied_input_types,
+            user_message=user_message,
+            metadata=metadata,
+        )
+
+    async def run_moderation(self, input: str | list[str], model: str) -> ModerationObject:
+        inputs = input if isinstance(input, list) else [input]
+        results = []
+
+        from codeshield.cs import CodeShield
+
+        for text_input in inputs:
+            log.info(f"Running CodeScannerShield moderation on input: {text_input[:100]}...")
+            try:
+                scan_result = await CodeShield.scan_code(text_input)
+                moderation_result = self.get_moderation_object_results(scan_result)
+            except Exception as e:
+                log.error(f"CodeShield.scan_code failed: {e}")
+                # create safe fallback response on scanner failure to avoid blocking legitimate requests
+                moderation_result = ModerationObjectResults(
+                    flagged=False,
+                    categories={},
+                    category_scores={},
+                    category_applied_input_types={},
+                    user_message=None,
+                    metadata={"scanner_error": str(e)},
+                )
+            results.append(moderation_result)
+
+        return ModerationObject(id=str(uuid.uuid4()), model=model, results=results)
```
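For orientation, here is a minimal, hypothetical sketch of driving the new `run_moderation` entry point directly. In a running stack the provider is constructed by the resolver and the `codeshield` package must be installed; the direct construction below is illustrative only:

```python
import asyncio

from llama_stack.providers.inline.safety.code_scanner.code_scanner import (
    MetaReferenceCodeScannerSafetyImpl,
)
from llama_stack.providers.inline.safety.code_scanner.config import CodeScannerConfig


async def main() -> None:
    # Illustrative direct construction; normally the stack resolver wires this up.
    impl = MetaReferenceCodeScannerSafetyImpl(CodeScannerConfig(), deps={})
    await impl.initialize()

    # run_moderation accepts a single string or a list of strings.
    moderation = await impl.run_moderation(
        input='eval(input("enter code: "))',  # hypothetical snippet to scan
        model="code-scanner",
    )
    for result in moderation.results:
        print(result.flagged, result.categories)


asyncio.run(main())
```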
```diff
--- a/llama_stack/providers/impls/meta_reference/memory/config.py
+++ b/llama_stack/providers/inline/safety/code_scanner/config.py
@@ -4,10 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from
+from typing import Any
 
 from pydantic import BaseModel
 
 
-
-
+class CodeScannerConfig(BaseModel):
+    @classmethod
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
+        return {}
```
```diff
--- /dev/null
+++ b/llama_stack/providers/inline/safety/llama_guard/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from .config import LlamaGuardConfig
+
+
+async def get_provider_impl(config: LlamaGuardConfig, deps: dict[str, Any]):
+    from .llama_guard import LlamaGuardSafetyImpl
+
+    assert isinstance(config, LlamaGuardConfig), f"Unexpected config type: {type(config)}"
+
+    impl = LlamaGuardSafetyImpl(config, deps)
+    await impl.initialize()
+    return impl
```
```diff
--- /dev/null
+++ b/llama_stack/providers/inline/safety/llama_guard/config.py
@@ -0,0 +1,19 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from pydantic import BaseModel
+
+
+class LlamaGuardConfig(BaseModel):
+    excluded_categories: list[str] = []
+
+    @classmethod
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
+        return {
+            "excluded_categories": [],
+        }
```
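As a quick illustration of how these `sample_run_config` hooks behave (a hypothetical call; the `__distro_dir__` argument presumably comes from the distribution templates when rendering provider entries for `run.yaml`):

```python
from llama_stack.providers.inline.safety.llama_guard.config import LlamaGuardConfig

# Hypothetical invocation; the distro dir value here is illustrative.
config = LlamaGuardConfig.sample_run_config(__distro_dir__="~/.llama/distributions/starter")
print(config)  # {'excluded_categories': []}
```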
@@ -0,0 +1,489 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import re
+import uuid
+from string import Template
+from typing import Any
+
+from llama_stack.apis.common.content_types import ImageContentItem, TextContentItem
+from llama_stack.apis.inference import (
+    Inference,
+    OpenAIChatCompletionRequestWithExtraBody,
+    OpenAIMessageParam,
+    OpenAIUserMessageParam,
+)
+from llama_stack.apis.safety import (
+    RunShieldResponse,
+    Safety,
+    SafetyViolation,
+    ViolationLevel,
+)
+from llama_stack.apis.safety.safety import ModerationObject, ModerationObjectResults
+from llama_stack.apis.shields import Shield
+from llama_stack.core.datatypes import Api
+from llama_stack.log import get_logger
+from llama_stack.models.llama.datatypes import Role
+from llama_stack.models.llama.sku_types import CoreModelId
+from llama_stack.providers.datatypes import ShieldsProtocolPrivate
+from llama_stack.providers.utils.inference.prompt_adapter import (
+    interleaved_content_as_str,
+)
+
+from .config import LlamaGuardConfig
+
+CANNED_RESPONSE_TEXT = "I can't answer that. Can I help with something else?"
+
+SAFE_RESPONSE = "safe"
+
+CAT_VIOLENT_CRIMES = "Violent Crimes"
+CAT_NON_VIOLENT_CRIMES = "Non-Violent Crimes"
+CAT_SEX_CRIMES = "Sex Crimes"
+CAT_CHILD_EXPLOITATION = "Child Exploitation"
+CAT_DEFAMATION = "Defamation"
+CAT_SPECIALIZED_ADVICE = "Specialized Advice"
+CAT_PRIVACY = "Privacy"
+CAT_INTELLECTUAL_PROPERTY = "Intellectual Property"
+CAT_INDISCRIMINATE_WEAPONS = "Indiscriminate Weapons"
+CAT_HATE = "Hate"
+CAT_SELF_HARM = "Self-Harm"
+CAT_SEXUAL_CONTENT = "Sexual Content"
+CAT_ELECTIONS = "Elections"
+CAT_CODE_INTERPRETER_ABUSE = "Code Interpreter Abuse"
+
+
+SAFETY_CATEGORIES_TO_CODE_MAP = {
+    CAT_VIOLENT_CRIMES: "S1",
+    CAT_NON_VIOLENT_CRIMES: "S2",
+    CAT_SEX_CRIMES: "S3",
+    CAT_CHILD_EXPLOITATION: "S4",
+    CAT_DEFAMATION: "S5",
+    CAT_SPECIALIZED_ADVICE: "S6",
+    CAT_PRIVACY: "S7",
+    CAT_INTELLECTUAL_PROPERTY: "S8",
+    CAT_INDISCRIMINATE_WEAPONS: "S9",
+    CAT_HATE: "S10",
+    CAT_SELF_HARM: "S11",
+    CAT_SEXUAL_CONTENT: "S12",
+    CAT_ELECTIONS: "S13",
+    CAT_CODE_INTERPRETER_ABUSE: "S14",
+}
+SAFETY_CODE_TO_CATEGORIES_MAP = {v: k for k, v in SAFETY_CATEGORIES_TO_CODE_MAP.items()}
+
+DEFAULT_LG_V3_SAFETY_CATEGORIES = [
+    CAT_VIOLENT_CRIMES,
+    CAT_NON_VIOLENT_CRIMES,
+    CAT_SEX_CRIMES,
+    CAT_CHILD_EXPLOITATION,
+    CAT_DEFAMATION,
+    CAT_SPECIALIZED_ADVICE,
+    CAT_PRIVACY,
+    CAT_INTELLECTUAL_PROPERTY,
+    CAT_INDISCRIMINATE_WEAPONS,
+    CAT_HATE,
+    CAT_SELF_HARM,
+    CAT_SEXUAL_CONTENT,
+    CAT_ELECTIONS,
+]
+
+# accept both CoreModelId and huggingface repo id
+LLAMA_GUARD_MODEL_IDS = {
+    CoreModelId.llama_guard_3_8b.value: "meta-llama/Llama-Guard-3-8B",
+    "meta-llama/Llama-Guard-3-8B": "meta-llama/Llama-Guard-3-8B",
+    CoreModelId.llama_guard_3_1b.value: "meta-llama/Llama-Guard-3-1B",
+    "meta-llama/Llama-Guard-3-1B": "meta-llama/Llama-Guard-3-1B",
+    CoreModelId.llama_guard_3_11b_vision.value: "meta-llama/Llama-Guard-3-11B-Vision",
+    "meta-llama/Llama-Guard-3-11B-Vision": "meta-llama/Llama-Guard-3-11B-Vision",
+    CoreModelId.llama_guard_4_12b.value: "meta-llama/Llama-Guard-4-12B",
+    "meta-llama/Llama-Guard-4-12B": "meta-llama/Llama-Guard-4-12B",
+}
+
+MODEL_TO_SAFETY_CATEGORIES_MAP = {
+    "meta-llama/Llama-Guard-3-8B": DEFAULT_LG_V3_SAFETY_CATEGORIES + [CAT_CODE_INTERPRETER_ABUSE],
+    "meta-llama/Llama-Guard-3-1B": DEFAULT_LG_V3_SAFETY_CATEGORIES,
+    "meta-llama/Llama-Guard-3-11B-Vision": DEFAULT_LG_V3_SAFETY_CATEGORIES,
+    # Llama Guard 4 uses the same categories as Llama Guard 3
+    # source: https://github.com/meta-llama/PurpleLlama/blob/main/Llama-Guard4/12B/MODEL_CARD.md
+    "meta-llama/Llama-Guard-4-12B": DEFAULT_LG_V3_SAFETY_CATEGORIES,
+}
+
+
+PROMPT_TASK = "Task: Check if there is unsafe content in '$agent_type' messages in conversations according our safety policy with the below categories."
+
+SAFETY_CATEGORIES = """
+
+<BEGIN UNSAFE CONTENT CATEGORIES>
+$categories
+<END UNSAFE CONTENT CATEGORIES>
+"""
+
+PROMPT_CONVERSATION = """
+<BEGIN CONVERSATION>
+
+$conversations
+
+<END CONVERSATION>
+"""
+
+
+PROMPT_INSTRUCTIONS = """
+Provide your safety assessment for ONLY THE LAST $agent_type message in the above conversation:
+- First line must read 'safe' or 'unsafe'.
+- If unsafe, a second line must include a comma-separated list of violated categories."""
+
+
+PROMPT_TEMPLATE = Template(f"{PROMPT_TASK}{SAFETY_CATEGORIES}{PROMPT_CONVERSATION}{PROMPT_INSTRUCTIONS}")
+
+logger = get_logger(name=__name__, category="safety")
+
+
+class LlamaGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
+    def __init__(self, config: LlamaGuardConfig, deps) -> None:
+        self.config = config
+        self.inference_api = deps[Api.inference]
+
+    async def initialize(self) -> None:
+        pass
+
+    async def shutdown(self) -> None:
+        pass
+
+    async def register_shield(self, shield: Shield) -> None:
+        model_id = shield.provider_resource_id
+        if not model_id:
+            raise ValueError("Llama Guard shield must have a model id")
+
+    async def unregister_shield(self, identifier: str) -> None:
+        # LlamaGuard doesn't need to do anything special for unregistration
+        # The routing table handles the removal from the registry
+        pass
+
+    async def run_shield(
+        self,
+        shield_id: str,
+        messages: list[OpenAIMessageParam],
+        params: dict[str, Any] = None,
+    ) -> RunShieldResponse:
+        shield = await self.shield_store.get_shield(shield_id)
+        if not shield:
+            raise ValueError(f"Unknown shield {shield_id}")
+
+        messages = messages.copy()
+        # some shields like llama-guard require the first message to be a user message
+        # since this might be a tool call, first role might not be user
+        if len(messages) > 0 and messages[0].role != "user":
+            messages[0] = OpenAIUserMessageParam(content=messages[0].content)
+
+        # Use the inference API's model resolution instead of hardcoded mappings
+        # This allows the shield to work with any registered model
+        model_id = shield.provider_resource_id
+
+        # Determine safety categories based on the model type
+        # For known Llama Guard models, use specific categories
+        if model_id in LLAMA_GUARD_MODEL_IDS:
+            # Use the mapped model for categories but the original model_id for inference
+            mapped_model = LLAMA_GUARD_MODEL_IDS[model_id]
+            safety_categories = MODEL_TO_SAFETY_CATEGORIES_MAP.get(mapped_model, DEFAULT_LG_V3_SAFETY_CATEGORIES)
+        else:
+            # For unknown models, use default Llama Guard 3 8B categories
+            safety_categories = DEFAULT_LG_V3_SAFETY_CATEGORIES + [CAT_CODE_INTERPRETER_ABUSE]
+
+        impl = LlamaGuardShield(
+            model=model_id,
+            inference_api=self.inference_api,
+            excluded_categories=self.config.excluded_categories,
+            safety_categories=safety_categories,
+        )
+
+        return await impl.run(messages)
+
+    async def run_moderation(self, input: str | list[str], model: str) -> ModerationObject:
+        if isinstance(input, list):
+            messages = input.copy()
+        else:
+            messages = [input]
+
+        # convert to user messages format with role
+        messages = [OpenAIUserMessageParam(content=m) for m in messages]
+
+        # Determine safety categories based on the model type
+        # For known Llama Guard models, use specific categories
+        if model in LLAMA_GUARD_MODEL_IDS:
+            # Use the mapped model for categories but the original model_id for inference
+            mapped_model = LLAMA_GUARD_MODEL_IDS[model]
+            safety_categories = MODEL_TO_SAFETY_CATEGORIES_MAP.get(mapped_model, DEFAULT_LG_V3_SAFETY_CATEGORIES)
+        else:
+            # For unknown models, use default Llama Guard 3 8B categories
+            safety_categories = DEFAULT_LG_V3_SAFETY_CATEGORIES + [CAT_CODE_INTERPRETER_ABUSE]
+
+        impl = LlamaGuardShield(
+            model=model,
+            inference_api=self.inference_api,
+            excluded_categories=self.config.excluded_categories,
+            safety_categories=safety_categories,
+        )
+
+        return await impl.run_moderation(messages)
+
+
+class LlamaGuardShield:
+    def __init__(
+        self,
+        model: str,
+        inference_api: Inference,
+        excluded_categories: list[str] | None = None,
+        safety_categories: list[str] | None = None,
+    ):
+        if excluded_categories is None:
+            excluded_categories = []
+        if safety_categories is None:
+            safety_categories = []
+
+        assert len(excluded_categories) == 0 or all(
+            x in SAFETY_CATEGORIES_TO_CODE_MAP.values() for x in excluded_categories
+        ), "Invalid categories in excluded categories. Expected format is ['S1', 'S2', ..]"
+
+        self.model = model
+        self.inference_api = inference_api
+        self.excluded_categories = excluded_categories
+        self.safety_categories = safety_categories
+
+    def check_unsafe_response(self, response: str) -> str | None:
+        match = re.match(r"^unsafe\n(.*)$", response)
+        if match:
+            # extracts the unsafe code
+            extracted = match.group(1)
+            return extracted
+
+        return None
+
+    def get_safety_categories(self) -> list[str]:
+        excluded_categories = self.excluded_categories
+        if set(excluded_categories) == set(SAFETY_CATEGORIES_TO_CODE_MAP.values()):
+            excluded_categories = []
+
+        final_categories = []
+
+        all_categories = self.safety_categories
+        for cat in all_categories:
+            cat_code = SAFETY_CATEGORIES_TO_CODE_MAP[cat]
+            if cat_code in excluded_categories:
+                continue
+            final_categories.append(f"{cat_code}: {cat}.")
+
+        return final_categories
+
+    def validate_messages(self, messages: list[OpenAIMessageParam]) -> list[OpenAIMessageParam]:
+        if len(messages) == 0:
+            raise ValueError("Messages must not be empty")
+        if messages[0].role != Role.user.value:
+            raise ValueError("Messages must start with user")
+
+        if len(messages) >= 2 and (messages[0].role == Role.user.value and messages[1].role == Role.user.value):
+            messages = messages[1:]
+
+        return messages
+
+    async def run(self, messages: list[OpenAIMessageParam]) -> RunShieldResponse:
+        messages = self.validate_messages(messages)
+
+        if self.model == CoreModelId.llama_guard_3_11b_vision.value:
+            shield_input_message = self.build_vision_shield_input(messages)
+        else:
+            shield_input_message = self.build_text_shield_input(messages)
+
+        params = OpenAIChatCompletionRequestWithExtraBody(
+            model=self.model,
+            messages=[shield_input_message],
+            stream=False,
+            temperature=0.0,  # default is 1, which is too high for safety
+        )
+        response = await self.inference_api.openai_chat_completion(params)
+        content = response.choices[0].message.content
+        content = content.strip()
+        return self.get_shield_response(content)
+
+    def build_text_shield_input(self, messages: list[OpenAIMessageParam]) -> OpenAIUserMessageParam:
+        return OpenAIUserMessageParam(content=self.build_prompt(messages))
+
+    def build_vision_shield_input(self, messages: list[OpenAIMessageParam]) -> OpenAIUserMessageParam:
+        conversation = []
+        most_recent_img = None
+
+        for m in messages[::-1]:
+            if isinstance(m.content, str) or isinstance(m.content, TextContentItem):
+                conversation.append(m)
+            elif isinstance(m.content, ImageContentItem):
+                if most_recent_img is None and m.role == Role.user.value:
+                    most_recent_img = m.content
+                    conversation.append(m)
+            elif isinstance(m.content, list):
+                content = []
+                for c in m.content:
+                    if isinstance(c, str) or isinstance(c, TextContentItem):
+                        content.append(c)
+                    elif isinstance(c, ImageContentItem):
+                        if most_recent_img is None and m.role == Role.user.value:
+                            most_recent_img = c
+                            content.append(c)
+                    else:
+                        raise ValueError(f"Unknown content type: {c}")
+
+                conversation.append(OpenAIUserMessageParam(content=content))
+            else:
+                raise ValueError(f"Unknown content type: {m.content}")
+
+        prompt = []
+        if most_recent_img is not None:
+            prompt.append(most_recent_img)
+        prompt.append(self.build_prompt(conversation[::-1]))
+
+        return OpenAIUserMessageParam(content=prompt)
+
+    def build_prompt(self, messages: list[OpenAIMessageParam]) -> str:
+        categories = self.get_safety_categories()
+        categories_str = "\n".join(categories)
+        conversations_str = "\n\n".join(
+            [f"{m.role.capitalize()}: {interleaved_content_as_str(m.content)}" for m in messages]
+        )
+        return PROMPT_TEMPLATE.substitute(
+            agent_type=messages[-1].role.capitalize(),
+            categories=categories_str,
+            conversations=conversations_str,
+        )
+
+    def get_shield_response(self, response: str) -> RunShieldResponse:
+        response = response.strip()
+        if response == SAFE_RESPONSE:
+            return RunShieldResponse(violation=None)
+
+        unsafe_code = self.check_unsafe_response(response)
+        if unsafe_code:
+            unsafe_code_list = unsafe_code.split(",")
+            if set(unsafe_code_list).issubset(set(self.excluded_categories)):
+                return RunShieldResponse(violation=None)
+
+            return RunShieldResponse(
+                violation=SafetyViolation(
+                    violation_level=ViolationLevel.ERROR,
+                    user_message=CANNED_RESPONSE_TEXT,
+                    metadata={"violation_type": unsafe_code},
+                ),
+            )
+
+        raise ValueError(f"Unexpected response: {response}")
+
+    async def run_moderation(self, messages: list[OpenAIMessageParam]) -> ModerationObject:
+        if not messages:
+            return self.create_moderation_object(self.model)
+
+        # TODO: Add Image based support for OpenAI Moderations
+        shield_input_message = self.build_text_shield_input(messages)
+
+        params = OpenAIChatCompletionRequestWithExtraBody(
+            model=self.model,
+            messages=[shield_input_message],
+            stream=False,
+            temperature=0.0,  # default is 1, which is too high for safety
+        )
+        response = await self.inference_api.openai_chat_completion(params)
+        content = response.choices[0].message.content
+        content = content.strip()
+        return self.get_moderation_object(content)
+
+    def create_moderation_object(self, model: str, unsafe_code: str | None = None) -> ModerationObject:
+        """Create a ModerationObject for either safe or unsafe content.
+
+        Args:
+            model: The model name
+            unsafe_code: Optional comma-separated list of safety codes. If None, creates safe object.
+
+        Returns:
+            ModerationObject with appropriate configuration
+        """
+        # Set default values for safe case
+        categories = dict.fromkeys(SAFETY_CATEGORIES_TO_CODE_MAP.keys(), False)
+        category_scores = dict.fromkeys(SAFETY_CATEGORIES_TO_CODE_MAP.keys(), 1.0)
+        category_applied_input_types = {key: [] for key in SAFETY_CATEGORIES_TO_CODE_MAP.keys()}
+        flagged = False
+        user_message = None
+        metadata = {}
+
+        # Handle unsafe case
+        if unsafe_code:
+            unsafe_code_list = [code.strip() for code in unsafe_code.split(",")]
+            invalid_codes = [code for code in unsafe_code_list if code not in SAFETY_CODE_TO_CATEGORIES_MAP]
+            if invalid_codes:
+                logger.warning(f"Invalid safety codes returned: {invalid_codes}")
+                # just returning safe object, as we don't know what the invalid codes can map to
+                return ModerationObject(
+                    id=f"modr-{uuid.uuid4()}",
+                    model=model,
+                    results=[
+                        ModerationObjectResults(
+                            flagged=flagged,
+                            categories=categories,
+                            category_applied_input_types=category_applied_input_types,
+                            category_scores=category_scores,
+                            user_message=user_message,
+                            metadata=metadata,
+                        )
+                    ],
+                )
+
+            llama_guard_category = [SAFETY_CODE_TO_CATEGORIES_MAP[code] for code in unsafe_code_list]
+
+            # Update categories for unsafe content
+            categories = {k: k in llama_guard_category for k in SAFETY_CATEGORIES_TO_CODE_MAP.keys()}
+            category_scores = {
+                k: 1.0 if k in llama_guard_category else 0.0 for k in SAFETY_CATEGORIES_TO_CODE_MAP.keys()
+            }
+            category_applied_input_types = {
+                k: ["text"] if k in llama_guard_category else [] for k in SAFETY_CATEGORIES_TO_CODE_MAP.keys()
+            }
+            flagged = True
+            user_message = CANNED_RESPONSE_TEXT
+            metadata = {"violation_type": unsafe_code_list}
+
+        return ModerationObject(
+            id=f"modr-{uuid.uuid4()}",
+            model=model,
+            results=[
+                ModerationObjectResults(
+                    flagged=flagged,
+                    categories=categories,
+                    category_applied_input_types=category_applied_input_types,
+                    category_scores=category_scores,
+                    user_message=user_message,
+                    metadata=metadata,
+                )
+            ],
+        )
+
+    def is_content_safe(self, response: str, unsafe_code: str | None = None) -> bool:
+        """Check if content is safe based on response and unsafe code."""
+        if response.strip().lower().startswith(SAFE_RESPONSE):
+            return True
+
+        if unsafe_code:
+            unsafe_code_list = unsafe_code.split(",")
+            if set(unsafe_code_list).issubset(set(self.excluded_categories)):
+                return True
+
+        return False
+
+    def get_moderation_object(self, response: str) -> ModerationObject:
+        response = response.strip()
+        if self.is_content_safe(response):
+            return self.create_moderation_object(self.model)
+        unsafe_code = self.check_unsafe_response(response)
+        if not unsafe_code:
+            raise ValueError(f"Unexpected response: {response}")
+
+        if self.is_content_safe(response, unsafe_code):
+            return self.create_moderation_object(self.model)
+        else:
+            return self.create_moderation_object(self.model, unsafe_code)
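To make the response contract above concrete: the shield expects the model to reply `safe`, or `unsafe` followed on the second line by a comma-separated list of category codes; violations whose codes are all listed in `excluded_categories` are treated as safe. A self-contained sketch mirroring the logic of `check_unsafe_response` and `get_shield_response`:

```python
import re

EXCLUDED = {"S6"}  # e.g. Specialized Advice excluded via LlamaGuardConfig


def parse_llama_guard(response: str) -> list[str]:
    """Return the violated category codes after applying exclusions."""
    response = response.strip()
    if response == "safe":
        return []
    match = re.match(r"^unsafe\n(.*)$", response)
    if not match:
        raise ValueError(f"Unexpected response: {response}")
    codes = [c.strip() for c in match.group(1).split(",")]
    # If every violated code is excluded by config, treat as safe.
    return [] if set(codes).issubset(EXCLUDED) else codes


print(parse_llama_guard("safe"))            # []
print(parse_llama_guard("unsafe\nS1,S10"))  # ['S1', 'S10']
print(parse_llama_guard("unsafe\nS6"))      # [] (excluded)
```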
@@ -6,12 +6,12 @@
 
 from typing import Any
 
-from .config import
+from .config import PromptGuardConfig
 
 
-async def
-    from .
+async def get_provider_impl(config: PromptGuardConfig, deps: dict[str, Any]):
+    from .prompt_guard import PromptGuardSafetyImpl
 
-    impl =
+    impl = PromptGuardSafetyImpl(config, deps)
     await impl.initialize()
     return impl
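Both providers end up behind the same Safety API surface, so a caller drives them identically. A hedged sketch of exercising `run_shield` on whichever impl `get_provider_impl` returned (the shield id is a hypothetical registered name; message and response types are taken from the diff above):

```python
from llama_stack.apis.inference import OpenAIUserMessageParam


async def moderate(safety_impl) -> bool:
    """Return True when the last user message passes the shield."""
    response = await safety_impl.run_shield(
        shield_id="llama-guard",  # hypothetical registered shield id
        messages=[OpenAIUserMessageParam(content="How do I bake a cake?")],
        params={},
    )
    if response.violation is None:
        return True
    # SafetyViolation carries a canned user_message plus the violated codes.
    print(response.violation.user_message, response.violation.metadata)
    return False
```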