llama-stack 0.0.42__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack/__init__.py +5 -0
- llama_stack/apis/agents/__init__.py +1 -1
- llama_stack/apis/agents/agents.py +700 -281
- llama_stack/apis/agents/openai_responses.py +1311 -0
- llama_stack/{providers/adapters/memory/sample/config.py → apis/batches/__init__.py} +2 -5
- llama_stack/apis/batches/batches.py +100 -0
- llama_stack/apis/benchmarks/__init__.py +7 -0
- llama_stack/apis/benchmarks/benchmarks.py +108 -0
- llama_stack/apis/common/content_types.py +143 -0
- llama_stack/apis/common/errors.py +103 -0
- llama_stack/apis/common/job_types.py +38 -0
- llama_stack/apis/common/responses.py +36 -0
- llama_stack/apis/common/training_types.py +36 -5
- llama_stack/apis/common/type_system.py +158 -0
- llama_stack/apis/conversations/__init__.py +31 -0
- llama_stack/apis/conversations/conversations.py +286 -0
- llama_stack/apis/datasetio/__init__.py +7 -0
- llama_stack/apis/datasetio/datasetio.py +59 -0
- llama_stack/apis/datasets/__init__.py +7 -0
- llama_stack/apis/datasets/datasets.py +251 -0
- llama_stack/apis/datatypes.py +160 -0
- llama_stack/apis/eval/__init__.py +7 -0
- llama_stack/apis/eval/eval.py +169 -0
- llama_stack/apis/files/__init__.py +7 -0
- llama_stack/apis/files/files.py +199 -0
- llama_stack/apis/inference/__init__.py +1 -1
- llama_stack/apis/inference/inference.py +1169 -113
- llama_stack/apis/inspect/__init__.py +1 -1
- llama_stack/apis/inspect/inspect.py +69 -16
- llama_stack/apis/models/__init__.py +1 -1
- llama_stack/apis/models/models.py +148 -21
- llama_stack/apis/post_training/__init__.py +1 -1
- llama_stack/apis/post_training/post_training.py +265 -120
- llama_stack/{providers/adapters/agents/sample/config.py → apis/prompts/__init__.py} +2 -5
- llama_stack/apis/prompts/prompts.py +204 -0
- llama_stack/apis/providers/__init__.py +7 -0
- llama_stack/apis/providers/providers.py +69 -0
- llama_stack/apis/resource.py +37 -0
- llama_stack/apis/safety/__init__.py +1 -1
- llama_stack/apis/safety/safety.py +95 -12
- llama_stack/apis/scoring/__init__.py +7 -0
- llama_stack/apis/scoring/scoring.py +93 -0
- llama_stack/apis/scoring_functions/__init__.py +7 -0
- llama_stack/apis/scoring_functions/scoring_functions.py +208 -0
- llama_stack/apis/shields/__init__.py +1 -1
- llama_stack/apis/shields/shields.py +76 -33
- llama_stack/apis/synthetic_data_generation/__init__.py +1 -1
- llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +40 -17
- llama_stack/apis/telemetry/__init__.py +1 -1
- llama_stack/apis/telemetry/telemetry.py +322 -31
- llama_stack/apis/{dataset → tools}/__init__.py +2 -1
- llama_stack/apis/tools/rag_tool.py +218 -0
- llama_stack/apis/tools/tools.py +221 -0
- llama_stack/apis/vector_io/__init__.py +7 -0
- llama_stack/apis/vector_io/vector_io.py +960 -0
- llama_stack/apis/vector_stores/__init__.py +7 -0
- llama_stack/apis/vector_stores/vector_stores.py +51 -0
- llama_stack/apis/version.py +9 -0
- llama_stack/cli/llama.py +13 -5
- llama_stack/cli/stack/_list_deps.py +182 -0
- llama_stack/cli/stack/list_apis.py +1 -1
- llama_stack/cli/stack/list_deps.py +55 -0
- llama_stack/cli/stack/list_providers.py +24 -10
- llama_stack/cli/stack/list_stacks.py +56 -0
- llama_stack/cli/stack/remove.py +115 -0
- llama_stack/cli/stack/run.py +169 -56
- llama_stack/cli/stack/stack.py +18 -4
- llama_stack/cli/stack/utils.py +151 -0
- llama_stack/cli/table.py +23 -61
- llama_stack/cli/utils.py +29 -0
- llama_stack/core/access_control/access_control.py +131 -0
- llama_stack/core/access_control/conditions.py +129 -0
- llama_stack/core/access_control/datatypes.py +107 -0
- llama_stack/core/build.py +164 -0
- llama_stack/core/client.py +205 -0
- llama_stack/core/common.sh +37 -0
- llama_stack/{distribution → core}/configure.py +74 -55
- llama_stack/core/conversations/conversations.py +309 -0
- llama_stack/core/datatypes.py +625 -0
- llama_stack/core/distribution.py +276 -0
- llama_stack/core/external.py +54 -0
- llama_stack/core/id_generation.py +42 -0
- llama_stack/core/inspect.py +86 -0
- llama_stack/core/library_client.py +539 -0
- llama_stack/core/prompts/prompts.py +234 -0
- llama_stack/core/providers.py +137 -0
- llama_stack/core/request_headers.py +115 -0
- llama_stack/core/resolver.py +506 -0
- llama_stack/core/routers/__init__.py +101 -0
- llama_stack/core/routers/datasets.py +73 -0
- llama_stack/core/routers/eval_scoring.py +155 -0
- llama_stack/core/routers/inference.py +645 -0
- llama_stack/core/routers/safety.py +85 -0
- llama_stack/core/routers/tool_runtime.py +91 -0
- llama_stack/core/routers/vector_io.py +442 -0
- llama_stack/core/routing_tables/benchmarks.py +62 -0
- llama_stack/core/routing_tables/common.py +254 -0
- llama_stack/core/routing_tables/datasets.py +91 -0
- llama_stack/core/routing_tables/models.py +163 -0
- llama_stack/core/routing_tables/scoring_functions.py +66 -0
- llama_stack/core/routing_tables/shields.py +61 -0
- llama_stack/core/routing_tables/toolgroups.py +129 -0
- llama_stack/core/routing_tables/vector_stores.py +292 -0
- llama_stack/core/server/auth.py +187 -0
- llama_stack/core/server/auth_providers.py +494 -0
- llama_stack/core/server/quota.py +110 -0
- llama_stack/core/server/routes.py +141 -0
- llama_stack/core/server/server.py +542 -0
- llama_stack/core/server/tracing.py +80 -0
- llama_stack/core/stack.py +546 -0
- llama_stack/core/start_stack.sh +117 -0
- llama_stack/core/storage/datatypes.py +283 -0
- llama_stack/{cli/model → core/store}/__init__.py +1 -1
- llama_stack/core/store/registry.py +199 -0
- llama_stack/core/testing_context.py +49 -0
- llama_stack/core/ui/app.py +55 -0
- llama_stack/core/ui/modules/api.py +32 -0
- llama_stack/core/ui/modules/utils.py +42 -0
- llama_stack/core/ui/page/distribution/datasets.py +18 -0
- llama_stack/core/ui/page/distribution/eval_tasks.py +20 -0
- llama_stack/core/ui/page/distribution/models.py +18 -0
- llama_stack/core/ui/page/distribution/providers.py +27 -0
- llama_stack/core/ui/page/distribution/resources.py +48 -0
- llama_stack/core/ui/page/distribution/scoring_functions.py +18 -0
- llama_stack/core/ui/page/distribution/shields.py +19 -0
- llama_stack/core/ui/page/evaluations/app_eval.py +143 -0
- llama_stack/core/ui/page/evaluations/native_eval.py +253 -0
- llama_stack/core/ui/page/playground/chat.py +130 -0
- llama_stack/core/ui/page/playground/tools.py +352 -0
- llama_stack/core/utils/config.py +30 -0
- llama_stack/{distribution → core}/utils/config_dirs.py +3 -6
- llama_stack/core/utils/config_resolution.py +125 -0
- llama_stack/core/utils/context.py +84 -0
- llama_stack/core/utils/exec.py +96 -0
- llama_stack/{providers/impls/meta_reference/codeshield/config.py → core/utils/image_types.py} +4 -3
- llama_stack/{distribution → core}/utils/model_utils.py +2 -2
- llama_stack/{distribution → core}/utils/prompt_for_config.py +30 -63
- llama_stack/{apis/batch_inference → distributions/dell}/__init__.py +1 -1
- llama_stack/distributions/dell/build.yaml +33 -0
- llama_stack/distributions/dell/dell.py +158 -0
- llama_stack/distributions/dell/run-with-safety.yaml +141 -0
- llama_stack/distributions/dell/run.yaml +132 -0
- llama_stack/distributions/meta-reference-gpu/__init__.py +7 -0
- llama_stack/distributions/meta-reference-gpu/build.yaml +32 -0
- llama_stack/distributions/meta-reference-gpu/meta_reference.py +163 -0
- llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +154 -0
- llama_stack/distributions/meta-reference-gpu/run.yaml +139 -0
- llama_stack/{apis/evals → distributions/nvidia}/__init__.py +1 -1
- llama_stack/distributions/nvidia/build.yaml +29 -0
- llama_stack/distributions/nvidia/nvidia.py +154 -0
- llama_stack/distributions/nvidia/run-with-safety.yaml +137 -0
- llama_stack/distributions/nvidia/run.yaml +116 -0
- llama_stack/distributions/open-benchmark/__init__.py +7 -0
- llama_stack/distributions/open-benchmark/build.yaml +36 -0
- llama_stack/distributions/open-benchmark/open_benchmark.py +303 -0
- llama_stack/distributions/open-benchmark/run.yaml +252 -0
- llama_stack/distributions/postgres-demo/__init__.py +7 -0
- llama_stack/distributions/postgres-demo/build.yaml +23 -0
- llama_stack/distributions/postgres-demo/postgres_demo.py +125 -0
- llama_stack/distributions/postgres-demo/run.yaml +115 -0
- llama_stack/{apis/memory → distributions/starter}/__init__.py +1 -1
- llama_stack/distributions/starter/build.yaml +61 -0
- llama_stack/distributions/starter/run-with-postgres-store.yaml +285 -0
- llama_stack/distributions/starter/run.yaml +276 -0
- llama_stack/distributions/starter/starter.py +345 -0
- llama_stack/distributions/starter-gpu/__init__.py +7 -0
- llama_stack/distributions/starter-gpu/build.yaml +61 -0
- llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +288 -0
- llama_stack/distributions/starter-gpu/run.yaml +279 -0
- llama_stack/distributions/starter-gpu/starter_gpu.py +20 -0
- llama_stack/distributions/template.py +456 -0
- llama_stack/distributions/watsonx/__init__.py +7 -0
- llama_stack/distributions/watsonx/build.yaml +33 -0
- llama_stack/distributions/watsonx/run.yaml +133 -0
- llama_stack/distributions/watsonx/watsonx.py +95 -0
- llama_stack/env.py +24 -0
- llama_stack/log.py +314 -0
- llama_stack/models/llama/checkpoint.py +164 -0
- llama_stack/models/llama/datatypes.py +164 -0
- llama_stack/models/llama/hadamard_utils.py +86 -0
- llama_stack/models/llama/llama3/args.py +74 -0
- llama_stack/models/llama/llama3/chat_format.py +286 -0
- llama_stack/models/llama/llama3/generation.py +376 -0
- llama_stack/models/llama/llama3/interface.py +255 -0
- llama_stack/models/llama/llama3/model.py +304 -0
- llama_stack/models/llama/llama3/multimodal/__init__.py +12 -0
- llama_stack/models/llama/llama3/multimodal/encoder_utils.py +180 -0
- llama_stack/models/llama/llama3/multimodal/image_transform.py +409 -0
- llama_stack/models/llama/llama3/multimodal/model.py +1430 -0
- llama_stack/models/llama/llama3/multimodal/utils.py +26 -0
- llama_stack/models/llama/llama3/prompt_templates/__init__.py +22 -0
- llama_stack/models/llama/llama3/prompt_templates/base.py +39 -0
- llama_stack/models/llama/llama3/prompt_templates/system_prompts.py +319 -0
- llama_stack/models/llama/llama3/prompt_templates/tool_response.py +62 -0
- llama_stack/models/llama/llama3/quantization/loader.py +316 -0
- llama_stack/models/llama/llama3/template_data.py +116 -0
- llama_stack/models/llama/llama3/tokenizer.model +128000 -0
- llama_stack/models/llama/llama3/tokenizer.py +198 -0
- llama_stack/models/llama/llama3/tool_utils.py +266 -0
- llama_stack/models/llama/llama3_1/__init__.py +12 -0
- llama_stack/models/llama/llama3_1/prompt_format.md +358 -0
- llama_stack/models/llama/llama3_1/prompts.py +258 -0
- llama_stack/models/llama/llama3_2/prompts_text.py +229 -0
- llama_stack/models/llama/llama3_2/prompts_vision.py +126 -0
- llama_stack/models/llama/llama3_2/text_prompt_format.md +286 -0
- llama_stack/models/llama/llama3_2/vision_prompt_format.md +141 -0
- llama_stack/models/llama/llama3_3/prompts.py +259 -0
- llama_stack/models/llama/llama4/args.py +107 -0
- llama_stack/models/llama/llama4/chat_format.py +317 -0
- llama_stack/models/llama/llama4/datatypes.py +56 -0
- llama_stack/models/llama/llama4/ffn.py +58 -0
- llama_stack/models/llama/llama4/generation.py +313 -0
- llama_stack/models/llama/llama4/model.py +437 -0
- llama_stack/models/llama/llama4/moe.py +214 -0
- llama_stack/models/llama/llama4/preprocess.py +435 -0
- llama_stack/models/llama/llama4/prompt_format.md +304 -0
- llama_stack/models/llama/llama4/prompt_templates/system_prompts.py +136 -0
- llama_stack/models/llama/llama4/prompts.py +279 -0
- llama_stack/models/llama/llama4/quantization/__init__.py +5 -0
- llama_stack/models/llama/llama4/quantization/loader.py +226 -0
- llama_stack/models/llama/llama4/tokenizer.model +200000 -0
- llama_stack/models/llama/llama4/tokenizer.py +263 -0
- llama_stack/models/llama/llama4/vision/__init__.py +5 -0
- llama_stack/models/llama/llama4/vision/embedding.py +210 -0
- llama_stack/models/llama/llama4/vision/encoder.py +412 -0
- llama_stack/models/llama/prompt_format.py +191 -0
- llama_stack/models/llama/quantize_impls.py +316 -0
- llama_stack/models/llama/sku_list.py +1029 -0
- llama_stack/models/llama/sku_types.py +233 -0
- llama_stack/models/llama/tokenizer_utils.py +40 -0
- llama_stack/providers/datatypes.py +136 -107
- llama_stack/providers/inline/__init__.py +5 -0
- llama_stack/providers/inline/agents/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/agents/meta_reference}/__init__.py +12 -5
- llama_stack/providers/inline/agents/meta_reference/agent_instance.py +1024 -0
- llama_stack/providers/inline/agents/meta_reference/agents.py +383 -0
- llama_stack/providers/inline/agents/meta_reference/config.py +37 -0
- llama_stack/providers/inline/agents/meta_reference/persistence.py +228 -0
- llama_stack/providers/inline/agents/meta_reference/responses/__init__.py +5 -0
- llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +423 -0
- llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +1226 -0
- llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py +449 -0
- llama_stack/providers/inline/agents/meta_reference/responses/types.py +194 -0
- llama_stack/providers/inline/agents/meta_reference/responses/utils.py +365 -0
- llama_stack/providers/inline/agents/meta_reference/safety.py +52 -0
- llama_stack/providers/inline/batches/__init__.py +5 -0
- llama_stack/providers/inline/batches/reference/__init__.py +36 -0
- llama_stack/providers/inline/batches/reference/batches.py +679 -0
- llama_stack/providers/inline/batches/reference/config.py +40 -0
- llama_stack/providers/inline/datasetio/__init__.py +5 -0
- llama_stack/providers/inline/datasetio/localfs/__init__.py +20 -0
- llama_stack/providers/inline/datasetio/localfs/config.py +23 -0
- llama_stack/providers/inline/datasetio/localfs/datasetio.py +113 -0
- llama_stack/providers/inline/eval/__init__.py +5 -0
- llama_stack/providers/inline/eval/meta_reference/__init__.py +28 -0
- llama_stack/providers/inline/eval/meta_reference/config.py +23 -0
- llama_stack/providers/inline/eval/meta_reference/eval.py +259 -0
- llama_stack/providers/inline/files/localfs/__init__.py +20 -0
- llama_stack/providers/inline/files/localfs/config.py +31 -0
- llama_stack/providers/inline/files/localfs/files.py +219 -0
- llama_stack/providers/inline/inference/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/__init__.py +4 -4
- llama_stack/providers/inline/inference/meta_reference/common.py +24 -0
- llama_stack/providers/inline/inference/meta_reference/config.py +68 -0
- llama_stack/providers/inline/inference/meta_reference/generators.py +211 -0
- llama_stack/providers/inline/inference/meta_reference/inference.py +158 -0
- llama_stack/providers/inline/inference/meta_reference/model_parallel.py +96 -0
- llama_stack/providers/{impls/meta_reference/inference → inline/inference/meta_reference}/parallel_utils.py +56 -73
- llama_stack/providers/inline/inference/sentence_transformers/__init__.py +22 -0
- llama_stack/providers/{impls/meta_reference/agents → inline/inference/sentence_transformers}/config.py +6 -4
- llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +83 -0
- llama_stack/providers/inline/post_training/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/common/utils.py +35 -0
- llama_stack/providers/inline/post_training/common/validator.py +36 -0
- llama_stack/providers/inline/post_training/huggingface/__init__.py +27 -0
- llama_stack/providers/inline/post_training/huggingface/config.py +83 -0
- llama_stack/providers/inline/post_training/huggingface/post_training.py +208 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py +519 -0
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py +485 -0
- llama_stack/providers/inline/post_training/huggingface/utils.py +269 -0
- llama_stack/providers/inline/post_training/torchtune/__init__.py +27 -0
- llama_stack/providers/inline/post_training/torchtune/common/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +240 -0
- llama_stack/providers/inline/post_training/torchtune/common/utils.py +99 -0
- llama_stack/providers/inline/post_training/torchtune/config.py +20 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +57 -0
- llama_stack/providers/inline/post_training/torchtune/datasets/sft.py +78 -0
- llama_stack/providers/inline/post_training/torchtune/post_training.py +178 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/__init__.py +5 -0
- llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +588 -0
- llama_stack/providers/inline/safety/__init__.py +5 -0
- llama_stack/providers/{impls/meta_reference/codeshield → inline/safety/code_scanner}/__init__.py +4 -2
- llama_stack/providers/inline/safety/code_scanner/code_scanner.py +128 -0
- llama_stack/providers/{impls/meta_reference/memory → inline/safety/code_scanner}/config.py +5 -3
- llama_stack/providers/inline/safety/llama_guard/__init__.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/config.py +19 -0
- llama_stack/providers/inline/safety/llama_guard/llama_guard.py +489 -0
- llama_stack/providers/{adapters/memory/sample → inline/safety/prompt_guard}/__init__.py +4 -4
- llama_stack/providers/inline/safety/prompt_guard/config.py +32 -0
- llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py +131 -0
- llama_stack/providers/inline/scoring/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/__init__.py +25 -0
- llama_stack/providers/{adapters/memory/weaviate → inline/scoring/basic}/config.py +5 -7
- llama_stack/providers/inline/scoring/basic/scoring.py +126 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py +240 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py +41 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/docvqa.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/equality.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/ifeval.py +23 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_math_response.py +27 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py +71 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/subset_of.py +21 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py +80 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py +66 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py +58 -0
- llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py +38 -0
- llama_stack/providers/inline/scoring/basic/utils/__init__.py +5 -0
- llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py +3319 -0
- llama_stack/providers/inline/scoring/basic/utils/math_utils.py +330 -0
- llama_stack/providers/inline/scoring/braintrust/__init__.py +27 -0
- llama_stack/providers/inline/scoring/braintrust/braintrust.py +230 -0
- llama_stack/providers/inline/scoring/braintrust/config.py +21 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_correctness.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_relevancy.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_similarity.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_entity_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_precision.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_recall.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_relevancy.py +23 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/factuality.py +24 -0
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/faithfulness.py +24 -0
- llama_stack/providers/inline/scoring/llm_as_judge/__init__.py +21 -0
- llama_stack/providers/inline/scoring/llm_as_judge/config.py +14 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring.py +113 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/__init__.py +5 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +96 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +20 -0
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +81 -0
- llama_stack/providers/inline/telemetry/__init__.py +5 -0
- llama_stack/providers/inline/telemetry/meta_reference/__init__.py +21 -0
- llama_stack/providers/inline/telemetry/meta_reference/config.py +47 -0
- llama_stack/providers/inline/telemetry/meta_reference/telemetry.py +252 -0
- llama_stack/providers/inline/tool_runtime/__init__.py +5 -0
- llama_stack/providers/inline/tool_runtime/rag/__init__.py +19 -0
- llama_stack/providers/{impls/meta_reference/telemetry → inline/tool_runtime/rag}/config.py +5 -3
- llama_stack/providers/inline/tool_runtime/rag/context_retriever.py +77 -0
- llama_stack/providers/inline/tool_runtime/rag/memory.py +332 -0
- llama_stack/providers/inline/vector_io/__init__.py +5 -0
- llama_stack/providers/inline/vector_io/chroma/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/chroma/config.py +30 -0
- llama_stack/providers/inline/vector_io/faiss/__init__.py +21 -0
- llama_stack/providers/inline/vector_io/faiss/config.py +26 -0
- llama_stack/providers/inline/vector_io/faiss/faiss.py +293 -0
- llama_stack/providers/inline/vector_io/milvus/__init__.py +19 -0
- llama_stack/providers/inline/vector_io/milvus/config.py +29 -0
- llama_stack/providers/inline/vector_io/qdrant/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/qdrant/config.py +29 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +20 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/config.py +26 -0
- llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +483 -0
- llama_stack/providers/registry/agents.py +16 -18
- llama_stack/providers/registry/batches.py +26 -0
- llama_stack/providers/registry/datasetio.py +49 -0
- llama_stack/providers/registry/eval.py +46 -0
- llama_stack/providers/registry/files.py +31 -0
- llama_stack/providers/registry/inference.py +273 -118
- llama_stack/providers/registry/post_training.py +69 -0
- llama_stack/providers/registry/safety.py +46 -41
- llama_stack/providers/registry/scoring.py +51 -0
- llama_stack/providers/registry/tool_runtime.py +87 -0
- llama_stack/providers/registry/vector_io.py +828 -0
- llama_stack/providers/remote/__init__.py +5 -0
- llama_stack/providers/remote/agents/__init__.py +5 -0
- llama_stack/providers/remote/datasetio/__init__.py +5 -0
- llama_stack/providers/{adapters/memory/chroma → remote/datasetio/huggingface}/__init__.py +7 -4
- llama_stack/providers/remote/datasetio/huggingface/config.py +23 -0
- llama_stack/providers/remote/datasetio/huggingface/huggingface.py +99 -0
- llama_stack/providers/remote/datasetio/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/datasetio/nvidia/config.py +61 -0
- llama_stack/providers/remote/datasetio/nvidia/datasetio.py +116 -0
- llama_stack/providers/remote/eval/__init__.py +5 -0
- llama_stack/providers/remote/eval/nvidia/__init__.py +31 -0
- llama_stack/providers/remote/eval/nvidia/config.py +29 -0
- llama_stack/providers/remote/eval/nvidia/eval.py +162 -0
- llama_stack/providers/remote/files/s3/__init__.py +19 -0
- llama_stack/providers/remote/files/s3/config.py +42 -0
- llama_stack/providers/remote/files/s3/files.py +313 -0
- llama_stack/providers/remote/inference/__init__.py +5 -0
- llama_stack/providers/{adapters/safety/sample → remote/inference/anthropic}/__init__.py +4 -6
- llama_stack/providers/remote/inference/anthropic/anthropic.py +36 -0
- llama_stack/providers/remote/inference/anthropic/config.py +28 -0
- llama_stack/providers/{impls/meta_reference/telemetry → remote/inference/azure}/__init__.py +4 -4
- llama_stack/providers/remote/inference/azure/azure.py +25 -0
- llama_stack/providers/remote/inference/azure/config.py +61 -0
- llama_stack/providers/{adapters → remote}/inference/bedrock/__init__.py +18 -17
- llama_stack/providers/remote/inference/bedrock/bedrock.py +142 -0
- llama_stack/providers/{adapters/inference/sample → remote/inference/bedrock}/config.py +3 -4
- llama_stack/providers/remote/inference/bedrock/models.py +29 -0
- llama_stack/providers/remote/inference/cerebras/__init__.py +19 -0
- llama_stack/providers/remote/inference/cerebras/cerebras.py +28 -0
- llama_stack/providers/remote/inference/cerebras/config.py +30 -0
- llama_stack/providers/{adapters → remote}/inference/databricks/__init__.py +4 -5
- llama_stack/providers/remote/inference/databricks/config.py +37 -0
- llama_stack/providers/remote/inference/databricks/databricks.py +44 -0
- llama_stack/providers/{adapters → remote}/inference/fireworks/__init__.py +8 -4
- llama_stack/providers/remote/inference/fireworks/config.py +27 -0
- llama_stack/providers/remote/inference/fireworks/fireworks.py +27 -0
- llama_stack/providers/{adapters/memory/pgvector → remote/inference/gemini}/__init__.py +4 -4
- llama_stack/providers/remote/inference/gemini/config.py +28 -0
- llama_stack/providers/remote/inference/gemini/gemini.py +82 -0
- llama_stack/providers/remote/inference/groq/__init__.py +15 -0
- llama_stack/providers/remote/inference/groq/config.py +34 -0
- llama_stack/providers/remote/inference/groq/groq.py +18 -0
- llama_stack/providers/remote/inference/llama_openai_compat/__init__.py +15 -0
- llama_stack/providers/remote/inference/llama_openai_compat/config.py +34 -0
- llama_stack/providers/remote/inference/llama_openai_compat/llama.py +46 -0
- llama_stack/providers/remote/inference/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/inference/nvidia/config.py +64 -0
- llama_stack/providers/remote/inference/nvidia/nvidia.py +61 -0
- llama_stack/providers/{adapters/safety/sample/config.py → remote/inference/nvidia/utils.py} +3 -4
- llama_stack/providers/{impls/vllm → remote/inference/ollama}/__init__.py +4 -6
- llama_stack/providers/remote/inference/ollama/config.py +25 -0
- llama_stack/providers/remote/inference/ollama/ollama.py +102 -0
- llama_stack/providers/{adapters/telemetry/opentelemetry → remote/inference/openai}/__init__.py +4 -4
- llama_stack/providers/remote/inference/openai/config.py +39 -0
- llama_stack/providers/remote/inference/openai/openai.py +38 -0
- llama_stack/providers/remote/inference/passthrough/__init__.py +23 -0
- llama_stack/providers/remote/inference/passthrough/config.py +34 -0
- llama_stack/providers/remote/inference/passthrough/passthrough.py +122 -0
- llama_stack/providers/remote/inference/runpod/__init__.py +16 -0
- llama_stack/providers/remote/inference/runpod/config.py +32 -0
- llama_stack/providers/remote/inference/runpod/runpod.py +42 -0
- llama_stack/providers/remote/inference/sambanova/__init__.py +16 -0
- llama_stack/providers/remote/inference/sambanova/config.py +34 -0
- llama_stack/providers/remote/inference/sambanova/sambanova.py +28 -0
- llama_stack/providers/{adapters → remote}/inference/tgi/__init__.py +3 -4
- llama_stack/providers/remote/inference/tgi/config.py +76 -0
- llama_stack/providers/remote/inference/tgi/tgi.py +85 -0
- llama_stack/providers/{adapters → remote}/inference/together/__init__.py +8 -4
- llama_stack/providers/remote/inference/together/config.py +27 -0
- llama_stack/providers/remote/inference/together/together.py +102 -0
- llama_stack/providers/remote/inference/vertexai/__init__.py +15 -0
- llama_stack/providers/remote/inference/vertexai/config.py +48 -0
- llama_stack/providers/remote/inference/vertexai/vertexai.py +54 -0
- llama_stack/providers/remote/inference/vllm/__init__.py +22 -0
- llama_stack/providers/remote/inference/vllm/config.py +59 -0
- llama_stack/providers/remote/inference/vllm/vllm.py +111 -0
- llama_stack/providers/remote/inference/watsonx/__init__.py +15 -0
- llama_stack/providers/remote/inference/watsonx/config.py +45 -0
- llama_stack/providers/remote/inference/watsonx/watsonx.py +336 -0
- llama_stack/providers/remote/post_training/__init__.py +5 -0
- llama_stack/providers/remote/post_training/nvidia/__init__.py +23 -0
- llama_stack/providers/remote/post_training/nvidia/config.py +113 -0
- llama_stack/providers/remote/post_training/nvidia/models.py +27 -0
- llama_stack/providers/remote/post_training/nvidia/post_training.py +430 -0
- llama_stack/providers/remote/post_training/nvidia/utils.py +63 -0
- llama_stack/providers/remote/safety/__init__.py +5 -0
- llama_stack/providers/remote/safety/bedrock/bedrock.py +111 -0
- llama_stack/providers/remote/safety/bedrock/config.py +14 -0
- llama_stack/providers/{adapters/inference/sample → remote/safety/nvidia}/__init__.py +5 -4
- llama_stack/providers/remote/safety/nvidia/config.py +40 -0
- llama_stack/providers/remote/safety/nvidia/nvidia.py +161 -0
- llama_stack/providers/{adapters/agents/sample → remote/safety/sambanova}/__init__.py +5 -4
- llama_stack/providers/remote/safety/sambanova/config.py +37 -0
- llama_stack/providers/remote/safety/sambanova/sambanova.py +98 -0
- llama_stack/providers/remote/tool_runtime/__init__.py +5 -0
- llama_stack/providers/remote/tool_runtime/bing_search/__init__.py +21 -0
- llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +112 -0
- llama_stack/providers/remote/tool_runtime/bing_search/config.py +22 -0
- llama_stack/providers/remote/tool_runtime/brave_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +148 -0
- llama_stack/providers/remote/tool_runtime/brave_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py +15 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py +20 -0
- llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +73 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/__init__.py +20 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/config.py +27 -0
- llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +84 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/__init__.py +22 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py +21 -0
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +140 -0
- llama_stack/providers/remote/vector_io/__init__.py +5 -0
- llama_stack/providers/remote/vector_io/chroma/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/chroma/chroma.py +215 -0
- llama_stack/providers/remote/vector_io/chroma/config.py +28 -0
- llama_stack/providers/remote/vector_io/milvus/__init__.py +18 -0
- llama_stack/providers/remote/vector_io/milvus/config.py +35 -0
- llama_stack/providers/remote/vector_io/milvus/milvus.py +375 -0
- llama_stack/providers/remote/vector_io/pgvector/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/pgvector/config.py +47 -0
- llama_stack/providers/remote/vector_io/pgvector/pgvector.py +460 -0
- llama_stack/providers/remote/vector_io/qdrant/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/qdrant/config.py +37 -0
- llama_stack/providers/remote/vector_io/qdrant/qdrant.py +265 -0
- llama_stack/providers/remote/vector_io/weaviate/__init__.py +17 -0
- llama_stack/providers/remote/vector_io/weaviate/config.py +32 -0
- llama_stack/providers/remote/vector_io/weaviate/weaviate.py +393 -0
- llama_stack/providers/utils/bedrock/__init__.py +5 -0
- llama_stack/providers/utils/bedrock/client.py +74 -0
- llama_stack/providers/utils/bedrock/config.py +64 -0
- llama_stack/providers/utils/bedrock/refreshable_boto_session.py +112 -0
- llama_stack/providers/utils/common/__init__.py +5 -0
- llama_stack/providers/utils/common/data_schema_validator.py +103 -0
- llama_stack/providers/utils/datasetio/__init__.py +5 -0
- llama_stack/providers/utils/datasetio/url_utils.py +47 -0
- llama_stack/providers/utils/files/__init__.py +5 -0
- llama_stack/providers/utils/files/form_data.py +69 -0
- llama_stack/providers/utils/inference/__init__.py +8 -7
- llama_stack/providers/utils/inference/embedding_mixin.py +101 -0
- llama_stack/providers/utils/inference/inference_store.py +264 -0
- llama_stack/providers/utils/inference/litellm_openai_mixin.py +336 -0
- llama_stack/providers/utils/inference/model_registry.py +173 -23
- llama_stack/providers/utils/inference/openai_compat.py +1261 -49
- llama_stack/providers/utils/inference/openai_mixin.py +506 -0
- llama_stack/providers/utils/inference/prompt_adapter.py +365 -67
- llama_stack/providers/utils/kvstore/api.py +6 -6
- llama_stack/providers/utils/kvstore/config.py +28 -48
- llama_stack/providers/utils/kvstore/kvstore.py +61 -15
- llama_stack/providers/utils/kvstore/mongodb/__init__.py +9 -0
- llama_stack/providers/utils/kvstore/mongodb/mongodb.py +82 -0
- llama_stack/providers/utils/kvstore/postgres/__init__.py +7 -0
- llama_stack/providers/utils/kvstore/postgres/postgres.py +114 -0
- llama_stack/providers/utils/kvstore/redis/redis.py +33 -9
- llama_stack/providers/utils/kvstore/sqlite/config.py +2 -1
- llama_stack/providers/utils/kvstore/sqlite/sqlite.py +123 -22
- llama_stack/providers/utils/memory/file_utils.py +1 -1
- llama_stack/providers/utils/memory/openai_vector_store_mixin.py +1304 -0
- llama_stack/providers/utils/memory/vector_store.py +220 -82
- llama_stack/providers/utils/pagination.py +43 -0
- llama_stack/providers/utils/responses/__init__.py +5 -0
- llama_stack/providers/utils/responses/responses_store.py +292 -0
- llama_stack/providers/utils/scheduler.py +270 -0
- llama_stack/providers/utils/scoring/__init__.py +5 -0
- llama_stack/providers/utils/scoring/aggregation_utils.py +75 -0
- llama_stack/providers/utils/scoring/base_scoring_fn.py +114 -0
- llama_stack/providers/utils/scoring/basic_scoring_utils.py +26 -0
- llama_stack/providers/utils/sqlstore/__init__.py +5 -0
- llama_stack/providers/utils/sqlstore/api.py +128 -0
- llama_stack/providers/utils/sqlstore/authorized_sqlstore.py +319 -0
- llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py +343 -0
- llama_stack/providers/utils/sqlstore/sqlstore.py +70 -0
- llama_stack/providers/utils/telemetry/trace_protocol.py +142 -0
- llama_stack/providers/utils/telemetry/tracing.py +192 -53
- llama_stack/providers/utils/tools/__init__.py +5 -0
- llama_stack/providers/utils/tools/mcp.py +148 -0
- llama_stack/providers/utils/tools/ttl_dict.py +70 -0
- llama_stack/providers/utils/vector_io/__init__.py +5 -0
- llama_stack/providers/utils/vector_io/vector_utils.py +156 -0
- llama_stack/schema_utils.py +118 -0
- llama_stack/strong_typing/__init__.py +19 -0
- llama_stack/strong_typing/auxiliary.py +228 -0
- llama_stack/strong_typing/classdef.py +440 -0
- llama_stack/strong_typing/core.py +46 -0
- llama_stack/strong_typing/deserializer.py +877 -0
- llama_stack/strong_typing/docstring.py +409 -0
- llama_stack/strong_typing/exception.py +23 -0
- llama_stack/strong_typing/inspection.py +1085 -0
- llama_stack/strong_typing/mapping.py +40 -0
- llama_stack/strong_typing/name.py +182 -0
- llama_stack/strong_typing/py.typed +0 -0
- llama_stack/strong_typing/schema.py +792 -0
- llama_stack/strong_typing/serialization.py +97 -0
- llama_stack/strong_typing/serializer.py +500 -0
- llama_stack/strong_typing/slots.py +27 -0
- llama_stack/strong_typing/topological.py +89 -0
- llama_stack/testing/__init__.py +5 -0
- llama_stack/testing/api_recorder.py +956 -0
- llama_stack/ui/node_modules/flatted/python/flatted.py +149 -0
- llama_stack-0.3.4.dist-info/METADATA +261 -0
- llama_stack-0.3.4.dist-info/RECORD +625 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/WHEEL +1 -1
- llama_stack/apis/agents/client.py +0 -292
- llama_stack/apis/agents/event_logger.py +0 -184
- llama_stack/apis/batch_inference/batch_inference.py +0 -72
- llama_stack/apis/common/deployment_types.py +0 -31
- llama_stack/apis/dataset/dataset.py +0 -63
- llama_stack/apis/evals/evals.py +0 -122
- llama_stack/apis/inference/client.py +0 -197
- llama_stack/apis/inspect/client.py +0 -82
- llama_stack/apis/memory/client.py +0 -155
- llama_stack/apis/memory/memory.py +0 -65
- llama_stack/apis/memory_banks/__init__.py +0 -7
- llama_stack/apis/memory_banks/client.py +0 -101
- llama_stack/apis/memory_banks/memory_banks.py +0 -78
- llama_stack/apis/models/client.py +0 -83
- llama_stack/apis/reward_scoring/__init__.py +0 -7
- llama_stack/apis/reward_scoring/reward_scoring.py +0 -55
- llama_stack/apis/safety/client.py +0 -105
- llama_stack/apis/shields/client.py +0 -79
- llama_stack/cli/download.py +0 -340
- llama_stack/cli/model/describe.py +0 -82
- llama_stack/cli/model/download.py +0 -24
- llama_stack/cli/model/list.py +0 -62
- llama_stack/cli/model/model.py +0 -34
- llama_stack/cli/model/prompt_format.py +0 -112
- llama_stack/cli/model/safety_models.py +0 -52
- llama_stack/cli/stack/build.py +0 -299
- llama_stack/cli/stack/configure.py +0 -178
- llama_stack/distribution/build.py +0 -123
- llama_stack/distribution/build_conda_env.sh +0 -136
- llama_stack/distribution/build_container.sh +0 -142
- llama_stack/distribution/common.sh +0 -40
- llama_stack/distribution/configure_container.sh +0 -47
- llama_stack/distribution/datatypes.py +0 -139
- llama_stack/distribution/distribution.py +0 -58
- llama_stack/distribution/inspect.py +0 -67
- llama_stack/distribution/request_headers.py +0 -57
- llama_stack/distribution/resolver.py +0 -323
- llama_stack/distribution/routers/__init__.py +0 -48
- llama_stack/distribution/routers/routers.py +0 -158
- llama_stack/distribution/routers/routing_tables.py +0 -173
- llama_stack/distribution/server/endpoints.py +0 -48
- llama_stack/distribution/server/server.py +0 -343
- llama_stack/distribution/start_conda_env.sh +0 -42
- llama_stack/distribution/start_container.sh +0 -64
- llama_stack/distribution/templates/local-bedrock-conda-example-build.yaml +0 -10
- llama_stack/distribution/templates/local-build.yaml +0 -10
- llama_stack/distribution/templates/local-databricks-build.yaml +0 -10
- llama_stack/distribution/templates/local-fireworks-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-endpoint-build.yaml +0 -10
- llama_stack/distribution/templates/local-hf-serverless-build.yaml +0 -10
- llama_stack/distribution/templates/local-ollama-build.yaml +0 -10
- llama_stack/distribution/templates/local-tgi-build.yaml +0 -10
- llama_stack/distribution/templates/local-together-build.yaml +0 -10
- llama_stack/distribution/templates/local-vllm-build.yaml +0 -10
- llama_stack/distribution/utils/exec.py +0 -105
- llama_stack/providers/adapters/agents/sample/sample.py +0 -18
- llama_stack/providers/adapters/inference/bedrock/bedrock.py +0 -451
- llama_stack/providers/adapters/inference/bedrock/config.py +0 -55
- llama_stack/providers/adapters/inference/databricks/config.py +0 -21
- llama_stack/providers/adapters/inference/databricks/databricks.py +0 -125
- llama_stack/providers/adapters/inference/fireworks/config.py +0 -20
- llama_stack/providers/adapters/inference/fireworks/fireworks.py +0 -130
- llama_stack/providers/adapters/inference/ollama/__init__.py +0 -19
- llama_stack/providers/adapters/inference/ollama/ollama.py +0 -175
- llama_stack/providers/adapters/inference/sample/sample.py +0 -23
- llama_stack/providers/adapters/inference/tgi/config.py +0 -43
- llama_stack/providers/adapters/inference/tgi/tgi.py +0 -200
- llama_stack/providers/adapters/inference/together/config.py +0 -22
- llama_stack/providers/adapters/inference/together/together.py +0 -143
- llama_stack/providers/adapters/memory/chroma/chroma.py +0 -157
- llama_stack/providers/adapters/memory/pgvector/config.py +0 -17
- llama_stack/providers/adapters/memory/pgvector/pgvector.py +0 -211
- llama_stack/providers/adapters/memory/sample/sample.py +0 -23
- llama_stack/providers/adapters/memory/weaviate/__init__.py +0 -15
- llama_stack/providers/adapters/memory/weaviate/weaviate.py +0 -190
- llama_stack/providers/adapters/safety/bedrock/bedrock.py +0 -113
- llama_stack/providers/adapters/safety/bedrock/config.py +0 -16
- llama_stack/providers/adapters/safety/sample/sample.py +0 -23
- llama_stack/providers/adapters/safety/together/__init__.py +0 -18
- llama_stack/providers/adapters/safety/together/config.py +0 -26
- llama_stack/providers/adapters/safety/together/together.py +0 -101
- llama_stack/providers/adapters/telemetry/opentelemetry/config.py +0 -12
- llama_stack/providers/adapters/telemetry/opentelemetry/opentelemetry.py +0 -201
- llama_stack/providers/adapters/telemetry/sample/__init__.py +0 -17
- llama_stack/providers/adapters/telemetry/sample/config.py +0 -12
- llama_stack/providers/adapters/telemetry/sample/sample.py +0 -18
- llama_stack/providers/impls/meta_reference/agents/agent_instance.py +0 -844
- llama_stack/providers/impls/meta_reference/agents/agents.py +0 -161
- llama_stack/providers/impls/meta_reference/agents/persistence.py +0 -84
- llama_stack/providers/impls/meta_reference/agents/rag/context_retriever.py +0 -74
- llama_stack/providers/impls/meta_reference/agents/safety.py +0 -57
- llama_stack/providers/impls/meta_reference/agents/tests/code_execution.py +0 -93
- llama_stack/providers/impls/meta_reference/agents/tests/test_chat_agent.py +0 -305
- llama_stack/providers/impls/meta_reference/agents/tools/base.py +0 -20
- llama_stack/providers/impls/meta_reference/agents/tools/builtin.py +0 -375
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_env_prefix.py +0 -133
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/code_execution.py +0 -256
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/matplotlib_custom_backend.py +0 -87
- llama_stack/providers/impls/meta_reference/agents/tools/ipython_tool/utils.py +0 -21
- llama_stack/providers/impls/meta_reference/agents/tools/safety.py +0 -43
- llama_stack/providers/impls/meta_reference/codeshield/code_scanner.py +0 -58
- llama_stack/providers/impls/meta_reference/inference/config.py +0 -45
- llama_stack/providers/impls/meta_reference/inference/generation.py +0 -376
- llama_stack/providers/impls/meta_reference/inference/inference.py +0 -280
- llama_stack/providers/impls/meta_reference/inference/model_parallel.py +0 -99
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_impls.py +0 -184
- llama_stack/providers/impls/meta_reference/inference/quantization/fp8_txest_disabled.py +0 -76
- llama_stack/providers/impls/meta_reference/inference/quantization/loader.py +0 -97
- llama_stack/providers/impls/meta_reference/inference/quantization/scripts/quantize_checkpoint.py +0 -161
- llama_stack/providers/impls/meta_reference/memory/__init__.py +0 -19
- llama_stack/providers/impls/meta_reference/memory/faiss.py +0 -113
- llama_stack/providers/impls/meta_reference/safety/__init__.py +0 -17
- llama_stack/providers/impls/meta_reference/safety/base.py +0 -57
- llama_stack/providers/impls/meta_reference/safety/config.py +0 -48
- llama_stack/providers/impls/meta_reference/safety/llama_guard.py +0 -268
- llama_stack/providers/impls/meta_reference/safety/prompt_guard.py +0 -145
- llama_stack/providers/impls/meta_reference/safety/safety.py +0 -112
- llama_stack/providers/impls/meta_reference/telemetry/console.py +0 -89
- llama_stack/providers/impls/vllm/config.py +0 -35
- llama_stack/providers/impls/vllm/vllm.py +0 -241
- llama_stack/providers/registry/memory.py +0 -78
- llama_stack/providers/registry/telemetry.py +0 -44
- llama_stack/providers/tests/agents/test_agents.py +0 -210
- llama_stack/providers/tests/inference/test_inference.py +0 -257
- llama_stack/providers/tests/inference/test_prompt_adapter.py +0 -126
- llama_stack/providers/tests/memory/test_memory.py +0 -136
- llama_stack/providers/tests/resolver.py +0 -100
- llama_stack/providers/tests/safety/test_safety.py +0 -77
- llama_stack-0.0.42.dist-info/METADATA +0 -137
- llama_stack-0.0.42.dist-info/RECORD +0 -256
- /llama_stack/{distribution → core}/__init__.py +0 -0
- /llama_stack/{distribution/server → core/access_control}/__init__.py +0 -0
- /llama_stack/{distribution/utils → core/conversations}/__init__.py +0 -0
- /llama_stack/{providers/adapters → core/prompts}/__init__.py +0 -0
- /llama_stack/{providers/adapters/agents → core/routing_tables}/__init__.py +0 -0
- /llama_stack/{providers/adapters/inference → core/server}/__init__.py +0 -0
- /llama_stack/{providers/adapters/memory → core/storage}/__init__.py +0 -0
- /llama_stack/{providers/adapters/safety → core/ui}/__init__.py +0 -0
- /llama_stack/{providers/adapters/telemetry → core/ui/modules}/__init__.py +0 -0
- /llama_stack/{providers/impls → core/ui/page}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference → core/ui/page/distribution}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/rag → core/ui/page/evaluations}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tests → core/ui/page/playground}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools → core/utils}/__init__.py +0 -0
- /llama_stack/{distribution → core}/utils/dynamic.py +0 -0
- /llama_stack/{distribution → core}/utils/serialize.py +0 -0
- /llama_stack/{providers/impls/meta_reference/agents/tools/ipython_tool → distributions}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization → models}/__init__.py +0 -0
- /llama_stack/{providers/impls/meta_reference/inference/quantization/scripts → models/llama}/__init__.py +0 -0
- /llama_stack/{providers/tests → models/llama/llama3}/__init__.py +0 -0
- /llama_stack/{providers/tests/agents → models/llama/llama3/quantization}/__init__.py +0 -0
- /llama_stack/{providers/tests/inference → models/llama/llama3_2}/__init__.py +0 -0
- /llama_stack/{providers/tests/memory → models/llama/llama3_3}/__init__.py +0 -0
- /llama_stack/{providers/tests/safety → models/llama/llama4}/__init__.py +0 -0
- /llama_stack/{scripts → models/llama/llama4/prompt_templates}/__init__.py +0 -0
- /llama_stack/providers/{adapters → remote}/safety/bedrock/__init__.py +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/entry_points.txt +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info/licenses}/LICENSE +0 -0
- {llama_stack-0.0.42.dist-info → llama_stack-0.3.4.dist-info}/top_level.txt +0 -0
|
@@ -1,113 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
import logging
|
|
8
|
-
|
|
9
|
-
from typing import Any, Dict, List, Optional
|
|
10
|
-
|
|
11
|
-
import faiss
|
|
12
|
-
import numpy as np
|
|
13
|
-
from numpy.typing import NDArray
|
|
14
|
-
|
|
15
|
-
from llama_models.llama3.api.datatypes import * # noqa: F403
|
|
16
|
-
|
|
17
|
-
from llama_stack.apis.memory import * # noqa: F403
|
|
18
|
-
from llama_stack.providers.datatypes import MemoryBanksProtocolPrivate
|
|
19
|
-
|
|
20
|
-
from llama_stack.providers.utils.memory.vector_store import (
|
|
21
|
-
ALL_MINILM_L6_V2_DIMENSION,
|
|
22
|
-
BankWithIndex,
|
|
23
|
-
EmbeddingIndex,
|
|
24
|
-
)
|
|
25
|
-
from llama_stack.providers.utils.telemetry import tracing
|
|
26
|
-
|
|
27
|
-
from .config import FaissImplConfig
|
|
28
|
-
|
|
29
|
-
logger = logging.getLogger(__name__)
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
class FaissIndex(EmbeddingIndex):
|
|
33
|
-
id_by_index: Dict[int, str]
|
|
34
|
-
chunk_by_index: Dict[int, str]
|
|
35
|
-
|
|
36
|
-
def __init__(self, dimension: int):
|
|
37
|
-
self.index = faiss.IndexFlatL2(dimension)
|
|
38
|
-
self.id_by_index = {}
|
|
39
|
-
self.chunk_by_index = {}
|
|
40
|
-
|
|
41
|
-
@tracing.span(name="add_chunks")
|
|
42
|
-
async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray):
|
|
43
|
-
indexlen = len(self.id_by_index)
|
|
44
|
-
for i, chunk in enumerate(chunks):
|
|
45
|
-
self.chunk_by_index[indexlen + i] = chunk
|
|
46
|
-
self.id_by_index[indexlen + i] = chunk.document_id
|
|
47
|
-
|
|
48
|
-
self.index.add(np.array(embeddings).astype(np.float32))
|
|
49
|
-
|
|
50
|
-
async def query(self, embedding: NDArray, k: int) -> QueryDocumentsResponse:
|
|
51
|
-
distances, indices = self.index.search(
|
|
52
|
-
embedding.reshape(1, -1).astype(np.float32), k
|
|
53
|
-
)
|
|
54
|
-
|
|
55
|
-
chunks = []
|
|
56
|
-
scores = []
|
|
57
|
-
for d, i in zip(distances[0], indices[0]):
|
|
58
|
-
if i < 0:
|
|
59
|
-
continue
|
|
60
|
-
chunks.append(self.chunk_by_index[int(i)])
|
|
61
|
-
scores.append(1.0 / float(d))
|
|
62
|
-
|
|
63
|
-
return QueryDocumentsResponse(chunks=chunks, scores=scores)
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
class FaissMemoryImpl(Memory, MemoryBanksProtocolPrivate):
|
|
67
|
-
def __init__(self, config: FaissImplConfig) -> None:
|
|
68
|
-
self.config = config
|
|
69
|
-
self.cache = {}
|
|
70
|
-
|
|
71
|
-
async def initialize(self) -> None: ...
|
|
72
|
-
|
|
73
|
-
async def shutdown(self) -> None: ...
|
|
74
|
-
|
|
75
|
-
async def register_memory_bank(
|
|
76
|
-
self,
|
|
77
|
-
memory_bank: MemoryBankDef,
|
|
78
|
-
) -> None:
|
|
79
|
-
assert (
|
|
80
|
-
memory_bank.type == MemoryBankType.vector.value
|
|
81
|
-
), f"Only vector banks are supported {memory_bank.type}"
|
|
82
|
-
|
|
83
|
-
index = BankWithIndex(
|
|
84
|
-
bank=memory_bank, index=FaissIndex(ALL_MINILM_L6_V2_DIMENSION)
|
|
85
|
-
)
|
|
86
|
-
self.cache[memory_bank.identifier] = index
|
|
87
|
-
|
|
88
|
-
async def list_memory_banks(self) -> List[MemoryBankDef]:
|
|
89
|
-
return [i.bank for i in self.cache.values()]
|
|
90
|
-
|
|
91
|
-
async def insert_documents(
|
|
92
|
-
self,
|
|
93
|
-
bank_id: str,
|
|
94
|
-
documents: List[MemoryBankDocument],
|
|
95
|
-
ttl_seconds: Optional[int] = None,
|
|
96
|
-
) -> None:
|
|
97
|
-
index = self.cache.get(bank_id)
|
|
98
|
-
if index is None:
|
|
99
|
-
raise ValueError(f"Bank {bank_id} not found")
|
|
100
|
-
|
|
101
|
-
await index.insert_documents(documents)
|
|
102
|
-
|
|
103
|
-
async def query_documents(
|
|
104
|
-
self,
|
|
105
|
-
bank_id: str,
|
|
106
|
-
query: InterleavedTextMedia,
|
|
107
|
-
params: Optional[Dict[str, Any]] = None,
|
|
108
|
-
) -> QueryDocumentsResponse:
|
|
109
|
-
index = self.cache.get(bank_id)
|
|
110
|
-
if index is None:
|
|
111
|
-
raise ValueError(f"Bank {bank_id} not found")
|
|
112
|
-
|
|
113
|
-
return await index.query_documents(query, params)
|
|
@@ -1,17 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
from .config import SafetyConfig
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
async def get_provider_impl(config: SafetyConfig, deps):
|
|
11
|
-
from .safety import MetaReferenceSafetyImpl
|
|
12
|
-
|
|
13
|
-
assert isinstance(config, SafetyConfig), f"Unexpected config type: {type(config)}"
|
|
14
|
-
|
|
15
|
-
impl = MetaReferenceSafetyImpl(config, deps)
|
|
16
|
-
await impl.initialize()
|
|
17
|
-
return impl
|
|
@@ -1,57 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
from abc import ABC, abstractmethod
|
|
8
|
-
from typing import List
|
|
9
|
-
|
|
10
|
-
from llama_models.llama3.api.datatypes import interleaved_text_media_as_str, Message
|
|
11
|
-
from pydantic import BaseModel
|
|
12
|
-
from llama_stack.apis.safety import * # noqa: F403
|
|
13
|
-
|
|
14
|
-
CANNED_RESPONSE_TEXT = "I can't answer that. Can I help with something else?"
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
# TODO: clean this up; just remove this type completely
|
|
18
|
-
class ShieldResponse(BaseModel):
|
|
19
|
-
is_violation: bool
|
|
20
|
-
violation_type: Optional[str] = None
|
|
21
|
-
violation_return_message: Optional[str] = None
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
# TODO: this is a caller / agent concern
|
|
25
|
-
class OnViolationAction(Enum):
|
|
26
|
-
IGNORE = 0
|
|
27
|
-
WARN = 1
|
|
28
|
-
RAISE = 2
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
class ShieldBase(ABC):
|
|
32
|
-
def __init__(
|
|
33
|
-
self,
|
|
34
|
-
on_violation_action: OnViolationAction = OnViolationAction.RAISE,
|
|
35
|
-
):
|
|
36
|
-
self.on_violation_action = on_violation_action
|
|
37
|
-
|
|
38
|
-
@abstractmethod
|
|
39
|
-
async def run(self, messages: List[Message]) -> ShieldResponse:
|
|
40
|
-
raise NotImplementedError()
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
def message_content_as_str(message: Message) -> str:
|
|
44
|
-
return interleaved_text_media_as_str(message.content)
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
class TextShield(ShieldBase):
|
|
48
|
-
def convert_messages_to_text(self, messages: List[Message]) -> str:
|
|
49
|
-
return "\n".join([message_content_as_str(m) for m in messages])
|
|
50
|
-
|
|
51
|
-
async def run(self, messages: List[Message]) -> ShieldResponse:
|
|
52
|
-
text = self.convert_messages_to_text(messages)
|
|
53
|
-
return await self.run_impl(text)
|
|
54
|
-
|
|
55
|
-
@abstractmethod
|
|
56
|
-
async def run_impl(self, text: str) -> ShieldResponse:
|
|
57
|
-
raise NotImplementedError()
|
|
@@ -1,48 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
from enum import Enum
|
|
8
|
-
from typing import List, Optional
|
|
9
|
-
|
|
10
|
-
from llama_models.sku_list import CoreModelId, safety_models
|
|
11
|
-
|
|
12
|
-
from pydantic import BaseModel, field_validator
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
class PromptGuardType(Enum):
|
|
16
|
-
injection = "injection"
|
|
17
|
-
jailbreak = "jailbreak"
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
class LlamaGuardShieldConfig(BaseModel):
|
|
21
|
-
model: str = "Llama-Guard-3-1B"
|
|
22
|
-
excluded_categories: List[str] = []
|
|
23
|
-
|
|
24
|
-
@field_validator("model")
|
|
25
|
-
@classmethod
|
|
26
|
-
def validate_model(cls, model: str) -> str:
|
|
27
|
-
permitted_models = [
|
|
28
|
-
m.descriptor()
|
|
29
|
-
for m in safety_models()
|
|
30
|
-
if (
|
|
31
|
-
m.core_model_id
|
|
32
|
-
in {
|
|
33
|
-
CoreModelId.llama_guard_3_8b,
|
|
34
|
-
CoreModelId.llama_guard_3_1b,
|
|
35
|
-
CoreModelId.llama_guard_3_11b_vision,
|
|
36
|
-
}
|
|
37
|
-
)
|
|
38
|
-
]
|
|
39
|
-
if model not in permitted_models:
|
|
40
|
-
raise ValueError(
|
|
41
|
-
f"Invalid model: {model}. Must be one of {permitted_models}"
|
|
42
|
-
)
|
|
43
|
-
return model
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
class SafetyConfig(BaseModel):
|
|
47
|
-
llama_guard_shield: Optional[LlamaGuardShieldConfig] = None
|
|
48
|
-
enable_prompt_guard: Optional[bool] = False
|
|
@@ -1,268 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
import re
|
|
8
|
-
|
|
9
|
-
from string import Template
|
|
10
|
-
from typing import List, Optional
|
|
11
|
-
|
|
12
|
-
from llama_models.llama3.api.datatypes import * # noqa: F403
|
|
13
|
-
from llama_stack.apis.inference import * # noqa: F403
|
|
14
|
-
|
|
15
|
-
from .base import CANNED_RESPONSE_TEXT, OnViolationAction, ShieldBase, ShieldResponse
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
SAFE_RESPONSE = "safe"
|
|
19
|
-
_INSTANCE = None
|
|
20
|
-
|
|
21
|
-
CAT_VIOLENT_CRIMES = "Violent Crimes"
|
|
22
|
-
CAT_NON_VIOLENT_CRIMES = "Non-Violent Crimes"
|
|
23
|
-
CAT_SEX_CRIMES = "Sex Crimes"
|
|
24
|
-
CAT_CHILD_EXPLOITATION = "Child Exploitation"
|
|
25
|
-
CAT_DEFAMATION = "Defamation"
|
|
26
|
-
CAT_SPECIALIZED_ADVICE = "Specialized Advice"
|
|
27
|
-
CAT_PRIVACY = "Privacy"
|
|
28
|
-
CAT_INTELLECTUAL_PROPERTY = "Intellectual Property"
|
|
29
|
-
CAT_INDISCRIMINATE_WEAPONS = "Indiscriminate Weapons"
|
|
30
|
-
CAT_HATE = "Hate"
|
|
31
|
-
CAT_SELF_HARM = "Self-Harm"
|
|
32
|
-
CAT_SEXUAL_CONTENT = "Sexual Content"
|
|
33
|
-
CAT_ELECTIONS = "Elections"
|
|
34
|
-
CAT_CODE_INTERPRETER_ABUSE = "Code Interpreter Abuse"
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
SAFETY_CATEGORIES_TO_CODE_MAP = {
|
|
38
|
-
CAT_VIOLENT_CRIMES: "S1",
|
|
39
|
-
CAT_NON_VIOLENT_CRIMES: "S2",
|
|
40
|
-
CAT_SEX_CRIMES: "S3",
|
|
41
|
-
CAT_CHILD_EXPLOITATION: "S4",
|
|
42
|
-
CAT_DEFAMATION: "S5",
|
|
43
|
-
CAT_SPECIALIZED_ADVICE: "S6",
|
|
44
|
-
CAT_PRIVACY: "S7",
|
|
45
|
-
CAT_INTELLECTUAL_PROPERTY: "S8",
|
|
46
|
-
CAT_INDISCRIMINATE_WEAPONS: "S9",
|
|
47
|
-
CAT_HATE: "S10",
|
|
48
|
-
CAT_SELF_HARM: "S11",
|
|
49
|
-
CAT_SEXUAL_CONTENT: "S12",
|
|
50
|
-
CAT_ELECTIONS: "S13",
|
|
51
|
-
CAT_CODE_INTERPRETER_ABUSE: "S14",
|
|
52
|
-
}
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
DEFAULT_LG_V3_SAFETY_CATEGORIES = [
|
|
56
|
-
CAT_VIOLENT_CRIMES,
|
|
57
|
-
CAT_NON_VIOLENT_CRIMES,
|
|
58
|
-
CAT_SEX_CRIMES,
|
|
59
|
-
CAT_CHILD_EXPLOITATION,
|
|
60
|
-
CAT_DEFAMATION,
|
|
61
|
-
CAT_SPECIALIZED_ADVICE,
|
|
62
|
-
CAT_PRIVACY,
|
|
63
|
-
CAT_INTELLECTUAL_PROPERTY,
|
|
64
|
-
CAT_INDISCRIMINATE_WEAPONS,
|
|
65
|
-
CAT_HATE,
|
|
66
|
-
CAT_SELF_HARM,
|
|
67
|
-
CAT_SEXUAL_CONTENT,
|
|
68
|
-
CAT_ELECTIONS,
|
|
69
|
-
]
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
MODEL_TO_SAFETY_CATEGORIES_MAP = {
|
|
73
|
-
CoreModelId.llama_guard_3_8b.value: (
|
|
74
|
-
DEFAULT_LG_V3_SAFETY_CATEGORIES + [CAT_CODE_INTERPRETER_ABUSE]
|
|
75
|
-
),
|
|
76
|
-
CoreModelId.llama_guard_3_1b.value: DEFAULT_LG_V3_SAFETY_CATEGORIES,
|
|
77
|
-
CoreModelId.llama_guard_3_11b_vision.value: DEFAULT_LG_V3_SAFETY_CATEGORIES,
|
|
78
|
-
}
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
PROMPT_TASK = "Task: Check if there is unsafe content in '$agent_type' messages in conversations according our safety policy with the below categories."
|
|
82
|
-
|
|
83
|
-
SAFETY_CATEGORIES = """
|
|
84
|
-
|
|
85
|
-
<BEGIN UNSAFE CONTENT CATEGORIES>
|
|
86
|
-
$categories
|
|
87
|
-
<END UNSAFE CONTENT CATEGORIES>
|
|
88
|
-
"""
|
|
89
|
-
|
|
90
|
-
PROMPT_CONVERSATION = """
|
|
91
|
-
<BEGIN CONVERSATION>
|
|
92
|
-
|
|
93
|
-
$conversations
|
|
94
|
-
|
|
95
|
-
<END CONVERSATION>
|
|
96
|
-
"""
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
PROMPT_INSTRUCTIONS = """
|
|
100
|
-
Provide your safety assessment for ONLY THE LAST $agent_type message in the above conversation:
|
|
101
|
-
- First line must read 'safe' or 'unsafe'.
|
|
102
|
-
- If unsafe, a second line must include a comma-separated list of violated categories."""
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
PROMPT_TEMPLATE = Template(
|
|
106
|
-
f"{PROMPT_TASK}{SAFETY_CATEGORIES}{PROMPT_CONVERSATION}{PROMPT_INSTRUCTIONS}"
|
|
107
|
-
)
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
class LlamaGuardShield(ShieldBase):
|
|
111
|
-
def __init__(
|
|
112
|
-
self,
|
|
113
|
-
model: str,
|
|
114
|
-
inference_api: Inference,
|
|
115
|
-
excluded_categories: List[str] = None,
|
|
116
|
-
on_violation_action: OnViolationAction = OnViolationAction.RAISE,
|
|
117
|
-
):
|
|
118
|
-
super().__init__(on_violation_action)
|
|
119
|
-
|
|
120
|
-
if excluded_categories is None:
|
|
121
|
-
excluded_categories = []
|
|
122
|
-
|
|
123
|
-
assert len(excluded_categories) == 0 or all(
|
|
124
|
-
x in SAFETY_CATEGORIES_TO_CODE_MAP.values() for x in excluded_categories
|
|
125
|
-
), "Invalid categories in excluded categories. Expected format is ['S1', 'S2', ..]"
|
|
126
|
-
|
|
127
|
-
if model not in MODEL_TO_SAFETY_CATEGORIES_MAP:
|
|
128
|
-
raise ValueError(f"Unsupported model: {model}")
|
|
129
|
-
|
|
130
|
-
self.model = model
|
|
131
|
-
self.inference_api = inference_api
|
|
132
|
-
self.excluded_categories = excluded_categories
|
|
133
|
-
|
|
134
|
-
def check_unsafe_response(self, response: str) -> Optional[str]:
|
|
135
|
-
match = re.match(r"^unsafe\n(.*)$", response)
|
|
136
|
-
if match:
|
|
137
|
-
# extracts the unsafe code
|
|
138
|
-
extracted = match.group(1)
|
|
139
|
-
return extracted
|
|
140
|
-
|
|
141
|
-
return None
|
|
142
|
-
|
|
143
|
-
def get_safety_categories(self) -> List[str]:
|
|
144
|
-
excluded_categories = self.excluded_categories
|
|
145
|
-
if set(excluded_categories) == set(SAFETY_CATEGORIES_TO_CODE_MAP.values()):
|
|
146
|
-
excluded_categories = []
|
|
147
|
-
|
|
148
|
-
final_categories = []
|
|
149
|
-
|
|
150
|
-
all_categories = MODEL_TO_SAFETY_CATEGORIES_MAP[self.model]
|
|
151
|
-
for cat in all_categories:
|
|
152
|
-
cat_code = SAFETY_CATEGORIES_TO_CODE_MAP[cat]
|
|
153
|
-
if cat_code in excluded_categories:
|
|
154
|
-
continue
|
|
155
|
-
final_categories.append(f"{cat_code}: {cat}.")
|
|
156
|
-
|
|
157
|
-
return final_categories
|
|
158
|
-
|
|
159
|
-
def validate_messages(self, messages: List[Message]) -> None:
|
|
160
|
-
if len(messages) == 0:
|
|
161
|
-
raise ValueError("Messages must not be empty")
|
|
162
|
-
if messages[0].role != Role.user.value:
|
|
163
|
-
raise ValueError("Messages must start with user")
|
|
164
|
-
|
|
165
|
-
if len(messages) >= 2 and (
|
|
166
|
-
messages[0].role == Role.user.value and messages[1].role == Role.user.value
|
|
167
|
-
):
|
|
168
|
-
messages = messages[1:]
|
|
169
|
-
|
|
170
|
-
for i in range(1, len(messages)):
|
|
171
|
-
if messages[i].role == messages[i - 1].role:
|
|
172
|
-
raise ValueError(
|
|
173
|
-
f"Messages must alternate between user and assistant. Message {i} has the same role as message {i - 1}"
|
|
174
|
-
)
|
|
175
|
-
return messages
|
|
176
|
-
|
|
177
|
-
async def run(self, messages: List[Message]) -> ShieldResponse:
|
|
178
|
-
messages = self.validate_messages(messages)
|
|
179
|
-
|
|
180
|
-
if self.model == CoreModelId.llama_guard_3_11b_vision.value:
|
|
181
|
-
shield_input_message = self.build_vision_shield_input(messages)
|
|
182
|
-
else:
|
|
183
|
-
shield_input_message = self.build_text_shield_input(messages)
|
|
184
|
-
|
|
185
|
-
# TODO: llama-stack inference protocol has issues with non-streaming inference code
|
|
186
|
-
content = ""
|
|
187
|
-
async for chunk in self.inference_api.chat_completion(
|
|
188
|
-
model=self.model,
|
|
189
|
-
messages=[shield_input_message],
|
|
190
|
-
stream=True,
|
|
191
|
-
):
|
|
192
|
-
event = chunk.event
|
|
193
|
-
if event.event_type == ChatCompletionResponseEventType.progress:
|
|
194
|
-
assert isinstance(event.delta, str)
|
|
195
|
-
content += event.delta
|
|
196
|
-
|
|
197
|
-
content = content.strip()
|
|
198
|
-
shield_response = self.get_shield_response(content)
|
|
199
|
-
return shield_response
|
|
200
|
-
|
|
201
|
-
def build_text_shield_input(self, messages: List[Message]) -> UserMessage:
|
|
202
|
-
return UserMessage(content=self.build_prompt(messages))
|
|
203
|
-
|
|
204
|
-
def build_vision_shield_input(self, messages: List[Message]) -> UserMessage:
|
|
205
|
-
conversation = []
|
|
206
|
-
most_recent_img = None
|
|
207
|
-
|
|
208
|
-
for m in messages[::-1]:
|
|
209
|
-
if isinstance(m.content, str):
|
|
210
|
-
conversation.append(m)
|
|
211
|
-
elif isinstance(m.content, ImageMedia):
|
|
212
|
-
if most_recent_img is None and m.role == Role.user.value:
|
|
213
|
-
most_recent_img = m.content
|
|
214
|
-
conversation.append(m)
|
|
215
|
-
elif isinstance(m.content, list):
|
|
216
|
-
content = []
|
|
217
|
-
for c in m.content:
|
|
218
|
-
if isinstance(c, str):
|
|
219
|
-
content.append(c)
|
|
220
|
-
elif isinstance(c, ImageMedia):
|
|
221
|
-
if most_recent_img is None and m.role == Role.user.value:
|
|
222
|
-
most_recent_img = c
|
|
223
|
-
content.append(c)
|
|
224
|
-
else:
|
|
225
|
-
raise ValueError(f"Unknown content type: {c}")
|
|
226
|
-
|
|
227
|
-
conversation.append(UserMessage(content=content))
|
|
228
|
-
else:
|
|
229
|
-
raise ValueError(f"Unknown content type: {m.content}")
|
|
230
|
-
|
|
231
|
-
prompt = []
|
|
232
|
-
if most_recent_img is not None:
|
|
233
|
-
prompt.append(most_recent_img)
|
|
234
|
-
prompt.append(self.build_prompt(conversation[::-1]))
|
|
235
|
-
|
|
236
|
-
return UserMessage(content=prompt)
|
|
237
|
-
|
|
238
|
-
def build_prompt(self, messages: List[Message]) -> str:
|
|
239
|
-
categories = self.get_safety_categories()
|
|
240
|
-
categories_str = "\n".join(categories)
|
|
241
|
-
conversations_str = "\n\n".join(
|
|
242
|
-
[
|
|
243
|
-
f"{m.role.capitalize()}: {interleaved_text_media_as_str(m.content)}"
|
|
244
|
-
for m in messages
|
|
245
|
-
]
|
|
246
|
-
)
|
|
247
|
-
return PROMPT_TEMPLATE.substitute(
|
|
248
|
-
agent_type=messages[-1].role.capitalize(),
|
|
249
|
-
categories=categories_str,
|
|
250
|
-
conversations=conversations_str,
|
|
251
|
-
)
|
|
252
|
-
|
|
253
|
-
def get_shield_response(self, response: str) -> ShieldResponse:
|
|
254
|
-
response = response.strip()
|
|
255
|
-
if response == SAFE_RESPONSE:
|
|
256
|
-
return ShieldResponse(is_violation=False)
|
|
257
|
-
unsafe_code = self.check_unsafe_response(response)
|
|
258
|
-
if unsafe_code:
|
|
259
|
-
unsafe_code_list = unsafe_code.split(",")
|
|
260
|
-
if set(unsafe_code_list).issubset(set(self.excluded_categories)):
|
|
261
|
-
return ShieldResponse(is_violation=False)
|
|
262
|
-
return ShieldResponse(
|
|
263
|
-
is_violation=True,
|
|
264
|
-
violation_type=unsafe_code,
|
|
265
|
-
violation_return_message=CANNED_RESPONSE_TEXT,
|
|
266
|
-
)
|
|
267
|
-
|
|
268
|
-
raise ValueError(f"Unexpected response: {response}")
|
|
@@ -1,145 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
from enum import auto, Enum
|
|
8
|
-
from typing import List
|
|
9
|
-
|
|
10
|
-
import torch
|
|
11
|
-
|
|
12
|
-
from llama_models.llama3.api.datatypes import Message
|
|
13
|
-
from termcolor import cprint
|
|
14
|
-
|
|
15
|
-
from .base import message_content_as_str, OnViolationAction, ShieldResponse, TextShield
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
class PromptGuardShield(TextShield):
|
|
19
|
-
class Mode(Enum):
|
|
20
|
-
INJECTION = auto()
|
|
21
|
-
JAILBREAK = auto()
|
|
22
|
-
|
|
23
|
-
_instances = {}
|
|
24
|
-
_model_cache = None
|
|
25
|
-
|
|
26
|
-
@staticmethod
|
|
27
|
-
def instance(
|
|
28
|
-
model_dir: str,
|
|
29
|
-
threshold: float = 0.9,
|
|
30
|
-
temperature: float = 1.0,
|
|
31
|
-
mode: "PromptGuardShield.Mode" = Mode.JAILBREAK,
|
|
32
|
-
on_violation_action=OnViolationAction.RAISE,
|
|
33
|
-
) -> "PromptGuardShield":
|
|
34
|
-
action_value = on_violation_action.value
|
|
35
|
-
key = (model_dir, threshold, temperature, mode, action_value)
|
|
36
|
-
if key not in PromptGuardShield._instances:
|
|
37
|
-
PromptGuardShield._instances[key] = PromptGuardShield(
|
|
38
|
-
model_dir=model_dir,
|
|
39
|
-
threshold=threshold,
|
|
40
|
-
temperature=temperature,
|
|
41
|
-
mode=mode,
|
|
42
|
-
on_violation_action=on_violation_action,
|
|
43
|
-
)
|
|
44
|
-
return PromptGuardShield._instances[key]
|
|
45
|
-
|
|
46
|
-
def __init__(
|
|
47
|
-
self,
|
|
48
|
-
model_dir: str,
|
|
49
|
-
threshold: float = 0.9,
|
|
50
|
-
temperature: float = 1.0,
|
|
51
|
-
mode: "PromptGuardShield.Mode" = Mode.JAILBREAK,
|
|
52
|
-
on_violation_action: OnViolationAction = OnViolationAction.RAISE,
|
|
53
|
-
):
|
|
54
|
-
super().__init__(on_violation_action)
|
|
55
|
-
assert (
|
|
56
|
-
model_dir is not None
|
|
57
|
-
), "Must provide a model directory for prompt injection shield"
|
|
58
|
-
if temperature <= 0:
|
|
59
|
-
raise ValueError("Temperature must be greater than 0")
|
|
60
|
-
self.device = "cuda"
|
|
61
|
-
if PromptGuardShield._model_cache is None:
|
|
62
|
-
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
|
63
|
-
|
|
64
|
-
# load model and tokenizer
|
|
65
|
-
tokenizer = AutoTokenizer.from_pretrained(model_dir)
|
|
66
|
-
model = AutoModelForSequenceClassification.from_pretrained(
|
|
67
|
-
model_dir, device_map=self.device
|
|
68
|
-
)
|
|
69
|
-
PromptGuardShield._model_cache = (tokenizer, model)
|
|
70
|
-
|
|
71
|
-
self.tokenizer, self.model = PromptGuardShield._model_cache
|
|
72
|
-
self.temperature = temperature
|
|
73
|
-
self.threshold = threshold
|
|
74
|
-
self.mode = mode
|
|
75
|
-
|
|
76
|
-
def convert_messages_to_text(self, messages: List[Message]) -> str:
|
|
77
|
-
return message_content_as_str(messages[-1])
|
|
78
|
-
|
|
79
|
-
async def run_impl(self, text: str) -> ShieldResponse:
|
|
80
|
-
# run model on messages and return response
|
|
81
|
-
inputs = self.tokenizer(text, return_tensors="pt")
|
|
82
|
-
inputs = {name: tensor.to(self.model.device) for name, tensor in inputs.items()}
|
|
83
|
-
with torch.no_grad():
|
|
84
|
-
outputs = self.model(**inputs)
|
|
85
|
-
logits = outputs[0]
|
|
86
|
-
probabilities = torch.softmax(logits / self.temperature, dim=-1)
|
|
87
|
-
score_embedded = probabilities[0, 1].item()
|
|
88
|
-
score_malicious = probabilities[0, 2].item()
|
|
89
|
-
cprint(
|
|
90
|
-
f"Ran PromptGuardShield and got Scores: Embedded: {score_embedded}, Malicious: {score_malicious}",
|
|
91
|
-
color="magenta",
|
|
92
|
-
)
|
|
93
|
-
|
|
94
|
-
if self.mode == self.Mode.INJECTION and (
|
|
95
|
-
score_embedded + score_malicious > self.threshold
|
|
96
|
-
):
|
|
97
|
-
return ShieldResponse(
|
|
98
|
-
is_violation=True,
|
|
99
|
-
violation_type=f"prompt_injection:embedded={score_embedded},malicious={score_malicious}",
|
|
100
|
-
violation_return_message="Sorry, I cannot do this.",
|
|
101
|
-
)
|
|
102
|
-
elif self.mode == self.Mode.JAILBREAK and score_malicious > self.threshold:
|
|
103
|
-
return ShieldResponse(
|
|
104
|
-
is_violation=True,
|
|
105
|
-
violation_type=f"prompt_injection:malicious={score_malicious}",
|
|
106
|
-
violation_return_message="Sorry, I cannot do this.",
|
|
107
|
-
)
|
|
108
|
-
|
|
109
|
-
return ShieldResponse(
|
|
110
|
-
is_violation=False,
|
|
111
|
-
)
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
class JailbreakShield(PromptGuardShield):
|
|
115
|
-
def __init__(
|
|
116
|
-
self,
|
|
117
|
-
model_dir: str,
|
|
118
|
-
threshold: float = 0.9,
|
|
119
|
-
temperature: float = 1.0,
|
|
120
|
-
on_violation_action: OnViolationAction = OnViolationAction.RAISE,
|
|
121
|
-
):
|
|
122
|
-
super().__init__(
|
|
123
|
-
model_dir=model_dir,
|
|
124
|
-
threshold=threshold,
|
|
125
|
-
temperature=temperature,
|
|
126
|
-
mode=PromptGuardShield.Mode.JAILBREAK,
|
|
127
|
-
on_violation_action=on_violation_action,
|
|
128
|
-
)
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
class InjectionShield(PromptGuardShield):
|
|
132
|
-
def __init__(
|
|
133
|
-
self,
|
|
134
|
-
model_dir: str,
|
|
135
|
-
threshold: float = 0.9,
|
|
136
|
-
temperature: float = 1.0,
|
|
137
|
-
on_violation_action: OnViolationAction = OnViolationAction.RAISE,
|
|
138
|
-
):
|
|
139
|
-
super().__init__(
|
|
140
|
-
model_dir=model_dir,
|
|
141
|
-
threshold=threshold,
|
|
142
|
-
temperature=temperature,
|
|
143
|
-
mode=PromptGuardShield.Mode.INJECTION,
|
|
144
|
-
on_violation_action=on_violation_action,
|
|
145
|
-
)
|