llama-stack 0.3.4__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack/__init__.py +0 -5
- llama_stack/cli/llama.py +3 -3
- llama_stack/cli/stack/_list_deps.py +12 -23
- llama_stack/cli/stack/list_stacks.py +37 -18
- llama_stack/cli/stack/run.py +121 -11
- llama_stack/cli/stack/utils.py +0 -127
- llama_stack/core/access_control/access_control.py +69 -28
- llama_stack/core/access_control/conditions.py +15 -5
- llama_stack/core/admin.py +267 -0
- llama_stack/core/build.py +6 -74
- llama_stack/core/client.py +1 -1
- llama_stack/core/configure.py +6 -6
- llama_stack/core/conversations/conversations.py +28 -25
- llama_stack/core/datatypes.py +271 -79
- llama_stack/core/distribution.py +15 -16
- llama_stack/core/external.py +3 -3
- llama_stack/core/inspect.py +98 -15
- llama_stack/core/library_client.py +73 -61
- llama_stack/core/prompts/prompts.py +12 -11
- llama_stack/core/providers.py +17 -11
- llama_stack/core/resolver.py +65 -56
- llama_stack/core/routers/__init__.py +8 -12
- llama_stack/core/routers/datasets.py +1 -4
- llama_stack/core/routers/eval_scoring.py +7 -4
- llama_stack/core/routers/inference.py +55 -271
- llama_stack/core/routers/safety.py +52 -24
- llama_stack/core/routers/tool_runtime.py +6 -48
- llama_stack/core/routers/vector_io.py +130 -51
- llama_stack/core/routing_tables/benchmarks.py +24 -20
- llama_stack/core/routing_tables/common.py +1 -4
- llama_stack/core/routing_tables/datasets.py +22 -22
- llama_stack/core/routing_tables/models.py +119 -6
- llama_stack/core/routing_tables/scoring_functions.py +7 -7
- llama_stack/core/routing_tables/shields.py +1 -2
- llama_stack/core/routing_tables/toolgroups.py +17 -7
- llama_stack/core/routing_tables/vector_stores.py +51 -16
- llama_stack/core/server/auth.py +5 -3
- llama_stack/core/server/auth_providers.py +36 -20
- llama_stack/core/server/fastapi_router_registry.py +84 -0
- llama_stack/core/server/quota.py +2 -2
- llama_stack/core/server/routes.py +79 -27
- llama_stack/core/server/server.py +102 -87
- llama_stack/core/stack.py +201 -58
- llama_stack/core/storage/datatypes.py +26 -3
- llama_stack/{providers/utils → core/storage}/kvstore/__init__.py +2 -0
- llama_stack/{providers/utils → core/storage}/kvstore/kvstore.py +55 -24
- llama_stack/{providers/utils → core/storage}/kvstore/mongodb/mongodb.py +13 -10
- llama_stack/{providers/utils → core/storage}/kvstore/postgres/postgres.py +28 -17
- llama_stack/{providers/utils → core/storage}/kvstore/redis/redis.py +41 -16
- llama_stack/{providers/utils → core/storage}/kvstore/sqlite/sqlite.py +1 -1
- llama_stack/core/storage/sqlstore/__init__.py +17 -0
- llama_stack/{providers/utils → core/storage}/sqlstore/authorized_sqlstore.py +69 -49
- llama_stack/{providers/utils → core/storage}/sqlstore/sqlalchemy_sqlstore.py +47 -17
- llama_stack/{providers/utils → core/storage}/sqlstore/sqlstore.py +25 -8
- llama_stack/core/store/registry.py +1 -1
- llama_stack/core/utils/config.py +8 -2
- llama_stack/core/utils/config_resolution.py +32 -29
- llama_stack/core/utils/context.py +4 -10
- llama_stack/core/utils/exec.py +9 -0
- llama_stack/core/utils/type_inspection.py +45 -0
- llama_stack/distributions/dell/{run.yaml → config.yaml} +3 -2
- llama_stack/distributions/dell/dell.py +2 -2
- llama_stack/distributions/dell/run-with-safety.yaml +3 -2
- llama_stack/distributions/meta-reference-gpu/{run.yaml → config.yaml} +3 -2
- llama_stack/distributions/meta-reference-gpu/meta_reference.py +2 -2
- llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +3 -2
- llama_stack/distributions/nvidia/{run.yaml → config.yaml} +4 -4
- llama_stack/distributions/nvidia/nvidia.py +1 -1
- llama_stack/distributions/nvidia/run-with-safety.yaml +4 -4
- llama_stack/{apis/datasetio → distributions/oci}/__init__.py +1 -1
- llama_stack/distributions/oci/config.yaml +134 -0
- llama_stack/distributions/oci/oci.py +108 -0
- llama_stack/distributions/open-benchmark/{run.yaml → config.yaml} +5 -4
- llama_stack/distributions/open-benchmark/open_benchmark.py +2 -3
- llama_stack/distributions/postgres-demo/{run.yaml → config.yaml} +4 -3
- llama_stack/distributions/starter/{run.yaml → config.yaml} +64 -13
- llama_stack/distributions/starter/run-with-postgres-store.yaml +64 -13
- llama_stack/distributions/starter/starter.py +8 -5
- llama_stack/distributions/starter-gpu/{run.yaml → config.yaml} +64 -13
- llama_stack/distributions/starter-gpu/run-with-postgres-store.yaml +64 -13
- llama_stack/distributions/template.py +13 -69
- llama_stack/distributions/watsonx/{run.yaml → config.yaml} +4 -3
- llama_stack/distributions/watsonx/watsonx.py +1 -1
- llama_stack/log.py +28 -11
- llama_stack/models/llama/checkpoint.py +6 -6
- llama_stack/models/llama/hadamard_utils.py +2 -0
- llama_stack/models/llama/llama3/generation.py +3 -1
- llama_stack/models/llama/llama3/interface.py +2 -5
- llama_stack/models/llama/llama3/multimodal/encoder_utils.py +3 -3
- llama_stack/models/llama/llama3/multimodal/image_transform.py +6 -6
- llama_stack/models/llama/llama3/prompt_templates/system_prompts.py +1 -1
- llama_stack/models/llama/llama3/tool_utils.py +2 -1
- llama_stack/models/llama/llama4/prompt_templates/system_prompts.py +1 -1
- llama_stack/providers/inline/agents/meta_reference/__init__.py +3 -3
- llama_stack/providers/inline/agents/meta_reference/agents.py +44 -261
- llama_stack/providers/inline/agents/meta_reference/config.py +6 -1
- llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +207 -57
- llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +308 -47
- llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py +162 -96
- llama_stack/providers/inline/agents/meta_reference/responses/types.py +23 -8
- llama_stack/providers/inline/agents/meta_reference/responses/utils.py +201 -33
- llama_stack/providers/inline/agents/meta_reference/safety.py +8 -13
- llama_stack/providers/inline/batches/reference/__init__.py +2 -4
- llama_stack/providers/inline/batches/reference/batches.py +78 -60
- llama_stack/providers/inline/datasetio/localfs/datasetio.py +2 -5
- llama_stack/providers/inline/eval/meta_reference/eval.py +16 -61
- llama_stack/providers/inline/files/localfs/files.py +37 -28
- llama_stack/providers/inline/inference/meta_reference/config.py +2 -2
- llama_stack/providers/inline/inference/meta_reference/generators.py +50 -60
- llama_stack/providers/inline/inference/meta_reference/inference.py +403 -19
- llama_stack/providers/inline/inference/meta_reference/model_parallel.py +7 -26
- llama_stack/providers/inline/inference/meta_reference/parallel_utils.py +2 -12
- llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +10 -15
- llama_stack/providers/inline/post_training/common/validator.py +1 -5
- llama_stack/providers/inline/post_training/huggingface/post_training.py +8 -8
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py +18 -10
- llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device_dpo.py +12 -9
- llama_stack/providers/inline/post_training/huggingface/utils.py +27 -6
- llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py +1 -1
- llama_stack/providers/inline/post_training/torchtune/common/utils.py +1 -1
- llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py +1 -1
- llama_stack/providers/inline/post_training/torchtune/post_training.py +8 -8
- llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +16 -16
- llama_stack/providers/inline/safety/code_scanner/code_scanner.py +13 -9
- llama_stack/providers/inline/safety/llama_guard/llama_guard.py +18 -15
- llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py +9 -9
- llama_stack/providers/inline/scoring/basic/scoring.py +6 -13
- llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py +1 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py +1 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/docvqa.py +2 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/equality.py +2 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/ifeval.py +2 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_math_response.py +2 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py +2 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/subset_of.py +2 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py +1 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py +1 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py +1 -2
- llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py +1 -2
- llama_stack/providers/inline/scoring/braintrust/braintrust.py +12 -15
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_correctness.py +2 -2
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_relevancy.py +2 -2
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/answer_similarity.py +2 -2
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_entity_recall.py +2 -2
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_precision.py +2 -2
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_recall.py +2 -2
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/context_relevancy.py +2 -2
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/factuality.py +2 -2
- llama_stack/providers/inline/scoring/braintrust/scoring_fn/fn_defs/faithfulness.py +2 -2
- llama_stack/providers/inline/scoring/llm_as_judge/scoring.py +7 -14
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +2 -2
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py +1 -2
- llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +1 -3
- llama_stack/providers/inline/tool_runtime/rag/__init__.py +1 -1
- llama_stack/providers/inline/tool_runtime/rag/config.py +8 -1
- llama_stack/providers/inline/tool_runtime/rag/context_retriever.py +7 -6
- llama_stack/providers/inline/tool_runtime/rag/memory.py +64 -48
- llama_stack/providers/inline/vector_io/chroma/__init__.py +1 -1
- llama_stack/providers/inline/vector_io/chroma/config.py +1 -1
- llama_stack/providers/inline/vector_io/faiss/__init__.py +1 -1
- llama_stack/providers/inline/vector_io/faiss/config.py +1 -1
- llama_stack/providers/inline/vector_io/faiss/faiss.py +43 -28
- llama_stack/providers/inline/vector_io/milvus/__init__.py +1 -1
- llama_stack/providers/inline/vector_io/milvus/config.py +1 -1
- llama_stack/providers/inline/vector_io/qdrant/__init__.py +1 -1
- llama_stack/providers/inline/vector_io/qdrant/config.py +1 -1
- llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py +1 -1
- llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +40 -33
- llama_stack/providers/registry/agents.py +7 -3
- llama_stack/providers/registry/batches.py +1 -1
- llama_stack/providers/registry/datasetio.py +1 -1
- llama_stack/providers/registry/eval.py +1 -1
- llama_stack/{apis/datasets/__init__.py → providers/registry/file_processors.py} +5 -1
- llama_stack/providers/registry/files.py +11 -2
- llama_stack/providers/registry/inference.py +22 -3
- llama_stack/providers/registry/post_training.py +1 -1
- llama_stack/providers/registry/safety.py +1 -1
- llama_stack/providers/registry/scoring.py +1 -1
- llama_stack/providers/registry/tool_runtime.py +2 -2
- llama_stack/providers/registry/vector_io.py +7 -7
- llama_stack/providers/remote/datasetio/huggingface/huggingface.py +2 -5
- llama_stack/providers/remote/datasetio/nvidia/datasetio.py +1 -4
- llama_stack/providers/remote/eval/nvidia/eval.py +15 -9
- llama_stack/providers/remote/files/openai/__init__.py +19 -0
- llama_stack/providers/remote/files/openai/config.py +28 -0
- llama_stack/providers/remote/files/openai/files.py +253 -0
- llama_stack/providers/remote/files/s3/files.py +52 -30
- llama_stack/providers/remote/inference/anthropic/anthropic.py +2 -1
- llama_stack/providers/remote/inference/anthropic/config.py +1 -1
- llama_stack/providers/remote/inference/azure/azure.py +1 -3
- llama_stack/providers/remote/inference/azure/config.py +8 -7
- llama_stack/providers/remote/inference/bedrock/__init__.py +1 -1
- llama_stack/providers/remote/inference/bedrock/bedrock.py +82 -105
- llama_stack/providers/remote/inference/bedrock/config.py +24 -3
- llama_stack/providers/remote/inference/cerebras/cerebras.py +5 -5
- llama_stack/providers/remote/inference/cerebras/config.py +12 -5
- llama_stack/providers/remote/inference/databricks/config.py +13 -6
- llama_stack/providers/remote/inference/databricks/databricks.py +16 -6
- llama_stack/providers/remote/inference/fireworks/config.py +5 -5
- llama_stack/providers/remote/inference/fireworks/fireworks.py +1 -1
- llama_stack/providers/remote/inference/gemini/config.py +1 -1
- llama_stack/providers/remote/inference/gemini/gemini.py +13 -14
- llama_stack/providers/remote/inference/groq/config.py +5 -5
- llama_stack/providers/remote/inference/groq/groq.py +1 -1
- llama_stack/providers/remote/inference/llama_openai_compat/config.py +5 -5
- llama_stack/providers/remote/inference/llama_openai_compat/llama.py +8 -6
- llama_stack/providers/remote/inference/nvidia/__init__.py +1 -1
- llama_stack/providers/remote/inference/nvidia/config.py +21 -11
- llama_stack/providers/remote/inference/nvidia/nvidia.py +115 -3
- llama_stack/providers/remote/inference/nvidia/utils.py +1 -1
- llama_stack/providers/remote/inference/oci/__init__.py +17 -0
- llama_stack/providers/remote/inference/oci/auth.py +79 -0
- llama_stack/providers/remote/inference/oci/config.py +75 -0
- llama_stack/providers/remote/inference/oci/oci.py +162 -0
- llama_stack/providers/remote/inference/ollama/config.py +7 -5
- llama_stack/providers/remote/inference/ollama/ollama.py +17 -8
- llama_stack/providers/remote/inference/openai/config.py +4 -4
- llama_stack/providers/remote/inference/openai/openai.py +1 -1
- llama_stack/providers/remote/inference/passthrough/__init__.py +2 -2
- llama_stack/providers/remote/inference/passthrough/config.py +5 -10
- llama_stack/providers/remote/inference/passthrough/passthrough.py +97 -75
- llama_stack/providers/remote/inference/runpod/config.py +12 -5
- llama_stack/providers/remote/inference/runpod/runpod.py +2 -20
- llama_stack/providers/remote/inference/sambanova/config.py +5 -5
- llama_stack/providers/remote/inference/sambanova/sambanova.py +1 -1
- llama_stack/providers/remote/inference/tgi/config.py +7 -6
- llama_stack/providers/remote/inference/tgi/tgi.py +19 -11
- llama_stack/providers/remote/inference/together/config.py +5 -5
- llama_stack/providers/remote/inference/together/together.py +15 -12
- llama_stack/providers/remote/inference/vertexai/config.py +1 -1
- llama_stack/providers/remote/inference/vllm/config.py +5 -5
- llama_stack/providers/remote/inference/vllm/vllm.py +13 -14
- llama_stack/providers/remote/inference/watsonx/config.py +4 -4
- llama_stack/providers/remote/inference/watsonx/watsonx.py +21 -94
- llama_stack/providers/remote/post_training/nvidia/post_training.py +4 -4
- llama_stack/providers/remote/post_training/nvidia/utils.py +1 -1
- llama_stack/providers/remote/safety/bedrock/bedrock.py +6 -6
- llama_stack/providers/remote/safety/bedrock/config.py +1 -1
- llama_stack/providers/remote/safety/nvidia/config.py +1 -1
- llama_stack/providers/remote/safety/nvidia/nvidia.py +11 -5
- llama_stack/providers/remote/safety/sambanova/config.py +1 -1
- llama_stack/providers/remote/safety/sambanova/sambanova.py +6 -6
- llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py +11 -6
- llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py +12 -7
- llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py +8 -2
- llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py +57 -15
- llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py +11 -6
- llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py +11 -6
- llama_stack/providers/remote/vector_io/chroma/__init__.py +1 -1
- llama_stack/providers/remote/vector_io/chroma/chroma.py +125 -20
- llama_stack/providers/remote/vector_io/chroma/config.py +1 -1
- llama_stack/providers/remote/vector_io/milvus/__init__.py +1 -1
- llama_stack/providers/remote/vector_io/milvus/config.py +1 -1
- llama_stack/providers/remote/vector_io/milvus/milvus.py +27 -21
- llama_stack/providers/remote/vector_io/pgvector/__init__.py +1 -1
- llama_stack/providers/remote/vector_io/pgvector/config.py +1 -1
- llama_stack/providers/remote/vector_io/pgvector/pgvector.py +26 -18
- llama_stack/providers/remote/vector_io/qdrant/__init__.py +1 -1
- llama_stack/providers/remote/vector_io/qdrant/config.py +1 -1
- llama_stack/providers/remote/vector_io/qdrant/qdrant.py +141 -24
- llama_stack/providers/remote/vector_io/weaviate/__init__.py +1 -1
- llama_stack/providers/remote/vector_io/weaviate/config.py +1 -1
- llama_stack/providers/remote/vector_io/weaviate/weaviate.py +26 -21
- llama_stack/providers/utils/common/data_schema_validator.py +1 -5
- llama_stack/providers/utils/files/form_data.py +1 -1
- llama_stack/providers/utils/inference/embedding_mixin.py +1 -1
- llama_stack/providers/utils/inference/inference_store.py +12 -21
- llama_stack/providers/utils/inference/litellm_openai_mixin.py +79 -79
- llama_stack/providers/utils/inference/model_registry.py +1 -3
- llama_stack/providers/utils/inference/openai_compat.py +44 -1171
- llama_stack/providers/utils/inference/openai_mixin.py +68 -42
- llama_stack/providers/utils/inference/prompt_adapter.py +50 -265
- llama_stack/providers/utils/inference/stream_utils.py +23 -0
- llama_stack/providers/utils/memory/__init__.py +2 -0
- llama_stack/providers/utils/memory/file_utils.py +1 -1
- llama_stack/providers/utils/memory/openai_vector_store_mixin.py +181 -84
- llama_stack/providers/utils/memory/vector_store.py +39 -38
- llama_stack/providers/utils/pagination.py +1 -1
- llama_stack/providers/utils/responses/responses_store.py +15 -25
- llama_stack/providers/utils/scoring/aggregation_utils.py +1 -2
- llama_stack/providers/utils/scoring/base_scoring_fn.py +1 -2
- llama_stack/providers/utils/tools/mcp.py +93 -11
- llama_stack/telemetry/constants.py +27 -0
- llama_stack/telemetry/helpers.py +43 -0
- llama_stack/testing/api_recorder.py +25 -16
- {llama_stack-0.3.4.dist-info → llama_stack-0.4.0.dist-info}/METADATA +56 -131
- llama_stack-0.4.0.dist-info/RECORD +588 -0
- llama_stack-0.4.0.dist-info/top_level.txt +2 -0
- llama_stack_api/__init__.py +945 -0
- llama_stack_api/admin/__init__.py +45 -0
- llama_stack_api/admin/api.py +72 -0
- llama_stack_api/admin/fastapi_routes.py +117 -0
- llama_stack_api/admin/models.py +113 -0
- llama_stack_api/agents.py +173 -0
- llama_stack_api/batches/__init__.py +40 -0
- llama_stack_api/batches/api.py +53 -0
- llama_stack_api/batches/fastapi_routes.py +113 -0
- llama_stack_api/batches/models.py +78 -0
- llama_stack_api/benchmarks/__init__.py +43 -0
- llama_stack_api/benchmarks/api.py +39 -0
- llama_stack_api/benchmarks/fastapi_routes.py +109 -0
- llama_stack_api/benchmarks/models.py +109 -0
- {llama_stack/apis → llama_stack_api}/common/content_types.py +1 -43
- {llama_stack/apis → llama_stack_api}/common/errors.py +0 -8
- {llama_stack/apis → llama_stack_api}/common/job_types.py +1 -1
- llama_stack_api/common/responses.py +77 -0
- {llama_stack/apis → llama_stack_api}/common/training_types.py +1 -1
- {llama_stack/apis → llama_stack_api}/common/type_system.py +2 -14
- llama_stack_api/connectors.py +146 -0
- {llama_stack/apis/conversations → llama_stack_api}/conversations.py +23 -39
- {llama_stack/apis/datasetio → llama_stack_api}/datasetio.py +4 -8
- llama_stack_api/datasets/__init__.py +61 -0
- llama_stack_api/datasets/api.py +35 -0
- llama_stack_api/datasets/fastapi_routes.py +104 -0
- llama_stack_api/datasets/models.py +152 -0
- {llama_stack/providers → llama_stack_api}/datatypes.py +166 -10
- {llama_stack/apis/eval → llama_stack_api}/eval.py +8 -40
- llama_stack_api/file_processors/__init__.py +27 -0
- llama_stack_api/file_processors/api.py +64 -0
- llama_stack_api/file_processors/fastapi_routes.py +78 -0
- llama_stack_api/file_processors/models.py +42 -0
- llama_stack_api/files/__init__.py +35 -0
- llama_stack_api/files/api.py +51 -0
- llama_stack_api/files/fastapi_routes.py +124 -0
- llama_stack_api/files/models.py +107 -0
- {llama_stack/apis/inference → llama_stack_api}/inference.py +90 -194
- llama_stack_api/inspect_api/__init__.py +37 -0
- llama_stack_api/inspect_api/api.py +25 -0
- llama_stack_api/inspect_api/fastapi_routes.py +76 -0
- llama_stack_api/inspect_api/models.py +28 -0
- {llama_stack/apis/agents → llama_stack_api/internal}/__init__.py +3 -1
- llama_stack/providers/utils/kvstore/api.py → llama_stack_api/internal/kvstore.py +5 -0
- llama_stack_api/internal/sqlstore.py +79 -0
- {llama_stack/apis/models → llama_stack_api}/models.py +11 -9
- {llama_stack/apis/agents → llama_stack_api}/openai_responses.py +184 -27
- {llama_stack/apis/post_training → llama_stack_api}/post_training.py +7 -11
- {llama_stack/apis/prompts → llama_stack_api}/prompts.py +3 -4
- llama_stack_api/providers/__init__.py +33 -0
- llama_stack_api/providers/api.py +16 -0
- llama_stack_api/providers/fastapi_routes.py +57 -0
- llama_stack_api/providers/models.py +24 -0
- {llama_stack/apis/tools → llama_stack_api}/rag_tool.py +2 -52
- {llama_stack/apis → llama_stack_api}/resource.py +1 -1
- llama_stack_api/router_utils.py +160 -0
- {llama_stack/apis/safety → llama_stack_api}/safety.py +6 -9
- {llama_stack → llama_stack_api}/schema_utils.py +94 -4
- {llama_stack/apis/scoring → llama_stack_api}/scoring.py +3 -3
- {llama_stack/apis/scoring_functions → llama_stack_api}/scoring_functions.py +9 -6
- {llama_stack/apis/shields → llama_stack_api}/shields.py +6 -7
- {llama_stack/apis/tools → llama_stack_api}/tools.py +26 -21
- {llama_stack/apis/vector_io → llama_stack_api}/vector_io.py +133 -152
- {llama_stack/apis/vector_stores → llama_stack_api}/vector_stores.py +1 -1
- llama_stack/apis/agents/agents.py +0 -894
- llama_stack/apis/batches/__init__.py +0 -9
- llama_stack/apis/batches/batches.py +0 -100
- llama_stack/apis/benchmarks/__init__.py +0 -7
- llama_stack/apis/benchmarks/benchmarks.py +0 -108
- llama_stack/apis/common/responses.py +0 -36
- llama_stack/apis/conversations/__init__.py +0 -31
- llama_stack/apis/datasets/datasets.py +0 -251
- llama_stack/apis/datatypes.py +0 -160
- llama_stack/apis/eval/__init__.py +0 -7
- llama_stack/apis/files/__init__.py +0 -7
- llama_stack/apis/files/files.py +0 -199
- llama_stack/apis/inference/__init__.py +0 -7
- llama_stack/apis/inference/event_logger.py +0 -43
- llama_stack/apis/inspect/__init__.py +0 -7
- llama_stack/apis/inspect/inspect.py +0 -94
- llama_stack/apis/models/__init__.py +0 -7
- llama_stack/apis/post_training/__init__.py +0 -7
- llama_stack/apis/prompts/__init__.py +0 -9
- llama_stack/apis/providers/__init__.py +0 -7
- llama_stack/apis/providers/providers.py +0 -69
- llama_stack/apis/safety/__init__.py +0 -7
- llama_stack/apis/scoring/__init__.py +0 -7
- llama_stack/apis/scoring_functions/__init__.py +0 -7
- llama_stack/apis/shields/__init__.py +0 -7
- llama_stack/apis/synthetic_data_generation/__init__.py +0 -7
- llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py +0 -77
- llama_stack/apis/telemetry/__init__.py +0 -7
- llama_stack/apis/telemetry/telemetry.py +0 -423
- llama_stack/apis/tools/__init__.py +0 -8
- llama_stack/apis/vector_io/__init__.py +0 -7
- llama_stack/apis/vector_stores/__init__.py +0 -7
- llama_stack/core/server/tracing.py +0 -80
- llama_stack/core/ui/app.py +0 -55
- llama_stack/core/ui/modules/__init__.py +0 -5
- llama_stack/core/ui/modules/api.py +0 -32
- llama_stack/core/ui/modules/utils.py +0 -42
- llama_stack/core/ui/page/__init__.py +0 -5
- llama_stack/core/ui/page/distribution/__init__.py +0 -5
- llama_stack/core/ui/page/distribution/datasets.py +0 -18
- llama_stack/core/ui/page/distribution/eval_tasks.py +0 -20
- llama_stack/core/ui/page/distribution/models.py +0 -18
- llama_stack/core/ui/page/distribution/providers.py +0 -27
- llama_stack/core/ui/page/distribution/resources.py +0 -48
- llama_stack/core/ui/page/distribution/scoring_functions.py +0 -18
- llama_stack/core/ui/page/distribution/shields.py +0 -19
- llama_stack/core/ui/page/evaluations/__init__.py +0 -5
- llama_stack/core/ui/page/evaluations/app_eval.py +0 -143
- llama_stack/core/ui/page/evaluations/native_eval.py +0 -253
- llama_stack/core/ui/page/playground/__init__.py +0 -5
- llama_stack/core/ui/page/playground/chat.py +0 -130
- llama_stack/core/ui/page/playground/tools.py +0 -352
- llama_stack/distributions/dell/build.yaml +0 -33
- llama_stack/distributions/meta-reference-gpu/build.yaml +0 -32
- llama_stack/distributions/nvidia/build.yaml +0 -29
- llama_stack/distributions/open-benchmark/build.yaml +0 -36
- llama_stack/distributions/postgres-demo/__init__.py +0 -7
- llama_stack/distributions/postgres-demo/build.yaml +0 -23
- llama_stack/distributions/postgres-demo/postgres_demo.py +0 -125
- llama_stack/distributions/starter/build.yaml +0 -61
- llama_stack/distributions/starter-gpu/build.yaml +0 -61
- llama_stack/distributions/watsonx/build.yaml +0 -33
- llama_stack/providers/inline/agents/meta_reference/agent_instance.py +0 -1024
- llama_stack/providers/inline/agents/meta_reference/persistence.py +0 -228
- llama_stack/providers/inline/telemetry/__init__.py +0 -5
- llama_stack/providers/inline/telemetry/meta_reference/__init__.py +0 -21
- llama_stack/providers/inline/telemetry/meta_reference/config.py +0 -47
- llama_stack/providers/inline/telemetry/meta_reference/telemetry.py +0 -252
- llama_stack/providers/remote/inference/bedrock/models.py +0 -29
- llama_stack/providers/utils/kvstore/sqlite/config.py +0 -20
- llama_stack/providers/utils/sqlstore/__init__.py +0 -5
- llama_stack/providers/utils/sqlstore/api.py +0 -128
- llama_stack/providers/utils/telemetry/__init__.py +0 -5
- llama_stack/providers/utils/telemetry/trace_protocol.py +0 -142
- llama_stack/providers/utils/telemetry/tracing.py +0 -384
- llama_stack/strong_typing/__init__.py +0 -19
- llama_stack/strong_typing/auxiliary.py +0 -228
- llama_stack/strong_typing/classdef.py +0 -440
- llama_stack/strong_typing/core.py +0 -46
- llama_stack/strong_typing/deserializer.py +0 -877
- llama_stack/strong_typing/docstring.py +0 -409
- llama_stack/strong_typing/exception.py +0 -23
- llama_stack/strong_typing/inspection.py +0 -1085
- llama_stack/strong_typing/mapping.py +0 -40
- llama_stack/strong_typing/name.py +0 -182
- llama_stack/strong_typing/schema.py +0 -792
- llama_stack/strong_typing/serialization.py +0 -97
- llama_stack/strong_typing/serializer.py +0 -500
- llama_stack/strong_typing/slots.py +0 -27
- llama_stack/strong_typing/topological.py +0 -89
- llama_stack/ui/node_modules/flatted/python/flatted.py +0 -149
- llama_stack-0.3.4.dist-info/RECORD +0 -625
- llama_stack-0.3.4.dist-info/top_level.txt +0 -1
- /llama_stack/{providers/utils → core/storage}/kvstore/config.py +0 -0
- /llama_stack/{providers/utils → core/storage}/kvstore/mongodb/__init__.py +0 -0
- /llama_stack/{providers/utils → core/storage}/kvstore/postgres/__init__.py +0 -0
- /llama_stack/{providers/utils → core/storage}/kvstore/redis/__init__.py +0 -0
- /llama_stack/{providers/utils → core/storage}/kvstore/sqlite/__init__.py +0 -0
- /llama_stack/{apis → providers/inline/file_processor}/__init__.py +0 -0
- /llama_stack/{apis/common → telemetry}/__init__.py +0 -0
- {llama_stack-0.3.4.dist-info → llama_stack-0.4.0.dist-info}/WHEEL +0 -0
- {llama_stack-0.3.4.dist-info → llama_stack-0.4.0.dist-info}/entry_points.txt +0 -0
- {llama_stack-0.3.4.dist-info → llama_stack-0.4.0.dist-info}/licenses/LICENSE +0 -0
- {llama_stack/core/ui → llama_stack_api/common}/__init__.py +0 -0
- {llama_stack/strong_typing → llama_stack_api}/py.typed +0 -0
- {llama_stack/apis → llama_stack_api}/version.py +0 -0
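The rename entries above amount to two structural moves: the API definitions under llama_stack/apis/ now ship as a separate top-level llama_stack_api package (note the new top_level.txt entry and the llama_stack_api/ files), and the kvstore/sqlstore utilities move from llama_stack/providers/utils/ into llama_stack/core/storage/. The Python sketch below summarizes the implied import-path changes; it is inferred from the rename entries only, and whether individual symbols such as KVStore are still re-exported from the relocated modules is an assumption, not something this listing shows.

# Module moves implied by the rename entries above (0.3.4 -> 0.4.0).
# Paths follow from the "{old → new}" entries; symbol availability is assumed.
MODULE_MOVES = {
    # API definitions: llama_stack.apis.* -> the new top-level llama_stack_api package
    "llama_stack.apis.inference": "llama_stack_api.inference",
    "llama_stack.apis.agents.openai_responses": "llama_stack_api.openai_responses",
    "llama_stack.apis.safety": "llama_stack_api.safety",
    # Storage utilities: provider utils -> core storage
    "llama_stack.providers.utils.kvstore": "llama_stack.core.storage.kvstore",
    "llama_stack.providers.utils.sqlstore": "llama_stack.core.storage.sqlstore",
}

# Hypothetical fix-up for code written against 0.3.4, e.g.
#     from llama_stack.providers.utils.kvstore import KVStore
# would become (assuming KVStore is still exported from the relocated module):
#     from llama_stack.core.storage.kvstore import KVStore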
Deleted file contents:

llama_stack/providers/inline/agents/meta_reference/persistence.py
@@ -1,228 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import json
-import uuid
-from datetime import UTC, datetime
-
-from llama_stack.apis.agents import AgentConfig, Session, ToolExecutionStep, Turn
-from llama_stack.apis.common.errors import SessionNotFoundError
-from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed
-from llama_stack.core.access_control.datatypes import AccessRule
-from llama_stack.core.datatypes import User
-from llama_stack.core.request_headers import get_authenticated_user
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.kvstore import KVStore
-
-log = get_logger(name=__name__, category="agents::meta_reference")
-
-
-class AgentSessionInfo(Session):
-    # TODO: is this used anywhere?
-    vector_db_id: str | None = None
-    started_at: datetime
-    owner: User | None = None
-    identifier: str | None = None
-    type: str = "session"
-
-
-class AgentInfo(AgentConfig):
-    created_at: datetime
-
-
-class AgentPersistence:
-    def __init__(self, agent_id: str, kvstore: KVStore, policy: list[AccessRule]):
-        self.agent_id = agent_id
-        self.kvstore = kvstore
-        self.policy = policy
-
-    async def create_session(self, name: str) -> str:
-        session_id = str(uuid.uuid4())
-
-        # Get current user's auth attributes for new sessions
-        user = get_authenticated_user()
-
-        session_info = AgentSessionInfo(
-            session_id=session_id,
-            session_name=name,
-            started_at=datetime.now(UTC),
-            owner=user,
-            turns=[],
-            identifier=name,  # should this be qualified in any way?
-        )
-        if not is_action_allowed(self.policy, "create", session_info, user):
-            raise AccessDeniedError("create", session_info, user)
-
-        await self.kvstore.set(
-            key=f"session:{self.agent_id}:{session_id}",
-            value=session_info.model_dump_json(),
-        )
-        return session_id
-
-    async def get_session_info(self, session_id: str) -> AgentSessionInfo:
-        value = await self.kvstore.get(
-            key=f"session:{self.agent_id}:{session_id}",
-        )
-        if not value:
-            raise SessionNotFoundError(session_id)
-
-        session_info = AgentSessionInfo(**json.loads(value))
-
-        # Check access to session
-        if not self._check_session_access(session_info):
-            return None
-
-        return session_info
-
-    def _check_session_access(self, session_info: AgentSessionInfo) -> bool:
-        """Check if current user has access to the session."""
-        # Handle backward compatibility for old sessions without access control
-        if not hasattr(session_info, "access_attributes") and not hasattr(session_info, "owner"):
-            return True
-
-        return is_action_allowed(self.policy, "read", session_info, get_authenticated_user())
-
-    async def get_session_if_accessible(self, session_id: str) -> AgentSessionInfo | None:
-        """Get session info if the user has access to it. For internal use by sub-session methods."""
-        session_info = await self.get_session_info(session_id)
-        if not session_info:
-            return None
-
-        return session_info
-
-    async def add_vector_db_to_session(self, session_id: str, vector_db_id: str):
-        session_info = await self.get_session_if_accessible(session_id)
-        if session_info is None:
-            raise SessionNotFoundError(session_id)
-
-        session_info.vector_db_id = vector_db_id
-        await self.kvstore.set(
-            key=f"session:{self.agent_id}:{session_id}",
-            value=session_info.model_dump_json(),
-        )
-
-    async def add_turn_to_session(self, session_id: str, turn: Turn):
-        if not await self.get_session_if_accessible(session_id):
-            raise SessionNotFoundError(session_id)
-
-        await self.kvstore.set(
-            key=f"session:{self.agent_id}:{session_id}:{turn.turn_id}",
-            value=turn.model_dump_json(),
-        )
-
-    async def get_session_turns(self, session_id: str) -> list[Turn]:
-        if not await self.get_session_if_accessible(session_id):
-            raise SessionNotFoundError(session_id)
-
-        values = await self.kvstore.values_in_range(
-            start_key=f"session:{self.agent_id}:{session_id}:",
-            end_key=f"session:{self.agent_id}:{session_id}:\xff\xff\xff\xff",
-        )
-        turns = []
-        for value in values:
-            try:
-                turn = Turn(**json.loads(value))
-                turns.append(turn)
-            except Exception as e:
-                log.error(f"Error parsing turn: {e}")
-                continue
-
-        # The kvstore does not guarantee order, so we sort by started_at
-        # to ensure consistent ordering of turns.
-        turns.sort(key=lambda t: t.started_at)
-
-        return turns
-
-    async def get_session_turn(self, session_id: str, turn_id: str) -> Turn | None:
-        if not await self.get_session_if_accessible(session_id):
-            raise SessionNotFoundError(session_id)
-
-        value = await self.kvstore.get(
-            key=f"session:{self.agent_id}:{session_id}:{turn_id}",
-        )
-        if not value:
-            return None
-        return Turn(**json.loads(value))
-
-    async def set_in_progress_tool_call_step(self, session_id: str, turn_id: str, step: ToolExecutionStep):
-        if not await self.get_session_if_accessible(session_id):
-            raise SessionNotFoundError(session_id)
-
-        await self.kvstore.set(
-            key=f"in_progress_tool_call_step:{self.agent_id}:{session_id}:{turn_id}",
-            value=step.model_dump_json(),
-        )
-
-    async def get_in_progress_tool_call_step(self, session_id: str, turn_id: str) -> ToolExecutionStep | None:
-        if not await self.get_session_if_accessible(session_id):
-            return None
-
-        value = await self.kvstore.get(
-            key=f"in_progress_tool_call_step:{self.agent_id}:{session_id}:{turn_id}",
-        )
-        return ToolExecutionStep(**json.loads(value)) if value else None
-
-    async def set_num_infer_iters_in_turn(self, session_id: str, turn_id: str, num_infer_iters: int):
-        if not await self.get_session_if_accessible(session_id):
-            raise SessionNotFoundError(session_id)
-
-        await self.kvstore.set(
-            key=f"num_infer_iters_in_turn:{self.agent_id}:{session_id}:{turn_id}",
-            value=str(num_infer_iters),
-        )
-
-    async def get_num_infer_iters_in_turn(self, session_id: str, turn_id: str) -> int | None:
-        if not await self.get_session_if_accessible(session_id):
-            return None
-
-        value = await self.kvstore.get(
-            key=f"num_infer_iters_in_turn:{self.agent_id}:{session_id}:{turn_id}",
-        )
-        return int(value) if value else None
-
-    async def list_sessions(self) -> list[Session]:
-        values = await self.kvstore.values_in_range(
-            start_key=f"session:{self.agent_id}:",
-            end_key=f"session:{self.agent_id}:\xff\xff\xff\xff",
-        )
-        sessions = []
-        for value in values:
-            try:
-                data = json.loads(value)
-                if "turn_id" in data:
-                    continue
-
-                session_info = Session(**data)
-                sessions.append(session_info)
-            except Exception as e:
-                log.error(f"Error parsing session info: {e}")
-                continue
-        return sessions
-
-    async def delete_session_turns(self, session_id: str) -> None:
-        """Delete all turns and their associated data for a session.
-
-        Args:
-            session_id: The ID of the session whose turns should be deleted.
-        """
-        turns = await self.get_session_turns(session_id)
-        for turn in turns:
-            await self.kvstore.delete(key=f"session:{self.agent_id}:{session_id}:{turn.turn_id}")
-
-    async def delete_session(self, session_id: str) -> None:
-        """Delete a session and all its associated turns.
-
-        Args:
-            session_id: The ID of the session to delete.
-
-        Raises:
-            ValueError: If the session does not exist.
-        """
-        session_info = await self.get_session_info(session_id)
-        if session_info is None:
-            raise SessionNotFoundError(session_id)
-
-        await self.kvstore.delete(key=f"session:{self.agent_id}:{session_id}")

llama_stack/providers/inline/telemetry/meta_reference/__init__.py
@@ -1,21 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from typing import Any
-
-from llama_stack.core.datatypes import Api
-
-from .config import TelemetryConfig, TelemetrySink
-
-__all__ = ["TelemetryConfig", "TelemetrySink"]
-
-
-async def get_provider_impl(config: TelemetryConfig, deps: dict[Api, Any]):
-    from .telemetry import TelemetryAdapter
-
-    impl = TelemetryAdapter(config, deps)
-    await impl.initialize()
-    return impl

llama_stack/providers/inline/telemetry/meta_reference/config.py
@@ -1,47 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from enum import StrEnum
-from typing import Any
-
-from pydantic import BaseModel, Field, field_validator
-
-
-class TelemetrySink(StrEnum):
-    OTEL_TRACE = "otel_trace"
-    OTEL_METRIC = "otel_metric"
-    CONSOLE = "console"
-
-
-class TelemetryConfig(BaseModel):
-    otel_exporter_otlp_endpoint: str | None = Field(
-        default=None,
-        description="The OpenTelemetry collector endpoint URL (base URL for traces, metrics, and logs). If not set, the SDK will use OTEL_EXPORTER_OTLP_ENDPOINT environment variable.",
-    )
-    service_name: str = Field(
-        # service name is always the same, use zero-width space to avoid clutter
-        default="\u200b",
-        description="The service name to use for telemetry",
-    )
-    sinks: list[TelemetrySink] = Field(
-        default_factory=list,
-        description="List of telemetry sinks to enable (possible values: otel_trace, otel_metric, console)",
-    )
-
-    @field_validator("sinks", mode="before")
-    @classmethod
-    def validate_sinks(cls, v):
-        if isinstance(v, str):
-            return [TelemetrySink(sink.strip()) for sink in v.split(",")]
-        return v or []
-
-    @classmethod
-    def sample_run_config(cls, __distro_dir__: str) -> dict[str, Any]:
-        return {
-            "service_name": "${env.OTEL_SERVICE_NAME:=\u200b}",
-            "sinks": "${env.TELEMETRY_SINKS:=}",
-            "otel_exporter_otlp_endpoint": "${env.OTEL_EXPORTER_OTLP_ENDPOINT:=}",
-        }

llama_stack/providers/inline/telemetry/meta_reference/telemetry.py
@@ -1,252 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import os
-import threading
-from typing import Any
-
-from opentelemetry import metrics, trace
-from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
-from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
-from opentelemetry.sdk.metrics import MeterProvider
-from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
-from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export import BatchSpanProcessor
-from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
-
-from llama_stack.apis.telemetry import (
-    Event,
-    MetricEvent,
-    SpanEndPayload,
-    SpanStartPayload,
-    SpanStatus,
-    StructuredLogEvent,
-    Telemetry,
-    UnstructuredLogEvent,
-)
-from llama_stack.core.datatypes import Api
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.telemetry.tracing import ROOT_SPAN_MARKERS
-
-from .config import TelemetryConfig
-
-_GLOBAL_STORAGE: dict[str, dict[str | int, Any]] = {
-    "active_spans": {},
-    "counters": {},
-    "gauges": {},
-    "up_down_counters": {},
-}
-_global_lock = threading.Lock()
-_TRACER_PROVIDER = None
-
-logger = get_logger(name=__name__, category="telemetry")
-
-
-def is_tracing_enabled(tracer):
-    with tracer.start_as_current_span("check_tracing") as span:
-        return span.is_recording()
-
-
-class TelemetryAdapter(Telemetry):
-    def __init__(self, _config: TelemetryConfig, deps: dict[Api, Any]) -> None:
-        self.datasetio_api = deps.get(Api.datasetio)
-        self.meter = None
-
-        global _TRACER_PROVIDER
-        # Initialize the correct span processor based on the provider state.
-        # This is needed since once the span processor is set, it cannot be unset.
-        # Recreating the telemetry adapter multiple times will result in duplicate span processors.
-        # Since the library client can be recreated multiple times in a notebook,
-        # the kernel will hold on to the span processor and cause duplicate spans to be written.
-        if os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT"):
-            if _TRACER_PROVIDER is None:
-                provider = TracerProvider()
-                trace.set_tracer_provider(provider)
-                _TRACER_PROVIDER = provider
-
-                # Use single OTLP endpoint for all telemetry signals
-
-                # Let OpenTelemetry SDK handle endpoint construction automatically
-                # The SDK will read OTEL_EXPORTER_OTLP_ENDPOINT and construct appropriate URLs
-                # https://opentelemetry.io/docs/languages/sdk-configuration/otlp-exporter
-                span_exporter = OTLPSpanExporter()
-                span_processor = BatchSpanProcessor(span_exporter)
-                trace.get_tracer_provider().add_span_processor(span_processor)
-
-                metric_reader = PeriodicExportingMetricReader(OTLPMetricExporter())
-                metric_provider = MeterProvider(metric_readers=[metric_reader])
-                metrics.set_meter_provider(metric_provider)
-            self.is_otel_endpoint_set = True
-        else:
-            logger.warning("OTEL_EXPORTER_OTLP_ENDPOINT is not set, skipping telemetry")
-            self.is_otel_endpoint_set = False
-
-        self.meter = metrics.get_meter(__name__)
-        self._lock = _global_lock
-
-    async def initialize(self) -> None:
-        pass
-
-    async def shutdown(self) -> None:
-        if self.is_otel_endpoint_set:
-            trace.get_tracer_provider().force_flush()
-
-    async def log_event(self, event: Event, ttl_seconds: int = 604800) -> None:
-        if isinstance(event, UnstructuredLogEvent):
-            self._log_unstructured(event, ttl_seconds)
-        elif isinstance(event, MetricEvent):
-            self._log_metric(event)
-        elif isinstance(event, StructuredLogEvent):
-            self._log_structured(event, ttl_seconds)
-        else:
-            raise ValueError(f"Unknown event type: {event}")
-
-    def _log_unstructured(self, event: UnstructuredLogEvent, ttl_seconds: int) -> None:
-        with self._lock:
-            # Use global storage instead of instance storage
-            span_id = int(event.span_id, 16)
-            span = _GLOBAL_STORAGE["active_spans"].get(span_id)
-
-            if span:
-                timestamp_ns = int(event.timestamp.timestamp() * 1e9)
-                span.add_event(
-                    name=event.type.value,
-                    attributes={
-                        "message": event.message,
-                        "severity": event.severity.value,
-                        "__ttl__": ttl_seconds,
-                        **(event.attributes or {}),
-                    },
-                    timestamp=timestamp_ns,
-                )
-            else:
-                print(f"Warning: No active span found for span_id {span_id}. Dropping event: {event}")
-
-    def _get_or_create_counter(self, name: str, unit: str) -> metrics.Counter:
-        assert self.meter is not None
-        if name not in _GLOBAL_STORAGE["counters"]:
-            _GLOBAL_STORAGE["counters"][name] = self.meter.create_counter(
-                name=name,
-                unit=unit,
-                description=f"Counter for {name}",
-            )
-        return _GLOBAL_STORAGE["counters"][name]
-
-    def _get_or_create_gauge(self, name: str, unit: str) -> metrics.ObservableGauge:
-        assert self.meter is not None
-        if name not in _GLOBAL_STORAGE["gauges"]:
-            _GLOBAL_STORAGE["gauges"][name] = self.meter.create_gauge(
-                name=name,
-                unit=unit,
-                description=f"Gauge for {name}",
-            )
-        return _GLOBAL_STORAGE["gauges"][name]
-
-    def _log_metric(self, event: MetricEvent) -> None:
-        # Add metric as an event to the current span
-        try:
-            with self._lock:
-                # Only try to add to span if we have a valid span_id
-                if event.span_id:
-                    try:
-                        span_id = int(event.span_id, 16)
-                        span = _GLOBAL_STORAGE["active_spans"].get(span_id)
-
-                        if span:
-                            timestamp_ns = int(event.timestamp.timestamp() * 1e9)
-                            span.add_event(
-                                name=f"metric.{event.metric}",
-                                attributes={
-                                    "value": event.value,
-                                    "unit": event.unit,
-                                    **(event.attributes or {}),
-                                },
-                                timestamp=timestamp_ns,
-                            )
-                    except (ValueError, KeyError):
-                        # Invalid span_id or span not found, but we already logged to console above
-                        pass
-        except Exception:
-            # Lock acquisition failed
-            logger.debug("Failed to acquire lock to add metric to span")
-
-        # Log to OpenTelemetry meter if available
-        if self.meter is None:
-            return
-        if isinstance(event.value, int):
-            counter = self._get_or_create_counter(event.metric, event.unit)
-            counter.add(event.value, attributes=event.attributes)
-        elif isinstance(event.value, float):
-            up_down_counter = self._get_or_create_up_down_counter(event.metric, event.unit)
-            up_down_counter.add(event.value, attributes=event.attributes)

-    def _get_or_create_up_down_counter(self, name: str, unit: str) -> metrics.UpDownCounter:
-        assert self.meter is not None
-        if name not in _GLOBAL_STORAGE["up_down_counters"]:
-            _GLOBAL_STORAGE["up_down_counters"][name] = self.meter.create_up_down_counter(
-                name=name,
-                unit=unit,
-                description=f"UpDownCounter for {name}",
-            )
-        return _GLOBAL_STORAGE["up_down_counters"][name]
-
-    def _log_structured(self, event: StructuredLogEvent, ttl_seconds: int) -> None:
-        with self._lock:
-            span_id = int(event.span_id, 16)
-            tracer = trace.get_tracer(__name__)
-            if event.attributes is None:
-                event.attributes = {}
-            event.attributes["__ttl__"] = ttl_seconds
-
-            # Extract these W3C trace context attributes so they are not written to
-            # underlying storage, as we just need them to propagate the trace context.
-            traceparent = event.attributes.pop("traceparent", None)
-            tracestate = event.attributes.pop("tracestate", None)
-            if traceparent:
-                # If we have a traceparent header value, we're not the root span.
-                for root_attribute in ROOT_SPAN_MARKERS:
-                    event.attributes.pop(root_attribute, None)
-
-            if isinstance(event.payload, SpanStartPayload):
-                # Check if span already exists to prevent duplicates
-                if span_id in _GLOBAL_STORAGE["active_spans"]:
-                    return
-
-                context = None
-                if event.payload.parent_span_id:
-                    parent_span_id = int(event.payload.parent_span_id, 16)
-                    parent_span = _GLOBAL_STORAGE["active_spans"].get(parent_span_id)
-                    context = trace.set_span_in_context(parent_span)
-                elif traceparent:
-                    carrier = {
-                        "traceparent": traceparent,
-                        "tracestate": tracestate,
-                    }
-                    context = TraceContextTextMapPropagator().extract(carrier=carrier)
-
-                span = tracer.start_span(
-                    name=event.payload.name,
-                    context=context,
-                    attributes=event.attributes or {},
-                )
-                _GLOBAL_STORAGE["active_spans"][span_id] = span
-
-            elif isinstance(event.payload, SpanEndPayload):
-                span = _GLOBAL_STORAGE["active_spans"].get(span_id)
-                if span:
-                    if event.attributes:
-                        span.set_attributes(event.attributes)
-
-                    status = (
-                        trace.Status(status_code=trace.StatusCode.OK)
-                        if event.payload.status == SpanStatus.OK
-                        else trace.Status(status_code=trace.StatusCode.ERROR)
-                    )
-                    span.set_status(status)
-                    span.end()
-                    _GLOBAL_STORAGE["active_spans"].pop(span_id, None)
-            else:
-                raise ValueError(f"Unknown structured log event: {event}")

llama_stack/providers/remote/inference/bedrock/models.py
@@ -1,29 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from llama_stack.models.llama.sku_types import CoreModelId
-from llama_stack.providers.utils.inference.model_registry import (
-    build_hf_repo_model_entry,
-)
-
-SAFETY_MODELS_ENTRIES = []
-
-
-# https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
-MODEL_ENTRIES = [
-    build_hf_repo_model_entry(
-        "meta.llama3-1-8b-instruct-v1:0",
-        CoreModelId.llama3_1_8b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta.llama3-1-70b-instruct-v1:0",
-        CoreModelId.llama3_1_70b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "meta.llama3-1-405b-instruct-v1:0",
-        CoreModelId.llama3_1_405b_instruct.value,
-    ),
-] + SAFETY_MODELS_ENTRIES

llama_stack/providers/utils/kvstore/sqlite/config.py
@@ -1,20 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from pydantic import BaseModel, Field
-
-from llama_stack.schema_utils import json_schema_type
-
-
-@json_schema_type
-class SqliteControlPlaneConfig(BaseModel):
-    db_path: str = Field(
-        description="File path for the sqlite database",
-    )
-    table_name: str = Field(
-        default="llamastack_control_plane",
-        description="Table into which all the keys will be placed",
-    )