llama-stack-api 0.4.2__tar.gz

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package files as they appear in the public registry.
+++ PKG-INFO
@@ -0,0 +1,127 @@
+ Metadata-Version: 2.4
+ Name: llama-stack-api
+ Version: 0.4.2
+ Summary: API and Provider specifications for Llama Stack - lightweight package with protocol definitions and provider specs
+ Author-email: Meta Llama <llama-oss@meta.com>
+ License: MIT
+ Project-URL: Homepage, https://github.com/llamastack/llama-stack
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Information Technology
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+ Requires-Dist: openai>=2.5.0
+ Requires-Dist: fastapi<1.0,>=0.115.0
+ Requires-Dist: pydantic>=2.11.9
+ Requires-Dist: jsonschema
+ Requires-Dist: opentelemetry-sdk>=1.30.0
+ Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.30.0
+
+ # llama-stack-api
+
+ API and Provider specifications for Llama Stack - a lightweight package with protocol definitions and provider specs.
+
+ ## Overview
+
+ `llama-stack-api` is a minimal dependency package that contains:
+
+ - **API Protocol Definitions**: Type-safe protocol definitions for all Llama Stack APIs (inference, agents, safety, etc.)
+ - **Provider Specifications**: Provider spec definitions for building custom providers
+ - **Data Types**: Shared data types and models used across the Llama Stack ecosystem
+ - **Type Utilities**: Strong typing utilities and schema validation
+
+ ## What This Package Does NOT Include
+
+ - Server implementation (see `llama-stack` package)
+ - Provider implementations (see `llama-stack` package)
+ - CLI tools (see `llama-stack` package)
+ - Runtime orchestration (see `llama-stack` package)
+
+ ## Use Cases
+
+ This package is designed for:
+
+ 1. **Third-party Provider Developers**: Build custom providers without depending on the full Llama Stack server
+ 2. **Client Library Authors**: Use type definitions without server dependencies
+ 3. **Documentation Generation**: Generate API docs from protocol definitions
+ 4. **Type Checking**: Validate implementations against the official specs
+
+ ## Installation
+
+ ```bash
+ pip install llama-stack-api
+ ```
+
+ Or with uv:
+
+ ```bash
+ uv pip install llama-stack-api
+ ```
+
+ ## Dependencies
+
+ Declared runtime dependencies (per the package metadata):
+ - `openai>=2.5.0` - OpenAI Python client (API types)
+ - `fastapi>=0.115.0,<1.0` - FastAPI, capped below 1.0
+ - `pydantic>=2.11.9` - For data validation and serialization
+ - `jsonschema` - For JSON schema utilities
+ - `opentelemetry-sdk>=1.30.0` - OpenTelemetry SDK
+ - `opentelemetry-exporter-otlp-proto-http>=1.30.0` - OTLP exporter (protobuf over HTTP)
+
+ ## Versioning
+
+ This package follows semantic versioning independently from the main `llama-stack` package:
+
+ - **Patch versions** (0.4.x): Documentation, internal improvements
+ - **Minor versions** (0.x.0): New APIs, backward-compatible changes
+ - **Major versions** (x.0.0): Breaking changes to existing APIs
+
+ Current version: **0.4.2**
+
+ ## Usage Example
+
+ ```python
+ from llama_stack_api.inference import Inference, ChatCompletionRequest
+ from llama_stack_api.providers.datatypes import ProviderSpec, InlineProviderSpec
+ from llama_stack_api.datatypes import Api
+
+
+ # Use protocol definitions for type checking
+ class MyInferenceProvider(Inference):
+     async def chat_completion(self, request: ChatCompletionRequest):
+         # Your implementation
+         pass
+
+
+ # Define provider specifications
+ my_provider_spec = InlineProviderSpec(
+     api=Api.inference,
+     provider_type="inline::my-provider",
+     pip_packages=["my-dependencies"],
+     module="my_package.providers.inference",
+     config_class="my_package.providers.inference.MyConfig",
+ )
+ ```
+
+ ## Relationship to llama-stack
+
+ The main `llama-stack` package depends on `llama-stack-api` and provides:
+ - Full server implementation
+ - Built-in provider implementations
+ - CLI tools for running and managing stacks
+ - Runtime provider resolution and orchestration
+
+ ## Contributing
+
+ See the main [Llama Stack repository](https://github.com/llamastack/llama-stack) for contribution guidelines.
+
+ ## License
+
+ MIT License - see LICENSE file for details.
+
+ ## Links
+
+ - [Main Llama Stack Repository](https://github.com/llamastack/llama-stack)
+ - [Documentation](https://llamastack.ai/)
+ - [Client Library](https://pypi.org/project/llama-stack-client/)
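
The metadata headers above are queryable at runtime. As a minimal sketch (assuming the package is installed in the current environment), the standard library's `importlib.metadata` can recover every field shown in this PKG-INFO:

```python
# Minimal sketch: read the installed package's metadata shown above.
# Assumes llama-stack-api is installed in the current environment.
from importlib.metadata import metadata, version

print(version("llama-stack-api"))      # 0.4.2
meta = metadata("llama-stack-api")
print(meta["Requires-Python"])         # >=3.12
for dist in meta.get_all("Requires-Dist"):
    print(dist)                        # the six Requires-Dist pins above
```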
+++ README.md
@@ -0,0 +1,103 @@
+ # llama-stack-api
+
+ API and Provider specifications for Llama Stack - a lightweight package with protocol definitions and provider specs.
+
+ ## Overview
+
+ `llama-stack-api` is a minimal dependency package that contains:
+
+ - **API Protocol Definitions**: Type-safe protocol definitions for all Llama Stack APIs (inference, agents, safety, etc.)
+ - **Provider Specifications**: Provider spec definitions for building custom providers
+ - **Data Types**: Shared data types and models used across the Llama Stack ecosystem
+ - **Type Utilities**: Strong typing utilities and schema validation
+
+ ## What This Package Does NOT Include
+
+ - Server implementation (see `llama-stack` package)
+ - Provider implementations (see `llama-stack` package)
+ - CLI tools (see `llama-stack` package)
+ - Runtime orchestration (see `llama-stack` package)
+
+ ## Use Cases
+
+ This package is designed for:
+
+ 1. **Third-party Provider Developers**: Build custom providers without depending on the full Llama Stack server
+ 2. **Client Library Authors**: Use type definitions without server dependencies
+ 3. **Documentation Generation**: Generate API docs from protocol definitions
+ 4. **Type Checking**: Validate implementations against the official specs
+
+ ## Installation
+
+ ```bash
+ pip install llama-stack-api
+ ```
+
+ Or with uv:
+
+ ```bash
+ uv pip install llama-stack-api
+ ```
+
+ ## Dependencies
+
+ Declared runtime dependencies (per the package metadata):
+ - `openai>=2.5.0` - OpenAI Python client (API types)
+ - `fastapi>=0.115.0,<1.0` - FastAPI, capped below 1.0
+ - `pydantic>=2.11.9` - For data validation and serialization
+ - `jsonschema` - For JSON schema utilities
+ - `opentelemetry-sdk>=1.30.0` - OpenTelemetry SDK
+ - `opentelemetry-exporter-otlp-proto-http>=1.30.0` - OTLP exporter (protobuf over HTTP)
+
+ ## Versioning
+
+ This package follows semantic versioning independently from the main `llama-stack` package:
+
+ - **Patch versions** (0.4.x): Documentation, internal improvements
+ - **Minor versions** (0.x.0): New APIs, backward-compatible changes
+ - **Major versions** (x.0.0): Breaking changes to existing APIs
+
+ Current version: **0.4.2**
+
+ ## Usage Example
+
+ ```python
+ from llama_stack_api.inference import Inference, ChatCompletionRequest
+ from llama_stack_api.providers.datatypes import ProviderSpec, InlineProviderSpec
+ from llama_stack_api.datatypes import Api
+
+
+ # Use protocol definitions for type checking
+ class MyInferenceProvider(Inference):
+     async def chat_completion(self, request: ChatCompletionRequest):
+         # Your implementation
+         pass
+
+
+ # Define provider specifications
+ my_provider_spec = InlineProviderSpec(
+     api=Api.inference,
+     provider_type="inline::my-provider",
+     pip_packages=["my-dependencies"],
+     module="my_package.providers.inference",
+     config_class="my_package.providers.inference.MyConfig",
+ )
+ ```
+
+ ## Relationship to llama-stack
+
+ The main `llama-stack` package depends on `llama-stack-api` and provides:
+ - Full server implementation
+ - Built-in provider implementations
+ - CLI tools for running and managing stacks
+ - Runtime provider resolution and orchestration
+
+ ## Contributing
+
+ See the main [Llama Stack repository](https://github.com/llamastack/llama-stack) for contribution guidelines.
+
+ ## License
+
+ MIT License - see LICENSE file for details.
+
+ ## Links
+
+ - [Main Llama Stack Repository](https://github.com/llamastack/llama-stack)
+ - [Documentation](https://llamastack.ai/)
+ - [Client Library](https://pypi.org/project/llama-stack-client/)
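
Given the versioning policy the README states, a consumer who wants patch and documentation fixes but not new-API minor releases might pin the 0.4 series; a small sketch (the exact bounds are the consumer's choice, not something the package prescribes):

```bash
# Stay on the 0.4 series: patch releases flow in automatically,
# 0.5.0 (new APIs) requires a deliberate upgrade.
pip install "llama-stack-api>=0.4.2,<0.5"
```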
+++ pyproject.toml
@@ -0,0 +1,84 @@
+ [build-system]
+ requires = ["setuptools>=61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [tool.uv]
+ required-version = ">=0.7.0"
+
+ [project]
+ name = "llama-stack-api"
+ version = "0.4.2"
+ authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+ description = "API and Provider specifications for Llama Stack - lightweight package with protocol definitions and provider specs"
+ readme = "README.md"
+ requires-python = ">=3.12"
+ license = { "text" = "MIT" }
+ classifiers = [
+     "License :: OSI Approved :: MIT License",
+     "Programming Language :: Python :: 3",
+     "Operating System :: OS Independent",
+     "Intended Audience :: Developers",
+     "Intended Audience :: Information Technology",
+     "Intended Audience :: Science/Research",
+     "Topic :: Scientific/Engineering :: Artificial Intelligence",
+     "Topic :: Scientific/Engineering :: Information Analysis",
+ ]
+ dependencies = [
+     "openai>=2.5.0",
+     "fastapi>=0.115.0,<1.0",
+     "pydantic>=2.11.9",
+     "jsonschema",
+     "opentelemetry-sdk>=1.30.0",
+     "opentelemetry-exporter-otlp-proto-http>=1.30.0",
+ ]
+
+ [project.urls]
+ Homepage = "https://github.com/llamastack/llama-stack"
+
+ [tool.setuptools.packages.find]
+ where = [".."]
+ include = ["llama_stack_api", "llama_stack_api.*"]
+
+ [tool.setuptools.package-data]
+ llama_stack_api = ["py.typed"]
+
+ [tool.ruff]
+ line-length = 120
+
+ [tool.ruff.lint]
+ select = [
+     "UP",      # pyupgrade
+     "B",       # flake8-bugbear
+     "B9",      # flake8-bugbear subset
+     "C",       # comprehensions
+     "E",       # pycodestyle
+     "F",       # Pyflakes
+     "N",       # Naming
+     "W",       # Warnings
+     "DTZ",     # datetime rules
+     "I",       # isort (imports order)
+     "RUF001",  # Checks for ambiguous Unicode characters in strings
+     "RUF002",  # Checks for ambiguous Unicode characters in docstrings
+     "RUF003",  # Checks for ambiguous Unicode characters in comments
+     "PLC2401", # Checks for the use of non-ASCII characters in variable names
+ ]
+ ignore = [
+     # The following ignores are desired by the project maintainers.
+     "E402", # Module level import not at top of file
+     "E501", # Line too long
+     "F405", # Maybe undefined or defined from star import
+     "C408", # Ignored because we like the dict keyword argument syntax
+     "N812", # Ignored because import torch.nn.functional as F is PyTorch convention
+
+     # These are the additional ones we started ignoring after moving to ruff. We should look into each one of them later.
+     "C901", # Complexity of the function is too high
+ ]
+ unfixable = [
+     "PLE2515",
+ ] # Do not fix this automatically since ruff will replace the zero-width space with \u200b - let's do it manually
+
+ [tool.ruff.lint.per-file-ignores]
+ "llama_stack_api/apis/**/__init__.py" = ["F403"]
+
+ [tool.ruff.lint.pep8-naming]
+ classmethod-decorators = ["classmethod", "pydantic.field_validator"]
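
One detail worth noting above: `[tool.setuptools.packages.find]` sets `where = [".."]`, so setuptools discovers the `llama_stack_api` package one directory above this `pyproject.toml` rather than next to it. A hedged sketch of the layout this implies, plus a build check; the directory names are illustrative, not taken from the repository:

```bash
# Hypothetical source layout implied by where = [".."]:
#
#   src/
#   ├── llama_stack_api/        # the package itself, shipping py.typed
#   │   └── __init__.py
#   └── llama_stack_api_pkg/    # illustrative name for this pyproject's directory
#       └── pyproject.toml
#
# Build and inspect the sdist to confirm the package and metadata made it in
# (requires: pip install build):
python -m build src/llama_stack_api_pkg
tar -tzf src/llama_stack_api_pkg/dist/*.tar.gz | grep -E 'py.typed|PKG-INFO'
```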
+++ setup.cfg
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
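
This setup.cfg is the stamp of a release build: an empty `tag_build` with `tag_date = 0` tells setuptools' `egg_info` command to append no suffix to the version. For contrast, a development snapshot would typically configure something like the following (illustrative values, not from this package):

```ini
[egg_info]
; append a .dev tag plus a date stamp, yielding versions like 0.4.2.dev20250101
tag_build = .dev
tag_date = 1
```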