llama-stack-api 0.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,126 @@
+ Metadata-Version: 2.4
+ Name: llama-stack-api
+ Version: 0.4.0
+ Summary: API and Provider specifications for Llama Stack - lightweight package with protocol definitions and provider specs
+ Author-email: Meta Llama <llama-oss@meta.com>
+ License: MIT
+ Project-URL: Homepage, https://github.com/llamastack/llama-stack
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Information Technology
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Scientific/Engineering :: Information Analysis
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+ Requires-Dist: fastapi<1.0,>=0.115.0
+ Requires-Dist: pydantic>=2.11.9
+ Requires-Dist: jsonschema
+ Requires-Dist: opentelemetry-sdk>=1.30.0
+ Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.30.0
+
+ # llama-stack-api
+
+ API and Provider specifications for Llama Stack - a lightweight package with protocol definitions and provider specs.
+
+ ## Overview
+
+ `llama-stack-api` is a minimal-dependency package that contains:
+
+ - **API Protocol Definitions**: Type-safe protocol definitions for all Llama Stack APIs (inference, agents, safety, etc.)
+ - **Provider Specifications**: Provider spec definitions for building custom providers
+ - **Data Types**: Shared data types and models used across the Llama Stack ecosystem
+ - **Type Utilities**: Strong typing utilities and schema validation
+
+ ## What This Package Does NOT Include
+
+ - Server implementation (see the `llama-stack` package)
+ - Provider implementations (see the `llama-stack` package)
+ - CLI tools (see the `llama-stack` package)
+ - Runtime orchestration (see the `llama-stack` package)
+
+ ## Use Cases
+
+ This package is designed for:
+
+ 1. **Third-party Provider Developers**: Build custom providers without depending on the full Llama Stack server
+ 2. **Client Library Authors**: Use type definitions without server dependencies
+ 3. **Documentation Generation**: Generate API docs from protocol definitions
+ 4. **Type Checking**: Validate implementations against the official specs
+
+ ## Installation
+
+ ```bash
+ pip install llama-stack-api
+ ```
+
+ Or with uv:
+
+ ```bash
+ uv pip install llama-stack-api
+ ```
+
+ ## Dependencies
+
+ Minimal dependencies:
+
+ - `fastapi>=0.115.0,<1.0` - For API endpoint definitions
+ - `pydantic>=2.11.9` - For data validation and serialization
+ - `jsonschema` - For JSON schema utilities
+ - `opentelemetry-sdk>=1.30.0` - For telemetry types
+ - `opentelemetry-exporter-otlp-proto-http>=1.30.0` - For OTLP telemetry export
+
+ ## Versioning
+
+ This package follows semantic versioning independently from the main `llama-stack` package:
+
+ - **Patch versions** (0.4.x): Documentation, internal improvements
+ - **Minor versions** (0.x.0): New APIs, backward-compatible changes
+ - **Major versions** (x.0.0): Breaking changes to existing APIs
+
+ Current version: **0.4.0**
+
+ ## Usage Example
+
+ ```python
+ from llama_stack_api.inference import Inference, ChatCompletionRequest
+ from llama_stack_api.providers.datatypes import ProviderSpec, InlineProviderSpec
+ from llama_stack_api.datatypes import Api
+
+
+ # Use protocol definitions for type checking
+ class MyInferenceProvider(Inference):
+     async def chat_completion(self, request: ChatCompletionRequest):
+         # Your implementation
+         pass
+
+
+ # Define provider specifications
+ my_provider_spec = InlineProviderSpec(
+     api=Api.inference,
+     provider_type="inline::my-provider",
+     pip_packages=["my-dependencies"],
+     module="my_package.providers.inference",
+     config_class="my_package.providers.inference.MyConfig",
+ )
+ ```
+
+ ## Relationship to llama-stack
+
+ The main `llama-stack` package depends on `llama-stack-api` and provides:
+
+ - Full server implementation
+ - Built-in provider implementations
+ - CLI tools for running and managing stacks
+ - Runtime provider resolution and orchestration
+
+ ## Contributing
+
+ See the main [Llama Stack repository](https://github.com/llamastack/llama-stack) for contribution guidelines.
+
+ ## License
+
+ MIT License - see LICENSE file for details.
+
+ ## Links
+
+ - [Main Llama Stack Repository](https://github.com/llamastack/llama-stack)
+ - [Documentation](https://llamastack.ai/)
+ - [Client Library](https://pypi.org/project/llama-stack-client/)
@@ -0,0 +1,103 @@
+ # llama-stack-api
+
+ API and Provider specifications for Llama Stack - a lightweight package with protocol definitions and provider specs.
+
+ ## Overview
+
+ `llama-stack-api` is a minimal-dependency package that contains:
+
+ - **API Protocol Definitions**: Type-safe protocol definitions for all Llama Stack APIs (inference, agents, safety, etc.)
+ - **Provider Specifications**: Provider spec definitions for building custom providers
+ - **Data Types**: Shared data types and models used across the Llama Stack ecosystem
+ - **Type Utilities**: Strong typing utilities and schema validation
+
+ ## What This Package Does NOT Include
+
+ - Server implementation (see the `llama-stack` package)
+ - Provider implementations (see the `llama-stack` package)
+ - CLI tools (see the `llama-stack` package)
+ - Runtime orchestration (see the `llama-stack` package)
+
+ ## Use Cases
+
+ This package is designed for:
+
+ 1. **Third-party Provider Developers**: Build custom providers without depending on the full Llama Stack server
+ 2. **Client Library Authors**: Use type definitions without server dependencies
+ 3. **Documentation Generation**: Generate API docs from protocol definitions (see the sketch after this list)
+ 4. **Type Checking**: Validate implementations against the official specs
+
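+ As a quick illustration of the documentation-generation use case, here is a minimal sketch. It assumes `ChatCompletionRequest` (imported as in the usage example below) is a pydantic v2 model, which is consistent with the dependency list that follows:
+
+ ```python
+ import json
+
+ from llama_stack_api.inference import ChatCompletionRequest
+
+ # pydantic v2 models expose model_json_schema(); the resulting dict can
+ # feed an API-docs or OpenAPI generation pipeline.
+ schema = ChatCompletionRequest.model_json_schema()
+ print(json.dumps(schema, indent=2))
+ ```
+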
+ ## Installation
+
+ ```bash
+ pip install llama-stack-api
+ ```
+
+ Or with uv:
+
+ ```bash
+ uv pip install llama-stack-api
+ ```
+
+ ## Dependencies
+
+ Minimal dependencies (a short sketch of how the schema pieces fit together follows the list):
+
+ - `fastapi>=0.115.0,<1.0` - For API endpoint definitions
+ - `pydantic>=2.11.9` - For data validation and serialization
+ - `jsonschema` - For JSON schema utilities
+ - `opentelemetry-sdk>=1.30.0` - For telemetry types
+ - `opentelemetry-exporter-otlp-proto-http>=1.30.0` - For OTLP telemetry export
+
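+ The two schema-related dependencies complement each other: pydantic models emit JSON Schema, and `jsonschema` validates raw payloads against it. The sketch below uses a hypothetical `ExampleConfig` model for illustration; it is not a class shipped by this package.
+
+ ```python
+ import jsonschema
+ from pydantic import BaseModel
+
+
+ # Hypothetical config model, for illustration only.
+ class ExampleConfig(BaseModel):
+     model_id: str
+     max_tokens: int = 512
+
+
+ # Export the model as a JSON Schema dict...
+ schema = ExampleConfig.model_json_schema()
+
+ # ...then validate a raw payload against it; raises
+ # jsonschema.exceptions.ValidationError on mismatch.
+ jsonschema.validate({"model_id": "my-model", "max_tokens": 128}, schema)
+ ```
+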
+ ## Versioning
+
+ This package follows semantic versioning independently from the main `llama-stack` package:
+
+ - **Patch versions** (0.4.x): Documentation, internal improvements
+ - **Minor versions** (0.x.0): New APIs, backward-compatible changes
+ - **Major versions** (x.0.0): Breaking changes to existing APIs
+
+ Current version: **0.4.0**
+
+ ## Usage Example
+
+ ```python
+ from llama_stack_api.inference import Inference, ChatCompletionRequest
+ from llama_stack_api.providers.datatypes import ProviderSpec, InlineProviderSpec
+ from llama_stack_api.datatypes import Api
+
+
+ # Use protocol definitions for type checking
+ class MyInferenceProvider(Inference):
+     async def chat_completion(self, request: ChatCompletionRequest):
+         # Your implementation
+         pass
+
+
+ # Define provider specifications
+ my_provider_spec = InlineProviderSpec(
+     api=Api.inference,
+     provider_type="inline::my-provider",
+     pip_packages=["my-dependencies"],
+     module="my_package.providers.inference",
+     config_class="my_package.providers.inference.MyConfig",
+ )
+ ```
+
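+ With the protocol subclassed as above, running a static type checker such as mypy over your provider package should flag any drift between your method signatures and the spec - this is the type-checking workflow from the Use Cases list.
+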
+ ## Relationship to llama-stack
+
+ The main `llama-stack` package depends on `llama-stack-api` and provides:
+
+ - Full server implementation
+ - Built-in provider implementations
+ - CLI tools for running and managing stacks
+ - Runtime provider resolution and orchestration
+
+ ## Contributing
+
+ See the main [Llama Stack repository](https://github.com/llamastack/llama-stack) for contribution guidelines.
+
+ ## License
+
+ MIT License - see LICENSE file for details.
+
+ ## Links
+
+ - [Main Llama Stack Repository](https://github.com/llamastack/llama-stack)
+ - [Documentation](https://llamastack.ai/)
+ - [Client Library](https://pypi.org/project/llama-stack-client/)
@@ -0,0 +1,7 @@
+ README.md
+ pyproject.toml
+ llama_stack_api.egg-info/PKG-INFO
+ llama_stack_api.egg-info/SOURCES.txt
+ llama_stack_api.egg-info/dependency_links.txt
+ llama_stack_api.egg-info/requires.txt
+ llama_stack_api.egg-info/top_level.txt
@@ -0,0 +1,5 @@
+ fastapi<1.0,>=0.115.0
+ pydantic>=2.11.9
+ jsonschema
+ opentelemetry-sdk>=1.30.0
+ opentelemetry-exporter-otlp-proto-http>=1.30.0
@@ -0,0 +1,83 @@
+ [build-system]
+ requires = ["setuptools>=61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [tool.uv]
+ required-version = ">=0.7.0"
+
+ [project]
+ name = "llama-stack-api"
+ version = "0.4.0"
+ authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+ description = "API and Provider specifications for Llama Stack - lightweight package with protocol definitions and provider specs"
+ readme = "README.md"
+ requires-python = ">=3.12"
+ license = { "text" = "MIT" }
+ classifiers = [
+     "License :: OSI Approved :: MIT License",
+     "Programming Language :: Python :: 3",
+     "Operating System :: OS Independent",
+     "Intended Audience :: Developers",
+     "Intended Audience :: Information Technology",
+     "Intended Audience :: Science/Research",
+     "Topic :: Scientific/Engineering :: Artificial Intelligence",
+     "Topic :: Scientific/Engineering :: Information Analysis",
+ ]
+ dependencies = [
+     "fastapi>=0.115.0,<1.0",
+     "pydantic>=2.11.9",
+     "jsonschema",
+     "opentelemetry-sdk>=1.30.0",
+     "opentelemetry-exporter-otlp-proto-http>=1.30.0",
+ ]
+
+ [project.urls]
+ Homepage = "https://github.com/llamastack/llama-stack"
+
+ [tool.setuptools.packages.find]
+ where = ["."]
+ include = ["llama_stack_api", "llama_stack_api.*"]
+
+ [tool.setuptools.package-data]
+ llama_stack_api = ["py.typed"]
+
+ [tool.ruff]
+ line-length = 120
+
+ [tool.ruff.lint]
+ select = [
+     "UP", # pyupgrade
+     "B", # flake8-bugbear
+     "B9", # flake8-bugbear subset
+     "C", # comprehensions
+     "E", # pycodestyle
+     "F", # Pyflakes
+     "N", # Naming
+     "W", # Warnings
+     "DTZ", # datetime rules
+     "I", # isort (imports order)
+     "RUF001", # Checks for ambiguous Unicode characters in strings
+     "RUF002", # Checks for ambiguous Unicode characters in docstrings
+     "RUF003", # Checks for ambiguous Unicode characters in comments
+     "PLC2401", # Checks for the use of non-ASCII characters in variable names
+ ]
+ ignore = [
+     # The following ignores are desired by the project maintainers.
+     "E402", # Module level import not at top of file
+     "E501", # Line too long
+     "F405", # Maybe undefined or defined from star import
+     "C408", # Ignored because we like the dict keyword argument syntax
+     "N812", # Ignored because import torch.nn.functional as F is PyTorch convention
+
+     # These are the additional ones we started ignoring after moving to ruff. We should look into each one of them later.
+     "C901", # Complexity of the function is too high
+ ]
+ unfixable = [
+     "PLE2515",
+ ] # Do not fix this automatically since ruff will replace the zero-width space with \u200b - let's do it manually
+
+ [tool.ruff.lint.per-file-ignores]
+ "llama_stack_api/apis/**/__init__.py" = ["F403"]
+
+ [tool.ruff.lint.pep8-naming]
+ classmethod-decorators = ["classmethod", "pydantic.field_validator"]
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+