Llimona 0.1.0.dev0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. llimona-0.1.0.dev0/PKG-INFO +237 -0
  2. llimona-0.1.0.dev0/README.md +221 -0
  3. llimona-0.1.0.dev0/pyproject.toml +189 -0
  4. llimona-0.1.0.dev0/src/llimona/__init__.py +4 -0
  5. llimona-0.1.0.dev0/src/llimona/addons.py +150 -0
  6. llimona-0.1.0.dev0/src/llimona/app.py +386 -0
  7. llimona-0.1.0.dev0/src/llimona/cli/__init__.py +64 -0
  8. llimona-0.1.0.dev0/src/llimona/cli/addons.py +9 -0
  9. llimona-0.1.0.dev0/src/llimona/cli/openai.py +75 -0
  10. llimona-0.1.0.dev0/src/llimona/cli/providers.py +85 -0
  11. llimona-0.1.0.dev0/src/llimona/cli/utils.py +20 -0
  12. llimona-0.1.0.dev0/src/llimona/component.py +20 -0
  13. llimona-0.1.0.dev0/src/llimona/config/__init__.py +0 -0
  14. llimona-0.1.0.dev0/src/llimona/config/app.py +123 -0
  15. llimona-0.1.0.dev0/src/llimona/config/yaml.py +189 -0
  16. llimona-0.1.0.dev0/src/llimona/context.py +279 -0
  17. llimona-0.1.0.dev0/src/llimona/id_builders.py +172 -0
  18. llimona-0.1.0.dev0/src/llimona/interfaces/__init__.py +0 -0
  19. llimona-0.1.0.dev0/src/llimona/interfaces/openai/__init__.py +44 -0
  20. llimona-0.1.0.dev0/src/llimona/interfaces/openai/mappers.py +62 -0
  21. llimona-0.1.0.dev0/src/llimona/interfaces/openai/models/__init__.py +0 -0
  22. llimona-0.1.0.dev0/src/llimona/interfaces/openai/models/api_models.py +153 -0
  23. llimona-0.1.0.dev0/src/llimona/interfaces/openai/models/api_responses.py +179 -0
  24. llimona-0.1.0.dev0/src/llimona/interfaces/openai/models/content.py +214 -0
  25. llimona-0.1.0.dev0/src/llimona/interfaces/openai/models/enums.py +68 -0
  26. llimona-0.1.0.dev0/src/llimona/interfaces/openai/models/events.py +195 -0
  27. llimona-0.1.0.dev0/src/llimona/interfaces/openai/models/response.py +247 -0
  28. llimona-0.1.0.dev0/src/llimona/interfaces/openai/models/tools.py +64 -0
  29. llimona-0.1.0.dev0/src/llimona/models/__init__.py +0 -0
  30. llimona-0.1.0.dev0/src/llimona/models/common.py +63 -0
  31. llimona-0.1.0.dev0/src/llimona/provider_loaders.py +193 -0
  32. llimona-0.1.0.dev0/src/llimona/providers.py +265 -0
  33. llimona-0.1.0.dev0/src/llimona/py.typed +0 -0
  34. llimona-0.1.0.dev0/src/llimona/registries.py +80 -0
  35. llimona-0.1.0.dev0/src/llimona/sensors.py +360 -0
  36. llimona-0.1.0.dev0/src/llimona/utils.py +54 -0
@@ -0,0 +1,237 @@
1
+ Metadata-Version: 2.3
2
+ Name: Llimona
3
+ Version: 0.1.0.dev0
4
+ Summary: Open and modular framework for building observable LLM gateways with OpenAI-compatible APIs and pluggable providers.
5
+ Author: Alfred
6
+ Author-email: Alfred <alfred82santa@gmail.com>
7
+ Requires-Dist: click>=8.3.1
8
+ Requires-Dist: litellm>=1.81.7
9
+ Requires-Dist: pydantic>=2.12.5
10
+ Requires-Dist: pydantic-settings>=2.13.0
11
+ Requires-Dist: pydantic-views>=0.3.0
12
+ Requires-Dist: pymongo>=4.16.0
13
+ Requires-Dist: pyyaml>=6.0.3
14
+ Requires-Python: >=3.14
15
+ Description-Content-Type: text/markdown
16
+
17
+ # Llimona
18
+
19
+ Llimona is an open and modular Python framework for building production-ready LLM gateways.
20
+ It provides OpenAI-compatible APIs, provider-aware routing, and an extensible plugin model for integrating multiple backends behind a single interface.
21
+
22
+ By keeping providers as addons, Llimona stays lightweight at its core while enabling deployments to include only the integrations, policies, and observability components they actually need.
23
+
24
+ ## Key Features
25
+
26
+ - OpenAI-compatible service interfaces (currently Responses and Models).
27
+ - Provider routing using the `provider_name/model_name` naming convention.
28
+ - Addon-based extensibility through Python entry points (`llimona.addon`).
29
+ - Typed YAML configuration with Pydantic validation.
30
+ - Request `Context` propagation with actor/origin metadata, constraints, and sub-context trees.
31
+ - Sensor support for metrics such as request counters and elapsed time, making request execution observable.
32
+
33
+ ## Architecture
34
+
35
+ [Architecture documentation](docs/arch.md)
36
+
37
+ ## Requirements
38
+
39
+ - Python `>= 3.14`
40
+ - `uv` (recommended)
41
+
42
+ ## Installation
43
+
44
+ ### Install dependencies for local development
45
+
46
+ ```bash
47
+ uv sync
48
+ ```
49
+
50
+ ### Install the core package
51
+
52
+ ```bash
53
+ uv pip install .
54
+ ```
55
+
56
+ ### Install an addon package
57
+
58
+ ```bash
59
+ uv pip install ./addons/llimona_azure_openai
60
+ ```
61
+
62
+ ## Quick Start
63
+
64
+ ### 1) Create an app config
65
+
66
+ Example (`example_config/app.yaml`):
67
+
68
+ ```yaml
69
+ provider_addons:
70
+ - azure_openai
71
+ provider_loaders:
72
+ - type: autodiscovery_dirs
73
+ src: !path .
74
+ ```
75
+
76
+ ### 2) Create a provider directory with `provider.yaml`
77
+
78
+ Example (`example_config/azure_1/provider.yaml`):
79
+
80
+ ```yaml
81
+ type: azure_openai
82
+ name: azure_1
83
+ display_name: Azure Example 1
84
+ owner_id: 444444-222-333-222 # Not used yet; reserved for future use
85
+ base_url: !envvar AZURE_OPENAI_1_BASE_URL
86
+ credentials:
87
+ api_key: !envvar AZURE_OPENAI_1_API_KEY
88
+ services:
89
+ - type: openai_responses
90
+ - type: openai_models
91
+ models:
92
+ - name: gpt-4o-mini
93
+ allowed_services:
94
+ - openai_responses
95
+ ```
96
+
97
+ ### 3) Run a request
98
+
99
+ ```bash
100
+ uv run llimona app --config-file example_config/app.yaml openai responses create azure_1/gpt-4o-mini "Hello" --stream
101
+ ```
102
+
103
+ ### 4) Observe sensor metrics
104
+
105
+ After the request completes, Llimona prints sensor values that make execution observable:
106
+
107
+ ```text
108
+ Sensor value: elapsed_time=0.606314 (Elapsed time of the request.)
109
+ Sensor value: request_count=1 (Number of requests being processed for the sensor request_count.)
110
+ Sensor value: request_per_unit_of_time=1 (Number of requests in the last 0:01:00.)
111
+ Sensor value: request_per_window_of_time=1 (Number of requests until the next reset.)
112
+ ```
113
+
114
+ ## CLI Usage
115
+
116
+ ### Top-level help
117
+
118
+ ```bash
119
+ llimona --help
120
+ ```
121
+
122
+ ### List discovered addons
123
+
124
+ ```bash
125
+ llimona addons
126
+ ```
127
+
128
+ ### Run commands with an app config
129
+
130
+ ```bash
131
+ llimona app --config-file <path-to-app.yaml> <command>
132
+ ```
133
+
134
+ ### Providers
135
+
136
+ ```bash
137
+ # list all providers
138
+ llimona app --config-file <cfg> providers
139
+
140
+ # inspect one provider
141
+ llimona app --config-file <cfg> providers <provider_name>
142
+
143
+ # list models in one provider
144
+ llimona app --config-file <cfg> providers <provider_name> models
145
+ ```
146
+
147
+ ### OpenAI-compatible interface commands
148
+
149
+ ```bash
150
+ # create a response
151
+ llimona app --config-file <cfg> openai responses create <provider>/<model> "Prompt"
152
+
153
+ # streaming response
154
+ llimona app --config-file <cfg> openai responses create <provider>/<model> "Prompt" --stream
155
+
156
+ # list models (global or filtered by provider)
157
+ llimona app --config-file <cfg> openai models list
158
+ llimona app --config-file <cfg> openai models list <provider_name>
159
+ ```
160
+
161
+ ## Configuration Overview
162
+
163
+ The app configuration supports these top-level fields:
164
+
165
+ - `provider_addons`: provider addons to register.
166
+ - `provider_loader_addons`: provider-loader addons to register.
167
+ - `sensor_addons`: sensor addons to register.
168
+ - `id_builder`: optional ID builder configuration.
169
+ - `provider_loaders`: loader definitions.
170
+
171
+ Built-in provider loader:
172
+
173
+ - `autodiscovery_dirs`: scans child directories under `src`, reads `provider.yaml`, and optionally merges definitions from `models/*.yaml`, `services/*.yaml`, and `sensors/*.yaml`.
174
+
175
+ ## Architecture Summary
176
+
177
+ Llimona receives OpenAI-compatible requests, decomposes model IDs, routes to the appropriate provider, and maps provider-specific responses back to interface models.
178
+
179
+ Every call flows through a `Context` object, which can carry:
180
+
181
+ - action metadata (`provider`, `service`, `service_action`, `model`)
182
+ - actor and origin information
183
+ - conversation metadata
184
+ - constraints
185
+ - collected sensor values
186
+
187
+ Routing strategies can create sub-contexts, enabling per-branch observability and post-execution failure inspection.
188
+
189
+ Sensors make the platform observable by exposing execution metrics across the full request context tree.
190
+
191
+ For full technical details, see `docs/arch.md`.
192
+
193
+ ## Addons in This Repository
194
+
195
+ - `addons/llimona_azure_openai`: Azure OpenAI provider addon.
196
+ - `addons/llimona_smart_provider`: smart/virtual provider routing addon.
197
+
198
+ ## Development
199
+
200
+ ### Install development tools
201
+
202
+ ```bash
203
+ uv sync --group dev
204
+ ```
205
+
206
+ ### Run tests
207
+
208
+ ```bash
209
+ uv run pytest
210
+ ```
211
+
212
+ ### Lint and format
213
+
214
+ ```bash
215
+ uv run ruff check .
216
+ uv run ruff format .
217
+ ```
218
+
219
+ ## Branching and Versioning
220
+
221
+ The repository follows a GitFlow-like model with:
222
+
223
+ - `main` as the default integration branch
224
+ - `feat/*`, `fix/*`, and `chore/*` working branches
225
+ - squash-merge pull requests
226
+ - SemVer/PEP 440 release semantics
227
+
228
+ See [branching model document](BRANCHING_MODEL.md) for the complete policy.
229
+
230
+ ## Security Notes
231
+
232
+ - Do not commit real API keys or secrets in provider files.
233
+ - Inject credentials at runtime through your deployment environment.
234
+
235
+ ## License
236
+
237
+ This project is licensed under the GNU AFFERO GENERAL PUBLIC LICENSE. See `LICENSE` for details.
@@ -0,0 +1,221 @@
1
+ # Llimona
2
+
3
+ Llimona is an open and modular Python framework for building production-ready LLM gateways.
4
+ It provides OpenAI-compatible APIs, provider-aware routing, and an extensible plugin model for integrating multiple backends behind a single interface.
5
+
6
+ By keeping providers as addons, Llimona stays lightweight at its core while enabling deployments to include only the integrations, policies, and observability components they actually need.
7
+
8
+ ## Key Features
9
+
10
+ - OpenAI-compatible service interfaces (currently Responses and Models).
11
+ - Provider routing using the `provider_name/model_name` naming convention.
12
+ - Addon-based extensibility through Python entry points (`llimona.addon`).
13
+ - Typed YAML configuration with Pydantic validation.
14
+ - Request `Context` propagation with actor/origin metadata, constraints, and sub-context trees.
15
+ - Sensor support for metrics such as request counters and elapsed time, making request execution observable.
16
+
17
+ ## Architecture
18
+
19
+ [Architecture documentation](docs/arch.md)
20
+
21
+ ## Requirements
22
+
23
+ - Python `>= 3.14`
24
+ - `uv` (recommended)
25
+
26
+ ## Installation
27
+
28
+ ### Install dependencies for local development
29
+
30
+ ```bash
31
+ uv sync
32
+ ```
33
+
34
+ ### Install the core package
35
+
36
+ ```bash
37
+ uv pip install .
38
+ ```
39
+
40
+ ### Install an addon package
41
+
42
+ ```bash
43
+ uv pip install ./addons/llimona_azure_openai
44
+ ```
45
+
46
+ ## Quick Start
47
+
48
+ ### 1) Create an app config
49
+
50
+ Example (`example_config/app.yaml`):
51
+
52
+ ```yaml
53
+ provider_addons:
54
+ - azure_openai
55
+ provider_loaders:
56
+ - type: autodiscovery_dirs
57
+ src: !path .
58
+ ```
59
+
60
+ ### 2) Create a provider directory with `provider.yaml`
61
+
62
+ Example (`example_config/azure_1/provider.yaml`):
63
+
64
+ ```yaml
65
+ type: azure_openai
66
+ name: azure_1
67
+ display_name: Azure Example 1
68
+ owner_id: 444444-222-333-222 # Not used yet; reserved for future use
69
+ base_url: !envvar AZURE_OPENAI_1_BASE_URL
70
+ credentials:
71
+ api_key: !envvar AZURE_OPENAI_1_API_KEY
72
+ services:
73
+ - type: openai_responses
74
+ - type: openai_models
75
+ models:
76
+ - name: gpt-4o-mini
77
+ allowed_services:
78
+ - openai_responses
79
+ ```
80
+
81
+ ### 3) Run a request
82
+
83
+ ```bash
84
+ uv run llimona app --config-file example_config/app.yaml openai responses create azure_1/gpt-4o-mini "Hello" --stream
85
+ ```
86
+
87
+ ### 4) Observe sensor metrics
88
+
89
+ After the request completes, Llimona prints sensor values that make execution observable:
90
+
91
+ ```text
92
+ Sensor value: elapsed_time=0.606314 (Elapsed time of the request.)
93
+ Sensor value: request_count=1 (Number of requests being processed for the sensor request_count.)
94
+ Sensor value: request_per_unit_of_time=1 (Number of requests in the last 0:01:00.)
95
+ Sensor value: request_per_window_of_time=1 (Number of requests until the next reset.)
96
+ ```
97
+
98
+ ## CLI Usage
99
+
100
+ ### Top-level help
101
+
102
+ ```bash
103
+ llimona --help
104
+ ```
105
+
106
+ ### List discovered addons
107
+
108
+ ```bash
109
+ llimona addons
110
+ ```
111
+
112
+ ### Run commands with an app config
113
+
114
+ ```bash
115
+ llimona app --config-file <path-to-app.yaml> <command>
116
+ ```
117
+
118
+ ### Providers
119
+
120
+ ```bash
121
+ # list all providers
122
+ llimona app --config-file <cfg> providers
123
+
124
+ # inspect one provider
125
+ llimona app --config-file <cfg> providers <provider_name>
126
+
127
+ # list models in one provider
128
+ llimona app --config-file <cfg> providers <provider_name> models
129
+ ```
130
+
131
+ ### OpenAI-compatible interface commands
132
+
133
+ ```bash
134
+ # create a response
135
+ llimona app --config-file <cfg> openai responses create <provider>/<model> "Prompt"
136
+
137
+ # streaming response
138
+ llimona app --config-file <cfg> openai responses create <provider>/<model> "Prompt" --stream
139
+
140
+ # list models (global or filtered by provider)
141
+ llimona app --config-file <cfg> openai models list
142
+ llimona app --config-file <cfg> openai models list <provider_name>
143
+ ```
144
+
145
+ ## Configuration Overview
146
+
147
+ The app configuration supports these top-level fields:
148
+
149
+ - `provider_addons`: provider addons to register.
150
+ - `provider_loader_addons`: provider-loader addons to register.
151
+ - `sensor_addons`: sensor addons to register.
152
+ - `id_builder`: optional ID builder configuration.
153
+ - `provider_loaders`: loader definitions.
154
+
155
+ Built-in provider loader:
156
+
157
+ - `autodiscovery_dirs`: scans child directories under `src`, reads `provider.yaml`, and optionally merges definitions from `models/*.yaml`, `services/*.yaml`, and `sensors/*.yaml`.
158
+
159
+ ## Architecture Summary
160
+
161
+ Llimona receives OpenAI-compatible requests, decomposes model IDs, routes to the appropriate provider, and maps provider-specific responses back to interface models.
162
+
163
+ Every call flows through a `Context` object, which can carry:
164
+
165
+ - action metadata (`provider`, `service`, `service_action`, `model`)
166
+ - actor and origin information
167
+ - conversation metadata
168
+ - constraints
169
+ - collected sensor values
170
+
171
+ Routing strategies can create sub-contexts, enabling per-branch observability and post-execution failure inspection.
172
+
173
+ Sensors make the platform observable by exposing execution metrics across the full request context tree.
174
+
175
+ For full technical details, see `docs/arch.md`.
176
+
177
+ ## Addons in This Repository
178
+
179
+ - `addons/llimona_azure_openai`: Azure OpenAI provider addon.
180
+ - `addons/llimona_smart_provider`: smart/virtual provider routing addon.
181
+
182
+ ## Development
183
+
184
+ ### Install development tools
185
+
186
+ ```bash
187
+ uv sync --group dev
188
+ ```
189
+
190
+ ### Run tests
191
+
192
+ ```bash
193
+ uv run pytest
194
+ ```
195
+
196
+ ### Lint and format
197
+
198
+ ```bash
199
+ uv run ruff check .
200
+ uv run ruff format .
201
+ ```
202
+
203
+ ## Branching and Versioning
204
+
205
+ The repository follows a GitFlow-like model with:
206
+
207
+ - `main` as the default integration branch
208
+ - `feat/*`, `fix/*`, and `chore/*` working branches
209
+ - squash-merge pull requests
210
+ - SemVer/PEP 440 release semantics
211
+
212
+ See [branching model document](BRANCHING_MODEL.md) for the complete policy.
213
+
214
+ ## Security Notes
215
+
216
+ - Do not commit real API keys or secrets in provider files.
217
+ - Inject credentials at runtime through your deployment environment.
218
+
219
+ ## License
220
+
221
+ This project is licensed under the GNU AFFERO GENERAL PUBLIC LICENSE. See `LICENSE` for details.
@@ -0,0 +1,189 @@
1
+ [project]
2
+ name = "Llimona"
3
+ version = "0.1.0.dev0"
4
+ description = "Open and modular framework for building observable LLM gateways with OpenAI-compatible APIs and pluggable providers."
5
+ readme = "README.md"
6
+ authors = [
7
+ { name = "Alfred", email = "alfred82santa@gmail.com" }
8
+ ]
9
+ requires-python = ">=3.14"
10
+ dependencies = [
11
+ "click>=8.3.1",
12
+ "litellm>=1.81.7",
13
+ "pydantic>=2.12.5",
14
+ "pydantic-settings>=2.13.0",
15
+ "pydantic-views>=0.3.0",
16
+ "pymongo>=4.16.0",
17
+ "pyyaml>=6.0.3",
18
+ ]
19
+
20
+ [project.scripts]
21
+ llimona = "llimona.cli:llimona"
22
+
23
+ [build-system]
24
+ requires = ["uv_build>=0.9.26,<0.12.0"]
25
+ build-backend = "uv_build"
26
+
27
+ [dependency-groups]
28
+ cron = [
29
+ "cronsim>=2.7",
30
+ ]
31
+ crypt = [
32
+ "pycryptodome>=3.23.0",
33
+ ]
34
+ dev = [
35
+ "mypy>=1.19.1",
36
+ "poethepoet>=0.42.1",
37
+ "pytest>=9.0.2",
38
+ "pytest-asyncio>=1.3.0",
39
+ "pytest-cov>=7.0.0",
40
+ "ruff>=0.14.14",
41
+ "types-croniter>=6.0.0.20250809",
42
+ "types-pyyaml>=6.0.12.20250915",
43
+ ]
44
+ timezone = [
45
+ "pydantic-extra-types>=2.11.0",
46
+ "pydantic[timezone]>=2.12.5",
47
+ ]
48
+
49
+ [tool.ruff]
50
+ line-length = 120
51
+ target-version = "py314"
52
+ exclude = [".venv", "dist", "build"]
53
+ force-exclude = true
54
+
55
+ [tool.ruff.lint]
56
+ # Rules to enforce
57
+ select = [
58
+ "E", # pycodestyle errors
59
+ "W", # pycodestyle warnings
60
+ "F", # Pyflakes
61
+ "I", # isort
62
+ "UP", # pyupgrade
63
+ "N", # pep8-naming
64
+ "B", # flake8-bugbear
65
+ "A", # flake8-builtins
66
+ "C4", # flake8-comprehensions
67
+ "PT", # flake8-pytest-style
68
+ "RUF", # Ruff-specific rules
69
+ ]
70
+
71
+
72
+ [tool.ruff.format]
73
+ quote-style = "single"
74
+ indent-style = "space"
75
+ skip-magic-trailing-comma = false
76
+ line-ending = "auto"
77
+
78
+
79
+ [tool.uv.workspace]
80
+ members = [
81
+ "addons/llimona_azure_openai",
82
+ "addons/llimona_smart_provider",
83
+ "addons/llimona_mock_provider",
84
+ "addons/llimona_opentelemetry",
85
+ ]
86
+
87
+ [tool.coverage.run]
88
+ omit = [".venv/*", "tests/**", "src/**/cli/**"]
89
+ source = ["src"]
90
+ branch = true
91
+ relative_files = false
92
+
93
+ [tool.coverage.report]
94
+ # Regexes for lines to exclude from consideration
95
+ exclude_also = [
96
+ # Don't complain about missing debug-only code:
97
+ "def __repr__",
98
+ "if self\\.debug",
99
+
100
+ # Don't complain if tests don't hit defensive assertion code:
101
+ "raise AssertionError",
102
+ "raise NotImplementedError",
103
+
104
+ # Don't complain if non-runnable code isn't run:
105
+ "if 0:",
106
+ "if __name__ == .__main__.:",
107
+
108
+ # Don't complain about abstract methods, they aren't run:
109
+ "@(abc\\.)?abstractmethod",
110
+
111
+ # Don't complain type checking imports, they aren't run:
112
+ "if TYPE_CHECKING",
113
+
114
+ # Don't complain overloads, they aren't run:
115
+ "@overload"
116
+ ]
117
+
118
+ [tool.coverage.paths]
119
+ source = ["src/"]
120
+ omit = [
121
+ "src/**/cli",
122
+ "tests",
123
+ ]
124
+
125
+ [tool.pytest.ini_options]
126
+ minversion = "8.0"
127
+ asyncio_mode = "auto"
128
+ addopts = ["--strict-markers", "--strict-config"]
129
+ testpaths = ["tests"]
130
+ pythonpath = ["src"]
131
+
132
+ [tool.poe.tasks]
133
+ build = "uv build"
134
+ test = "pytest -v -s --junitxml=pytest.xml --cov src --cov-report term-missing --cov-report xml:coverage.xml --cov-fail-under=85 --exitfirst"
135
+ typecheck = "mypy src"
136
+ format = "ruff format src tests"
137
+ "format:check" = "ruff format src tests --check"
138
+ lint = "ruff check src tests"
139
+ "lint:fix" = "ruff check src tests --fix"
140
+
141
+ [tool.poe.tasks.clean]
142
+ cmd = """
143
+ rm -rf ./**/src/**/*.pyc
144
+ ./**/src/**/*.pyo
145
+ ./**/src/**/__pycache__
146
+ ./**/tests/**/*.pyc
147
+ ./**/tests/**/*.pyo
148
+ ./**/tests/**/__pycache__
149
+ .coverage
150
+ coverage.xml
151
+ pytest.xml
152
+ .*_cache
153
+ """
154
+ empty_glob = "null"
155
+
156
+ [tool.poe.tasks.validate]
157
+ help = "Execute all validations: typecheck, lint, and test"
158
+ sequence = [
159
+ "typecheck",
160
+ "lint",
161
+ "test"
162
+ ]
163
+
164
+ [tool.poe.tasks.prebuild]
165
+ help = "Pre-build checks: typecheck and lint"
166
+ sequence = [
167
+ "typecheck",
168
+ "lint"
169
+ ]
170
+
171
+ [tool.poe.tasks."clean:all"]
172
+ help = "Complete cleanup: cache, coverage, and temporary files"
173
+ sequence = [
174
+ "clean",
175
+ { cmd = """rm -rf dist
176
+ build
177
+ .coverage
178
+ coverage.xml
179
+ pytest.xml
180
+ **/.*_cache
181
+ """},
182
+ ]
183
+
184
+ [tool.poe.tasks.fix]
185
+ help = "Fix linting and formatting issues, then run tests"
186
+ sequence = [
187
+ "format",
188
+ "lint:fix",
189
+ ]
@@ -0,0 +1,4 @@
def init():
    """Bootstrap the Llimona package by registering every discovered provider addon.

    The import is deferred to call time so that merely importing the package
    has no side effects (and to avoid potential circular imports with the
    `addons` module — assumption based on the lazy-import pattern; confirm).
    """
    from .addons import Addons

    registry = Addons()
    registry.register_all_providers()