abstractvision 0.1.0__tar.gz → 0.2.1__tar.gz
- abstractvision-0.2.1/PKG-INFO +243 -0
- abstractvision-0.2.1/README.md +181 -0
- abstractvision-0.2.1/pyproject.toml +116 -0
- abstractvision-0.2.1/src/abstractvision/__init__.py +22 -0
- abstractvision-0.2.1/src/abstractvision/__main__.py +8 -0
- abstractvision-0.2.1/src/abstractvision/artifacts.py +320 -0
- abstractvision-0.2.1/src/abstractvision/assets/vision_model_capabilities.json +406 -0
- abstractvision-0.2.1/src/abstractvision/backends/__init__.py +43 -0
- abstractvision-0.2.1/src/abstractvision/backends/base_backend.py +63 -0
- abstractvision-0.2.1/src/abstractvision/backends/huggingface_diffusers.py +1503 -0
- abstractvision-0.2.1/src/abstractvision/backends/openai_compatible.py +325 -0
- abstractvision-0.2.1/src/abstractvision/backends/stable_diffusion_cpp.py +751 -0
- abstractvision-0.2.1/src/abstractvision/cli.py +778 -0
- abstractvision-0.2.1/src/abstractvision/errors.py +19 -0
- abstractvision-0.2.1/src/abstractvision/integrations/__init__.py +5 -0
- abstractvision-0.2.1/src/abstractvision/integrations/abstractcore.py +263 -0
- abstractvision-0.2.1/src/abstractvision/integrations/abstractcore_plugin.py +193 -0
- abstractvision-0.2.1/src/abstractvision/model_capabilities.py +255 -0
- abstractvision-0.2.1/src/abstractvision/types.py +95 -0
- abstractvision-0.2.1/src/abstractvision/vision_manager.py +115 -0
- abstractvision-0.2.1/src/abstractvision.egg-info/PKG-INFO +243 -0
- abstractvision-0.2.1/src/abstractvision.egg-info/SOURCES.txt +37 -0
- abstractvision-0.2.1/src/abstractvision.egg-info/entry_points.txt +5 -0
- abstractvision-0.2.1/src/abstractvision.egg-info/requires.txt +44 -0
- abstractvision-0.2.1/tests/test_abstractcore_plugin.py +113 -0
- abstractvision-0.2.1/tests/test_abstractcore_tool_integration.py +121 -0
- abstractvision-0.2.1/tests/test_artifact_outputs.py +74 -0
- abstractvision-0.2.1/tests/test_capabilities_schema_validation.py +105 -0
- abstractvision-0.2.1/tests/test_capability_registry_coverage.py +44 -0
- abstractvision-0.2.1/tests/test_cli_smoke.py +48 -0
- abstractvision-0.2.1/tests/test_huggingface_diffusers_backend.py +553 -0
- abstractvision-0.2.1/tests/test_manager_capability_checks.py +84 -0
- abstractvision-0.2.1/tests/test_openai_compatible_backend.py +90 -0
- abstractvision-0.2.1/tests/test_stable_diffusion_cpp_backend.py +212 -0
- abstractvision-0.2.1/tests/test_vision_model_capabilities.py +53 -0
- abstractvision-0.1.0/PKG-INFO +0 -65
- abstractvision-0.1.0/README.md +0 -37
- abstractvision-0.1.0/abstractvision/__init__.py +0 -7
- abstractvision-0.1.0/abstractvision.egg-info/PKG-INFO +0 -65
- abstractvision-0.1.0/abstractvision.egg-info/SOURCES.txt +0 -8
- abstractvision-0.1.0/setup.py +0 -32
- {abstractvision-0.1.0 → abstractvision-0.2.1}/LICENSE +0 -0
- {abstractvision-0.1.0 → abstractvision-0.2.1}/setup.cfg +0 -0
- {abstractvision-0.1.0 → abstractvision-0.2.1/src}/abstractvision.egg-info/dependency_links.txt +0 -0
- {abstractvision-0.1.0 → abstractvision-0.2.1/src}/abstractvision.egg-info/top_level.txt +0 -0
--- /dev/null
+++ abstractvision-0.2.1/PKG-INFO
@@ -0,0 +1,243 @@
+Metadata-Version: 2.4
+Name: abstractvision
+Version: 0.2.1
+Summary: Model-agnostic generative vision abstractions (image/video) for the Abstract ecosystem
+Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
+License: MIT
+Project-URL: Homepage, https://github.com/abstractcore/abstractvision
+Project-URL: Repository, https://github.com/abstractcore/abstractvision
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Topic :: Multimedia
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: diffusers>=0.36.0
+Requires-Dist: torch<3.0.0,>=2.0
+Requires-Dist: transformers<6.0.0,>=4.0
+Requires-Dist: accelerate>=0.0
+Requires-Dist: safetensors>=0.0
+Requires-Dist: sentencepiece>=0.1.99
+Requires-Dist: protobuf>=3.20.0
+Requires-Dist: einops>=0.7.0
+Requires-Dist: peft>=0.10.0
+Requires-Dist: Pillow>=9.0
+Requires-Dist: stable-diffusion-cpp-python>=0.4.2
+Provides-Extra: openai-compatible
+Provides-Extra: sdcpp
+Requires-Dist: stable-diffusion-cpp-python>=0.4.2; extra == "sdcpp"
+Provides-Extra: huggingface
+Requires-Dist: diffusers>=0.36.0; extra == "huggingface"
+Requires-Dist: torch>=2.0; extra == "huggingface"
+Requires-Dist: transformers>=4.0; extra == "huggingface"
+Requires-Dist: accelerate>=0.0; extra == "huggingface"
+Requires-Dist: safetensors>=0.0; extra == "huggingface"
+Requires-Dist: Pillow>=9.0; extra == "huggingface"
+Provides-Extra: local
+Requires-Dist: stable-diffusion-cpp-python>=0.4.2; extra == "local"
+Requires-Dist: diffusers>=0.36.0; extra == "local"
+Requires-Dist: torch>=2.0; extra == "local"
+Requires-Dist: transformers>=4.0; extra == "local"
+Requires-Dist: accelerate>=0.0; extra == "local"
+Requires-Dist: safetensors>=0.0; extra == "local"
+Requires-Dist: Pillow>=9.0; extra == "local"
+Provides-Extra: huggingface-dev
+Requires-Dist: diffusers>=0.36.0; extra == "huggingface-dev"
+Requires-Dist: torch>=2.0; extra == "huggingface-dev"
+Requires-Dist: transformers>=5.0; extra == "huggingface-dev"
+Requires-Dist: accelerate>=0.0; extra == "huggingface-dev"
+Requires-Dist: safetensors>=0.0; extra == "huggingface-dev"
+Requires-Dist: Pillow>=9.0; extra == "huggingface-dev"
+Provides-Extra: abstractcore
+Requires-Dist: abstractcore>=2.0.0; extra == "abstractcore"
+Dynamic: license-file
+
+# AbstractVision
+
+Model-agnostic generative vision API (images, optional video) for Python and the Abstract* ecosystem.
+
+## What you get
+
+- A stable task API: `VisionManager` (`src/abstractvision/vision_manager.py`)
+- A packaged capability registry (“what models can do”): `VisionModelCapabilitiesRegistry` backed by `src/abstractvision/assets/vision_model_capabilities.json`
+- Optional artifact-ref outputs (small JSON refs): `LocalAssetStore` / store adapters (`src/abstractvision/artifacts.py`)
+- Built-in backends (`src/abstractvision/backends/`):
+  - OpenAI-compatible HTTP (`openai_compatible.py`)
+  - Local Diffusers (`huggingface_diffusers.py`)
+  - Local stable-diffusion.cpp / GGUF (`stable_diffusion_cpp.py`)
+- CLI/REPL for manual testing: `abstractvision ...` (`src/abstractvision/cli.py`)
+
+## Status (current backend support)
+
+- Built-in backends implement: `text_to_image` and `image_to_image`.
+- Video (`text_to_video`, `image_to_video`) is supported only via the OpenAI-compatible backend **when** endpoints are configured.
+- `multi_view_image` is part of the public API (`VisionManager.generate_angles`) but no built-in backend implements it yet.
+
+Details: `docs/reference/backends.md`.
+
+## Installation
+
+```bash
+pip install abstractvision
+```
+
+Install optional integrations:
+
+```bash
+pip install "abstractvision[abstractcore]"
+```
+
+Some newer model pipelines may require Diffusers from GitHub `main` (see `docs/getting-started.md`):
+
+```bash
+pip install -U "abstractvision[huggingface-dev]"
+```
+
+For local dev (from a repo checkout):
+
+```bash
+pip install -e .
+```
+
+## Usage
+
+Start here:
+- Getting started: `docs/getting-started.md`
+- FAQ: `docs/faq.md`
+- API reference: `docs/api.md`
+- Architecture: `docs/architecture.md`
+- Docs index: `docs/README.md`
+
+### Capability-driven model selection
+
+```python
+from abstractvision import VisionModelCapabilitiesRegistry
+
+reg = VisionModelCapabilitiesRegistry()
+assert reg.supports("Qwen/Qwen-Image-2512", "text_to_image")
+
+print(reg.list_tasks())
+print(reg.models_for_task("text_to_image"))
+```
+
+### Backend wiring + generation (artifact outputs)
+
+The default install is “batteries included” (Torch + Diffusers + stable-diffusion.cpp python bindings), but heavy
+modules are imported lazily (see `src/abstractvision/backends/__init__.py`).
+
+```python
+from abstractvision import LocalAssetStore, VisionManager, VisionModelCapabilitiesRegistry, is_artifact_ref
+from abstractvision.backends import OpenAICompatibleBackendConfig, OpenAICompatibleVisionBackend
+
+reg = VisionModelCapabilitiesRegistry()
+
+backend = OpenAICompatibleVisionBackend(
+    config=OpenAICompatibleBackendConfig(
+        base_url="http://localhost:1234/v1",
+        api_key="YOUR_KEY",       # optional for local servers
+        model_id="REMOTE_MODEL",  # optional (server-dependent)
+    )
+)
+
+vm = VisionManager(
+    backend=backend,
+    store=LocalAssetStore(),       # enables artifact-ref outputs
+    model_id="zai-org/GLM-Image",  # optional: capability gating
+    registry=reg,                  # optional: reuse loaded registry
+)
+
+out = vm.generate_image("a cinematic photo of a red fox in snow")
+assert is_artifact_ref(out)
+print(out)  # {"$artifact": "...", "content_type": "...", ...}
+
+png_bytes = vm.store.load_bytes(out["$artifact"])  # type: ignore[union-attr]
+```
+
+### Interactive testing (CLI / REPL)
+
+```bash
+abstractvision models
+abstractvision tasks
+abstractvision show-model zai-org/GLM-Image
+
+abstractvision repl
+```
+
+Inside the REPL:
+
+```text
+/backend openai http://localhost:1234/v1
+/cap-model zai-org/GLM-Image
+/set width 1024
+/set height 1024
+/t2i "a watercolor painting of a lighthouse" --open
+```
+
+The CLI/REPL can also be configured via `ABSTRACTVISION_*` env vars; see `docs/reference/configuration.md`.
+
+One-shot commands (OpenAI-compatible HTTP backend only):
+
+```bash
+abstractvision t2i --base-url http://localhost:1234/v1 "a studio photo of an espresso machine"
+abstractvision i2i --base-url http://localhost:1234/v1 --image ./input.png "make it watercolor"
+```
+
+#### Local GGUF via stable-diffusion.cpp
+
+If you want to run GGUF diffusion models locally (e.g. Qwen Image), use the stable-diffusion.cpp backend (`sdcpp`).
+
+Recommended (pip-only; no external binary download): `pip install abstractvision` already includes the stable-diffusion.cpp python bindings (`stable-diffusion-cpp-python`).
+
+Alternative (external executable):
+
+- Install `sd-cli`: https://github.com/leejet/stable-diffusion.cpp/releases
+
+In the REPL:
+
+```text
+/backend sdcpp /path/to/qwen-image-2512-Q4_K_M.gguf /path/to/qwen_image_vae.safetensors /path/to/Qwen2.5-VL-7B-Instruct-*.gguf
+/t2i "a watercolor painting of a lighthouse" --sampling-method euler --offload-to-cpu --diffusion-fa --flow-shift 3 --open
+```
+
+Extra flags are forwarded via `request.extra`. In CLI mode they are forwarded to `sd-cli`; in python bindings mode, keys are mapped to python binding kwargs when supported and unsupported keys are ignored.
+
+### AbstractCore tool integration (artifact refs)
+
+If you’re using AbstractCore tool calling, AbstractVision can expose vision tasks as tools:
+
+```python
+from abstractvision.integrations.abstractcore import make_vision_tools
+
+tools = make_vision_tools(vision_manager=vm, model_id="zai-org/GLM-Image")
+```
+
+## Project
+
+- Release notes: `CHANGELOG.md`
+- Contributing: `CONTRIBUTING.md`
+- Security: `SECURITY.md`
+- Acknowledgments: `ACKNOWLEDMENTS.md`
+
+## Requirements
+
+- Python >= 3.8
+
+## License
+
+MIT License - see LICENSE file for details.
+
+## Author
+
+Laurent-Philippe Albou
+
+## Contact
+
+contact@abstractcore.ai
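A note on the `request.extra` forwarding described near the end of the README above: the README shows the REPL flags but not the Python-side call, so the sketch below is a minimal illustration only. The `extra=` keyword on `VisionManager.generate_image` and the specific key names are assumptions; the real request fields live in `src/abstractvision/types.py` and the mapping in `src/abstractvision/backends/stable_diffusion_cpp.py`.

```python
# Hypothetical sketch: passing backend-specific flags via request.extra.
# Whether generate_image() accepts `extra` directly is an assumption;
# consult src/abstractvision/types.py for the actual request type.
out = vm.generate_image(
    "a watercolor painting of a lighthouse",
    extra={
        "sampling_method": "euler",  # CLI mode: forwarded to sd-cli as --sampling-method
        "offload_to_cpu": True,      # bindings mode: mapped to a binding kwarg if supported
        "flow_shift": 3,             # unsupported keys are silently ignored in bindings mode
    },
)
```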
--- /dev/null
+++ abstractvision-0.2.1/README.md
@@ -0,0 +1,181 @@
[The 181 added lines are a verbatim copy of the README body already shown above as lines 63-243 of PKG-INFO; omitted here to avoid duplication.]
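On the `ABSTRACTVISION_*` environment variables the README mentions: the variable names below are hypothetical placeholders for illustration only, since the README documents the prefix but not the names; `docs/reference/configuration.md` in the package is the authoritative list.

```bash
# Hypothetical variable names -- only the ABSTRACTVISION_* prefix is documented.
export ABSTRACTVISION_BASE_URL="http://localhost:1234/v1"
export ABSTRACTVISION_API_KEY="YOUR_KEY"
abstractvision t2i "a studio photo of an espresso machine"
```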
--- /dev/null
+++ abstractvision-0.2.1/pyproject.toml
@@ -0,0 +1,116 @@
+[build-system]
+requires = ["setuptools>=61.0", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "abstractvision"
+dynamic = ["version"]
+description = "Model-agnostic generative vision abstractions (image/video) for the Abstract ecosystem"
+readme = "README.md"
+license = {text = "MIT"}
+authors = [{name = "Laurent-Philippe Albou", email = "contact@abstractcore.ai"}]
+requires-python = ">=3.8"
+classifiers = [
+    "Development Status :: 3 - Alpha",
+    "Intended Audience :: Developers",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Topic :: Multimedia",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+]
+# Batteries-included by default: users should only need to download model weights.
+# NOTE: This is intentionally heavy (torch + diffusers + stable-diffusion.cpp bindings).
+dependencies = [
+    "diffusers>=0.36.0",
+    "torch>=2.0,<3.0.0",
+    "transformers>=4.0,<6.0.0",
+    "accelerate>=0.0",
+    "safetensors>=0.0",
+    # Needed by T5 tokenizers used in SD3/FLUX and some other diffusion pipelines.
+    "sentencepiece>=0.1.99",
+    # Some HF tokenizers/pipelines require protobuf at runtime.
+    "protobuf>=3.20.0",
+    # Used by some modern diffusion architectures.
+    "einops>=0.7.0",
+    # LoRA adapter support in Diffusers.
+    "peft>=0.10.0",
+    "Pillow>=9.0",
+    "stable-diffusion-cpp-python>=0.4.2",
+]
+
+[project.urls]
+Homepage = "https://github.com/abstractcore/abstractvision"
+Repository = "https://github.com/abstractcore/abstractvision"
+
+[project.scripts]
+abstractvision = "abstractvision.cli:main"
+
+[project.entry-points."abstractcore.capabilities_plugins"]
+abstractvision = "abstractvision.integrations.abstractcore_plugin:register"
+
+[project.optional-dependencies]
+# OpenAI-compatible HTTP backend is stdlib-only today; keep the extra for forward compatibility.
+openai-compatible = []
+
+# Local generation via stable-diffusion.cpp python bindings (pip-installable).
+sdcpp = [
+    "stable-diffusion-cpp-python>=0.4.2",
+]
+
+# Local generation via Diffusers (heavy deps; opt-in).
+huggingface = [
+    "diffusers>=0.36.0",
+    "torch>=2.0",
+    "transformers>=4.0",
+    "accelerate>=0.0",
+    "safetensors>=0.0",
+    "Pillow>=9.0",
+]
+
+# Convenience: installs both local backends (Diffusers + stable-diffusion.cpp python bindings).
+local = [
+    "stable-diffusion-cpp-python>=0.4.2",
+    "diffusers>=0.36.0",
+    "torch>=2.0",
+    "transformers>=4.0",
+    "accelerate>=0.0",
+    "safetensors>=0.0",
+    "Pillow>=9.0",
+]
+
+# NOTE: PyPI rejects VCS/direct URL dependencies in package metadata.
+# If you need Diffusers "main" for unreleased pipelines, install it explicitly *after*:
+#   pip install "abstractvision[huggingface-dev]"
+#   pip install "diffusers @ git+https://github.com/huggingface/diffusers@main"
+huggingface-dev = [
+    "diffusers>=0.36.0",
+    "torch>=2.0",
+    "transformers>=5.0",
+    "accelerate>=0.0",
+    "safetensors>=0.0",
+    "Pillow>=9.0",
+]
+
+# Tool integration module (optional import). Kept optional to avoid circular deps with AbstractCore.
+abstractcore = ["abstractcore>=2.0.0"]
+
+[tool.setuptools]
+packages = [
+    "abstractvision",
+    "abstractvision.backends",
+    "abstractvision.integrations",
+]
+
+[tool.setuptools.package-dir]
+"" = "src"
+
+[tool.setuptools.dynamic]
+version = {attr = "abstractvision.__version__"}
+
+[tool.setuptools.package-data]
+abstractvision = ["assets/*.json"]
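The `[project.entry-points."abstractcore.capabilities_plugins"]` table above is how a host like AbstractCore discovers this package. The consumer-side sketch below shows the standard `importlib.metadata` lookup for that group; how AbstractCore actually loads and invokes `register` (including its signature) is an assumption here, not something this diff confirms.

```python
from importlib.metadata import entry_points

# Standard entry-point discovery for the group declared in pyproject.toml.
# entry_points(group=...) needs Python 3.10+; on 3.8/3.9 use
# entry_points()["abstractcore.capabilities_plugins"] instead.
for ep in entry_points(group="abstractcore.capabilities_plugins"):
    register = ep.load()  # resolves abstractvision.integrations.abstractcore_plugin:register
    register()            # the real signature/arguments are an assumption
```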
--- /dev/null
+++ abstractvision-0.2.1/src/abstractvision/__init__.py
@@ -0,0 +1,22 @@
+"""abstractvision: Generative vision capabilities for abstractcore.ai.
+
+The default install is batteries-included (Diffusers + stable-diffusion.cpp python bindings),
+so users generally only need to download model weights.
+"""
+
+from .artifacts import LocalAssetStore, RuntimeArtifactStoreAdapter, is_artifact_ref
+from .model_capabilities import VisionModelCapabilitiesRegistry
+from .vision_manager import VisionManager
+
+__version__ = "0.2.1"
+__author__ = "Laurent-Philippe Albou"
+__email__ = "contact@abstractcore.ai"
+
+__all__ = [
+    "VisionManager",
+    "VisionModelCapabilitiesRegistry",
+    "LocalAssetStore",
+    "RuntimeArtifactStoreAdapter",
+    "is_artifact_ref",
+    "__version__",
+]