clawid 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clawid-0.1.0/LICENSE +21 -0
- clawid-0.1.0/PKG-INFO +277 -0
- clawid-0.1.0/README.md +226 -0
- clawid-0.1.0/clawid/__init__.py +18 -0
- clawid-0.1.0/clawid/algorithms/__init__.py +0 -0
- clawid-0.1.0/clawid/algorithms/dwt_qim.py +144 -0
- clawid-0.1.0/clawid/algorithms/fourier_mellin.py +72 -0
- clawid-0.1.0/clawid/algorithms/visible.py +104 -0
- clawid-0.1.0/clawid/api/__init__.py +3 -0
- clawid-0.1.0/clawid/api/app.py +57 -0
- clawid-0.1.0/clawid/api/models.py +26 -0
- clawid-0.1.0/clawid/api/routes.py +183 -0
- clawid-0.1.0/clawid/cli.py +259 -0
- clawid-0.1.0/clawid/core/__init__.py +0 -0
- clawid-0.1.0/clawid/core/detect.py +108 -0
- clawid-0.1.0/clawid/core/embed.py +106 -0
- clawid-0.1.0/clawid/core/payload.py +60 -0
- clawid-0.1.0/clawid/storage/__init__.py +73 -0
- clawid-0.1.0/clawid/storage/base.py +51 -0
- clawid-0.1.0/clawid/storage/mongo.py +56 -0
- clawid-0.1.0/clawid/storage/postgres.py +90 -0
- clawid-0.1.0/clawid/storage/redis_store.py +61 -0
- clawid-0.1.0/clawid/storage/sqlite.py +75 -0
- clawid-0.1.0/clawid/video.py +222 -0
- clawid-0.1.0/clawid.egg-info/PKG-INFO +277 -0
- clawid-0.1.0/clawid.egg-info/SOURCES.txt +38 -0
- clawid-0.1.0/clawid.egg-info/dependency_links.txt +1 -0
- clawid-0.1.0/clawid.egg-info/entry_points.txt +2 -0
- clawid-0.1.0/clawid.egg-info/requires.txt +33 -0
- clawid-0.1.0/clawid.egg-info/top_level.txt +2 -0
- clawid-0.1.0/pyproject.toml +77 -0
- clawid-0.1.0/setup.cfg +4 -0
- clawid-0.1.0/setup.py +33 -0
- clawid-0.1.0/tests/test_api.py +156 -0
- clawid-0.1.0/tests/test_fourier_mellin.py +211 -0
- clawid-0.1.0/tests/test_invisible.py +100 -0
- clawid-0.1.0/tests/test_payload.py +48 -0
- clawid-0.1.0/tests/test_storage.py +88 -0
- clawid-0.1.0/tests/test_video.py +120 -0
- clawid-0.1.0/tests/test_visible.py +68 -0
clawid-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 hawky.ai (https://hawky.ai)
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
clawid-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: clawid
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Invisible & visible watermarking SDK for AI-generated images and video
|
|
5
|
+
Author-email: "hawky.ai Research" <research@hawky.ai>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://hawky.ai
|
|
8
|
+
Project-URL: Repository, https://github.com/Hawky-ai/clawID
|
|
9
|
+
Project-URL: Bug Tracker, https://github.com/Hawky-ai/clawID/issues
|
|
10
|
+
Keywords: watermarking,steganography,AI,image,provenance,SynthID,C2PA,digital-rights,generative-ai,hawky
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Topic :: Multimedia :: Graphics
|
|
19
|
+
Classifier: Topic :: Security
|
|
20
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
21
|
+
Requires-Python: >=3.10
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
License-File: LICENSE
|
|
24
|
+
Requires-Dist: Pillow>=9.0.0
|
|
25
|
+
Requires-Dist: numpy>=1.21.0
|
|
26
|
+
Requires-Dist: PyWavelets>=1.3.0
|
|
27
|
+
Provides-Extra: api
|
|
28
|
+
Requires-Dist: fastapi>=0.100.0; extra == "api"
|
|
29
|
+
Requires-Dist: uvicorn[standard]>=0.23.0; extra == "api"
|
|
30
|
+
Requires-Dist: python-multipart>=0.0.6; extra == "api"
|
|
31
|
+
Provides-Extra: postgres
|
|
32
|
+
Requires-Dist: psycopg2-binary>=2.9.0; extra == "postgres"
|
|
33
|
+
Provides-Extra: mongo
|
|
34
|
+
Requires-Dist: pymongo>=4.0.0; extra == "mongo"
|
|
35
|
+
Provides-Extra: redis
|
|
36
|
+
Requires-Dist: redis>=4.0.0; extra == "redis"
|
|
37
|
+
Provides-Extra: video
|
|
38
|
+
Requires-Dist: imageio[ffmpeg]>=2.28.0; extra == "video"
|
|
39
|
+
Provides-Extra: spaces
|
|
40
|
+
Requires-Dist: gradio>=4.0.0; extra == "spaces"
|
|
41
|
+
Provides-Extra: all
|
|
42
|
+
Requires-Dist: clawid[api,mongo,postgres,redis,video]; extra == "all"
|
|
43
|
+
Provides-Extra: dev
|
|
44
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
45
|
+
Requires-Dist: httpx>=0.24.0; extra == "dev"
|
|
46
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
47
|
+
Requires-Dist: build>=1.0.0; extra == "dev"
|
|
48
|
+
Requires-Dist: twine>=4.0.0; extra == "dev"
|
|
49
|
+
Dynamic: license-file
|
|
50
|
+
Dynamic: requires-python
|
|
51
|
+
|
|
52
|
+
# clawID
|
|
53
|
+
|
|
54
|
+
**Invisible & visible watermarking SDK for AI-generated images and video.**
|
|
55
|
+
|
|
56
|
+
[](https://pypi.org/project/clawid/)
|
|
57
|
+
[](https://pypi.org/project/clawid/)
|
|
58
|
+
[](https://github.com/Hawky-ai/clawID/actions/workflows/ci.yml)
|
|
59
|
+
[](LICENSE)
|
|
60
|
+
|
|
61
|
+
> A research project by **[hawky.ai](https://hawky.ai)** — the AI content provenance platform.
|
|
62
|
+
|
|
63
|
+
clawID embeds a UUID-based watermark into images and video frames — invisible to the human eye but detectable by the SDK. Inspired by Google SynthID. Built for AI-generated asset provenance tracking.
|
|
64
|
+
|
|
65
|
+
---
|
|
66
|
+
|
|
67
|
+
## Features
|
|
68
|
+
|
|
69
|
+
- **Invisible watermarking** via DWT-QIM (Discrete Wavelet Transform + Quantization Index Modulation)
|
|
70
|
+
- **Resize-robust mode** (`algorithm='fm'`) — survives ±35% resize, JPEG compression, and minor edits
|
|
71
|
+
- **Visible watermarking** — overlay text/logo with configurable opacity and position
|
|
72
|
+
- **Video support** — watermark every Nth frame; detect from any single watermarked frame
|
|
73
|
+
- **REST API** — FastAPI server with `/embed` and `/detect` endpoints
|
|
74
|
+
- **Storage backends** — SQLite, PostgreSQL, MongoDB, Redis
|
|
75
|
+
- **CLI** — `clawid embed`, `clawid detect`, `clawid embed-video`, `clawid detect-video`
|
|
76
|
+
|
|
77
|
+
---
|
|
78
|
+
|
|
79
|
+
## Installation
|
|
80
|
+
|
|
81
|
+
```bash
|
|
82
|
+
# Core (images only)
|
|
83
|
+
pip install clawid
|
|
84
|
+
|
|
85
|
+
# With video support
|
|
86
|
+
pip install "clawid[video]"
|
|
87
|
+
|
|
88
|
+
# With REST API
|
|
89
|
+
pip install "clawid[api]"
|
|
90
|
+
|
|
91
|
+
# Everything
|
|
92
|
+
pip install "clawid[all]"
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
---
|
|
96
|
+
|
|
97
|
+
## Quick Start
|
|
98
|
+
|
|
99
|
+
### Images
|
|
100
|
+
|
|
101
|
+
```python
|
|
102
|
+
from clawid import embed, detect
|
|
103
|
+
|
|
104
|
+
# Embed invisible watermark
|
|
105
|
+
meta = embed(
|
|
106
|
+
source='photo.jpg',
|
|
107
|
+
output_path='photo_wm.png',
|
|
108
|
+
metadata={'uid': 'creator123', 'platform': 'hawky.ai'},
|
|
109
|
+
)
|
|
110
|
+
print(meta['clawid']) # e.g. "3f2a1b4c-..."
|
|
111
|
+
|
|
112
|
+
# Detect
|
|
113
|
+
result = detect('photo_wm.png')
|
|
114
|
+
print(result['uid']) # creator123
|
|
115
|
+
print(result['platform']) # hawky.ai
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
### Resize-robust mode
|
|
119
|
+
|
|
120
|
+
```python
|
|
121
|
+
meta = embed(
|
|
122
|
+
source='photo.jpg',
|
|
123
|
+
output_path='photo_wm.png',
|
|
124
|
+
metadata={'uid': 'creator123'},
|
|
125
|
+
algorithm='fm', # survives ±35% resize
|
|
126
|
+
)
|
|
127
|
+
```
|
|
128
|
+
|
|
129
|
+
### Video
|
|
130
|
+
|
|
131
|
+
```python
|
|
132
|
+
from clawid import embed_video, detect_video
|
|
133
|
+
|
|
134
|
+
meta = embed_video(
|
|
135
|
+
'input.mp4',
|
|
136
|
+
'output_wm.mp4',
|
|
137
|
+
metadata={'uid': 'creator123', 'platform': 'hawky.ai'},
|
|
138
|
+
frame_stride=5, # watermark every 5th frame
|
|
139
|
+
)
|
|
140
|
+
|
|
141
|
+
result = detect_video('output_wm.mp4')
|
|
142
|
+
print(result['uid']) # creator123
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
### Visible watermark
|
|
146
|
+
|
|
147
|
+
```python
|
|
148
|
+
meta = embed(
|
|
149
|
+
source='photo.jpg',
|
|
150
|
+
output_path='photo_wm.png',
|
|
151
|
+
metadata={'uid': 'creator123'},
|
|
152
|
+
mode='visible', # or 'both' for invisible + visible
|
|
153
|
+
opacity=0.6,
|
|
154
|
+
position='bottom-right',
|
|
155
|
+
)
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
---
|
|
159
|
+
|
|
160
|
+
## CLI
|
|
161
|
+
|
|
162
|
+
```bash
|
|
163
|
+
# Embed
|
|
164
|
+
clawid embed -i photo.jpg -o photo_wm.png --uid creator123 --platform hawky.ai
|
|
165
|
+
|
|
166
|
+
# Embed (resize-robust)
|
|
167
|
+
clawid embed -i photo.jpg -o photo_wm.png --uid creator123 --algorithm fm
|
|
168
|
+
|
|
169
|
+
# Detect
|
|
170
|
+
clawid detect -i photo_wm.png
|
|
171
|
+
|
|
172
|
+
# Video
|
|
173
|
+
clawid embed-video -i input.mp4 -o output_wm.mp4 --uid creator123
|
|
174
|
+
clawid detect-video -i output_wm.mp4
|
|
175
|
+
|
|
176
|
+
# Save to SQLite store on embed, enrich on detect
|
|
177
|
+
clawid embed -i photo.jpg -o wm.png --uid creator123 --store sqlite:///clawid.db
|
|
178
|
+
clawid detect -i wm.png --store sqlite:///clawid.db
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
---
|
|
182
|
+
|
|
183
|
+
## REST API
|
|
184
|
+
|
|
185
|
+
```bash
|
|
186
|
+
pip install "clawid[api]"
|
|
187
|
+
clawid serve --store sqlite:///clawid.db --port 8000
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
**Endpoints:**
|
|
191
|
+
|
|
192
|
+
| Method | Path | Description |
|
|
193
|
+
|--------|------|-------------|
|
|
194
|
+
| `POST` | `/embed` | Upload image, get back watermarked image + metadata |
|
|
195
|
+
| `POST` | `/detect` | Upload image, get back detected metadata |
|
|
196
|
+
| `GET` | `/docs` | Interactive Swagger UI |
|
|
197
|
+
|
|
198
|
+
```bash
|
|
199
|
+
# Embed via API
|
|
200
|
+
curl -X POST http://localhost:8000/embed \
|
|
201
|
+
-F "file=@photo.jpg" \
|
|
202
|
+
-F "uid=creator123" \
|
|
203
|
+
-F "platform=hawky.ai" \
|
|
204
|
+
--output photo_wm.png
|
|
205
|
+
|
|
206
|
+
# Detect via API
|
|
207
|
+
curl -X POST http://localhost:8000/detect \
|
|
208
|
+
-F "file=@photo_wm.png"
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
---
|
|
212
|
+
|
|
213
|
+
## Storage Backends
|
|
214
|
+
|
|
215
|
+
```python
|
|
216
|
+
from clawid.storage import from_uri
|
|
217
|
+
|
|
218
|
+
# SQLite (local)
|
|
219
|
+
with from_uri('sqlite:///clawid.db') as store:
|
|
220
|
+
store.save(meta['clawid'], meta)
|
|
221
|
+
record = store.get(meta['clawid'])
|
|
222
|
+
|
|
223
|
+
# PostgreSQL
|
|
224
|
+
with from_uri('postgresql://user:pass@localhost/mydb') as store:
|
|
225
|
+
store.save(meta['clawid'], meta)
|
|
226
|
+
|
|
227
|
+
# MongoDB
|
|
228
|
+
with from_uri('mongodb://localhost:27017/mydb') as store:
|
|
229
|
+
store.save(meta['clawid'], meta)
|
|
230
|
+
|
|
231
|
+
# Redis
|
|
232
|
+
with from_uri('redis://localhost:6379/0') as store:
|
|
233
|
+
store.save(meta['clawid'], meta)
|
|
234
|
+
```
|
|
235
|
+
|
|
236
|
+
---
|
|
237
|
+
|
|
238
|
+
## How It Works
|
|
239
|
+
|
|
240
|
+
### Invisible watermark (DWT-QIM)
|
|
241
|
+
|
|
242
|
+
The payload (UUID + metadata, CBOR-encoded + CRC-checked) is serialised to bits, then each bit is embedded by quantizing a coefficient in the Haar wavelet LL2 sub-band. Each LL2 coefficient spans a 4×4 pixel block, so a coefficient shift of `delta` spreads to roughly `delta/16 ≈ 2` intensity levels per pixel — above the uint8 quantization step of 1 — letting the watermark survive JPEG compression and format round-trips.
|
|
243
|
+
|
|
244
|
+
| Mode | Delta | PSNR | Survives |
|
|
245
|
+
|------|-------|------|---------|
|
|
246
|
+
| `qim` (default) | 32 | ~51 dB | PNG/JPEG q≥75, minor edits |
|
|
247
|
+
| `fm` (resize-robust) | 192 | ~36 dB | ±35% resize, PNG/JPEG q≥75 |
|
|
248
|
+
|
|
249
|
+
### Video watermarking
|
|
250
|
+
|
|
251
|
+
Each video is processed frame by frame. Every Nth frame (`frame_stride`, default 5) receives the same DWT-QIM watermark with the same `clawid` UUID. Detection samples up to 10 frames and uses majority vote — detection succeeds as long as at least one watermarked frame survives.
|
|
252
|
+
|
|
253
|
+
---
|
|
254
|
+
|
|
255
|
+
## Docker
|
|
256
|
+
|
|
257
|
+
```bash
|
|
258
|
+
docker compose up
|
|
259
|
+
# API available at http://localhost:8000
|
|
260
|
+
```
|
|
261
|
+
|
|
262
|
+
---
|
|
263
|
+
|
|
264
|
+
## Development
|
|
265
|
+
|
|
266
|
+
```bash
|
|
267
|
+
git clone https://github.com/Hawky-ai/clawID
|
|
268
|
+
cd clawid
|
|
269
|
+
pip install -e ".[dev,api,video]"
|
|
270
|
+
pytest
|
|
271
|
+
```
|
|
272
|
+
|
|
273
|
+
---
|
|
274
|
+
|
|
275
|
+
## License
|
|
276
|
+
|
|
277
|
+
MIT
|
clawid-0.1.0/README.md
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
1
|
+
# clawID
|
|
2
|
+
|
|
3
|
+
**Invisible & visible watermarking SDK for AI-generated images and video.**
|
|
4
|
+
|
|
5
|
+
[](https://pypi.org/project/clawid/)
|
|
6
|
+
[](https://pypi.org/project/clawid/)
|
|
7
|
+
[](https://github.com/Hawky-ai/clawID/actions/workflows/ci.yml)
|
|
8
|
+
[](LICENSE)
|
|
9
|
+
|
|
10
|
+
> A research project by **[hawky.ai](https://hawky.ai)** — the AI content provenance platform.
|
|
11
|
+
|
|
12
|
+
clawID embeds a UUID-based watermark into images and video frames — invisible to the human eye but detectable by the SDK. Inspired by Google SynthID. Built for AI-generated asset provenance tracking.
|
|
13
|
+
|
|
14
|
+
---
|
|
15
|
+
|
|
16
|
+
## Features
|
|
17
|
+
|
|
18
|
+
- **Invisible watermarking** via DWT-QIM (Discrete Wavelet Transform + Quantization Index Modulation)
|
|
19
|
+
- **Resize-robust mode** (`algorithm='fm'`) — survives ±35% resize, JPEG compression, and minor edits
|
|
20
|
+
- **Visible watermarking** — overlay text/logo with configurable opacity and position
|
|
21
|
+
- **Video support** — watermark every Nth frame; detect from any single watermarked frame
|
|
22
|
+
- **REST API** — FastAPI server with `/embed` and `/detect` endpoints
|
|
23
|
+
- **Storage backends** — SQLite, PostgreSQL, MongoDB, Redis
|
|
24
|
+
- **CLI** — `clawid embed`, `clawid detect`, `clawid embed-video`, `clawid detect-video`
|
|
25
|
+
|
|
26
|
+
---
|
|
27
|
+
|
|
28
|
+
## Installation
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
# Core (images only)
|
|
32
|
+
pip install clawid
|
|
33
|
+
|
|
34
|
+
# With video support
|
|
35
|
+
pip install "clawid[video]"
|
|
36
|
+
|
|
37
|
+
# With REST API
|
|
38
|
+
pip install "clawid[api]"
|
|
39
|
+
|
|
40
|
+
# Everything
|
|
41
|
+
pip install "clawid[all]"
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
---
|
|
45
|
+
|
|
46
|
+
## Quick Start
|
|
47
|
+
|
|
48
|
+
### Images
|
|
49
|
+
|
|
50
|
+
```python
|
|
51
|
+
from clawid import embed, detect
|
|
52
|
+
|
|
53
|
+
# Embed invisible watermark
|
|
54
|
+
meta = embed(
|
|
55
|
+
source='photo.jpg',
|
|
56
|
+
output_path='photo_wm.png',
|
|
57
|
+
metadata={'uid': 'creator123', 'platform': 'hawky.ai'},
|
|
58
|
+
)
|
|
59
|
+
print(meta['clawid']) # e.g. "3f2a1b4c-..."
|
|
60
|
+
|
|
61
|
+
# Detect
|
|
62
|
+
result = detect('photo_wm.png')
|
|
63
|
+
print(result['uid']) # creator123
|
|
64
|
+
print(result['platform']) # hawky.ai
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
### Resize-robust mode
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
meta = embed(
|
|
71
|
+
source='photo.jpg',
|
|
72
|
+
output_path='photo_wm.png',
|
|
73
|
+
metadata={'uid': 'creator123'},
|
|
74
|
+
algorithm='fm', # survives ±35% resize
|
|
75
|
+
)
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
### Video
|
|
79
|
+
|
|
80
|
+
```python
|
|
81
|
+
from clawid import embed_video, detect_video
|
|
82
|
+
|
|
83
|
+
meta = embed_video(
|
|
84
|
+
'input.mp4',
|
|
85
|
+
'output_wm.mp4',
|
|
86
|
+
metadata={'uid': 'creator123', 'platform': 'hawky.ai'},
|
|
87
|
+
frame_stride=5, # watermark every 5th frame
|
|
88
|
+
)
|
|
89
|
+
|
|
90
|
+
result = detect_video('output_wm.mp4')
|
|
91
|
+
print(result['uid']) # creator123
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
### Visible watermark
|
|
95
|
+
|
|
96
|
+
```python
|
|
97
|
+
meta = embed(
|
|
98
|
+
source='photo.jpg',
|
|
99
|
+
output_path='photo_wm.png',
|
|
100
|
+
metadata={'uid': 'creator123'},
|
|
101
|
+
mode='visible', # or 'both' for invisible + visible
|
|
102
|
+
opacity=0.6,
|
|
103
|
+
position='bottom-right',
|
|
104
|
+
)
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
---
|
|
108
|
+
|
|
109
|
+
## CLI
|
|
110
|
+
|
|
111
|
+
```bash
|
|
112
|
+
# Embed
|
|
113
|
+
clawid embed -i photo.jpg -o photo_wm.png --uid creator123 --platform hawky.ai
|
|
114
|
+
|
|
115
|
+
# Embed (resize-robust)
|
|
116
|
+
clawid embed -i photo.jpg -o photo_wm.png --uid creator123 --algorithm fm
|
|
117
|
+
|
|
118
|
+
# Detect
|
|
119
|
+
clawid detect -i photo_wm.png
|
|
120
|
+
|
|
121
|
+
# Video
|
|
122
|
+
clawid embed-video -i input.mp4 -o output_wm.mp4 --uid creator123
|
|
123
|
+
clawid detect-video -i output_wm.mp4
|
|
124
|
+
|
|
125
|
+
# Save to SQLite store on embed, enrich on detect
|
|
126
|
+
clawid embed -i photo.jpg -o wm.png --uid creator123 --store sqlite:///clawid.db
|
|
127
|
+
clawid detect -i wm.png --store sqlite:///clawid.db
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
---
|
|
131
|
+
|
|
132
|
+
## REST API
|
|
133
|
+
|
|
134
|
+
```bash
|
|
135
|
+
pip install "clawid[api]"
|
|
136
|
+
clawid serve --store sqlite:///clawid.db --port 8000
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
**Endpoints:**
|
|
140
|
+
|
|
141
|
+
| Method | Path | Description |
|
|
142
|
+
|--------|------|-------------|
|
|
143
|
+
| `POST` | `/embed` | Upload image, get back watermarked image + metadata |
|
|
144
|
+
| `POST` | `/detect` | Upload image, get back detected metadata |
|
|
145
|
+
| `GET` | `/docs` | Interactive Swagger UI |
|
|
146
|
+
|
|
147
|
+
```bash
|
|
148
|
+
# Embed via API
|
|
149
|
+
curl -X POST http://localhost:8000/embed \
|
|
150
|
+
-F "file=@photo.jpg" \
|
|
151
|
+
-F "uid=creator123" \
|
|
152
|
+
-F "platform=hawky.ai" \
|
|
153
|
+
--output photo_wm.png
|
|
154
|
+
|
|
155
|
+
# Detect via API
|
|
156
|
+
curl -X POST http://localhost:8000/detect \
|
|
157
|
+
-F "file=@photo_wm.png"
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
---
|
|
161
|
+
|
|
162
|
+
## Storage Backends
|
|
163
|
+
|
|
164
|
+
```python
|
|
165
|
+
from clawid.storage import from_uri
|
|
166
|
+
|
|
167
|
+
# SQLite (local)
|
|
168
|
+
with from_uri('sqlite:///clawid.db') as store:
|
|
169
|
+
store.save(meta['clawid'], meta)
|
|
170
|
+
record = store.get(meta['clawid'])
|
|
171
|
+
|
|
172
|
+
# PostgreSQL
|
|
173
|
+
with from_uri('postgresql://user:pass@localhost/mydb') as store:
|
|
174
|
+
store.save(meta['clawid'], meta)
|
|
175
|
+
|
|
176
|
+
# MongoDB
|
|
177
|
+
with from_uri('mongodb://localhost:27017/mydb') as store:
|
|
178
|
+
store.save(meta['clawid'], meta)
|
|
179
|
+
|
|
180
|
+
# Redis
|
|
181
|
+
with from_uri('redis://localhost:6379/0') as store:
|
|
182
|
+
store.save(meta['clawid'], meta)
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
---
|
|
186
|
+
|
|
187
|
+
## How It Works
|
|
188
|
+
|
|
189
|
+
### Invisible watermark (DWT-QIM)
|
|
190
|
+
|
|
191
|
+
The payload (UUID + metadata, CBOR-encoded + CRC-checked) is serialised to bits, then each bit is embedded by quantizing a coefficient in the Haar wavelet LL2 sub-band. Each LL2 coefficient spans a 4×4 pixel block, so a coefficient shift of `delta` spreads to roughly `delta/16 ≈ 2` intensity levels per pixel — above the uint8 quantization step of 1 — letting the watermark survive JPEG compression and format round-trips.
|
|
192
|
+
|
|
193
|
+
| Mode | Delta | PSNR | Survives |
|
|
194
|
+
|------|-------|------|---------|
|
|
195
|
+
| `qim` (default) | 32 | ~51 dB | PNG/JPEG q≥75, minor edits |
|
|
196
|
+
| `fm` (resize-robust) | 192 | ~36 dB | ±35% resize, PNG/JPEG q≥75 |
|
|
197
|
+
|
|
198
|
+
### Video watermarking
|
|
199
|
+
|
|
200
|
+
Each video is processed frame by frame. Every Nth frame (`frame_stride`, default 5) receives the same DWT-QIM watermark with the same `clawid` UUID. Detection samples up to 10 frames and uses majority vote — detection succeeds as long as at least one watermarked frame survives.
|
|
201
|
+
|
|
202
|
+
---
|
|
203
|
+
|
|
204
|
+
## Docker
|
|
205
|
+
|
|
206
|
+
```bash
|
|
207
|
+
docker compose up
|
|
208
|
+
# API available at http://localhost:8000
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
---
|
|
212
|
+
|
|
213
|
+
## Development
|
|
214
|
+
|
|
215
|
+
```bash
|
|
216
|
+
git clone https://github.com/Hawky-ai/clawID
|
|
217
|
+
cd clawid
|
|
218
|
+
pip install -e ".[dev,api,video]"
|
|
219
|
+
pytest
|
|
220
|
+
```
|
|
221
|
+
|
|
222
|
+
---
|
|
223
|
+
|
|
224
|
+
## License
|
|
225
|
+
|
|
226
|
+
MIT
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
"""clawID public API: image/video watermark embed & detect plus storage backends."""
from .core.embed import embed
from .core.detect import detect
from .video import embed_video, detect_video
from .storage import ClawIDStore, SQLiteStore, PostgreSQLStore, MongoDBStore, RedisStore, from_uri

# Package version — keep in sync with pyproject.toml / PKG-INFO.
__version__ = "0.1.0"

# Names re-exported at package level; this is the supported public surface.
__all__ = [
    "embed",
    "detect",
    "embed_video",
    "detect_video",
    "ClawIDStore",
    "SQLiteStore",
    "PostgreSQLStore",
    "MongoDBStore",
    "RedisStore",
    "from_uri",
]
|
|
File without changes
|
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Invisible watermarking via DWT + QIM (Quantization Index Modulation).
|
|
3
|
+
|
|
4
|
+
Algorithm:
|
|
5
|
+
1. Compute luminance channel: Y = 0.299·R + 0.587·G + 0.114·B (float64, no color conversion)
|
|
6
|
+
2. Apply 2-level Haar DWT → extract LL2 sub-band (low-frequency, most robust region)
|
|
7
|
+
3. Embed bits using QIM:
|
|
8
|
+
bit=0 → quantize coefficient to nearest multiple of delta
|
|
9
|
+
bit=1 → quantize coefficient to nearest half-multiple (N+0.5)·delta
|
|
10
|
+
4. Reconstruct modified Y via inverse DWT
|
|
11
|
+
5. Distribute the per-pixel Y change back to all three RGB channels equally
|
|
12
|
+
6. Detect: apply same transform and read fractional parts of LL2 coefficients
|
|
13
|
+
|
|
14
|
+
Working in float64 luminance and distributing back to RGB avoids YCbCr roundtrip errors.
|
|
15
|
+
Each bit is stored REPEAT times; majority vote recovers the bit on detection.
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
import numpy as np
|
|
19
|
+
import pywt
|
|
20
|
+
from PIL import Image
|
|
21
|
+
from typing import List
|
|
22
|
+
|
|
23
|
+
# Tuning parameters for the DWT-QIM watermark. NOTE: delta and repeat must be
# identical at embed and detect time, or extraction reads garbage bits.
DELTA = 32.0  # quantization step — higher = more robust, slightly more visible
# 32 gives PSNR ~51 dB (imperceptible) and survives JPEG q≥85
REPEAT = 3  # repetition factor for error correction via majority vote
MIN_DIM = 256  # minimum image dimension in pixels
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _luminance(arr: np.ndarray) -> np.ndarray:
|
|
30
|
+
"""Compute perceptual luminance channel from float64 RGB array (H, W, 3)."""
|
|
31
|
+
return 0.299 * arr[:, :, 0] + 0.587 * arr[:, :, 1] + 0.114 * arr[:, :, 2]
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def embed_bits(
    image: Image.Image,
    bits: List[int],
    delta: float = DELTA,
    repeat: int = REPEAT,
) -> Image.Image:
    """
    Embed a list of bits into an image invisibly using DWT-QIM.

    Args:
        image: RGB PIL Image (at least MIN_DIM×MIN_DIM pixels)
        bits: list of 0/1 values to embed
        delta: quantization step (default 32.0; must match extract_bits)
        repeat: repetition per bit for error correction (default 3; must match extract_bits)

    Returns:
        Watermarked RGB PIL Image (visually identical to input, PSNR > 50 dB)

    Raises:
        ValueError: if the image is smaller than MIN_DIM in either dimension,
            or the payload exceeds the LL2 coefficient capacity.
    """
    if min(image.size) < MIN_DIM:
        raise ValueError(
            f"Image must be at least {MIN_DIM}×{MIN_DIM} pixels "
            f"(got {image.size[0]}×{image.size[1]})"
        )

    # Expand bits with the repetition code (vectorized equivalent of the
    # nested list comprehension: each bit appears `repeat` times in a row).
    repeated = np.repeat(np.asarray(bits, dtype=np.int64), repeat)

    arr = np.array(image.convert('RGB'), dtype=np.float64)
    Y = _luminance(arr)
    h, w = Y.shape

    # 2-level Haar DWT on luminance; LL2 is the most robust sub-band.
    LL, (LH, HL, HH) = pywt.dwt2(Y, 'haar')
    LL2, (LH2, HL2, HH2) = pywt.dwt2(LL, 'haar')

    # flatten() already returns a copy, so no extra .copy() is needed.
    flat = LL2.flatten()
    n = repeated.size

    if n > flat.size:
        raise ValueError(
            f"Payload too large: need {n} coefficient slots, "
            f"image provides {flat.size}. Use a larger image or reduce payload."
        )

    # QIM embedding (vectorized):
    #   bit=0 → snap coefficient to the nearest multiple of delta
    #   bit=1 → snap coefficient to the nearest (N+0.5)·delta
    coeffs = flat[:n]
    even_lattice = np.round(coeffs / delta) * delta
    odd_lattice = (np.round(coeffs / delta - 0.5) + 0.5) * delta
    flat[:n] = np.where(repeated == 0, even_lattice, odd_lattice)

    # Reconstruct modified luminance via inverse DWT; idwt2 may pad odd
    # dimensions by one pixel, so crop back to the original shape.
    LL2_w = flat.reshape(LL2.shape)
    LL_w = pywt.idwt2((LL2_w, (LH2, HL2, HH2)), 'haar')
    Y_w = pywt.idwt2((LL_w, (LH, HL, HH)), 'haar')
    Y_w = Y_w[:h, :w]

    # Distribute the luminance delta equally to all RGB channels
    # (broadcast over the channel axis instead of a per-channel loop).
    delta_Y = Y_w - Y
    arr_wm = np.clip(arr + delta_Y[:, :, None], 0, 255)

    return Image.fromarray(arr_wm.astype(np.uint8), 'RGB')
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def extract_bits(
    image: Image.Image,
    n_bits: int,
    delta: float = DELTA,
    repeat: int = REPEAT,
) -> List[int]:
    """
    Extract n_bits from a (possibly watermarked) image using QIM detection.

    Args:
        image: RGB PIL Image
        n_bits: number of bits to extract (before repetition)
        delta: quantization step (must match embed)
        repeat: repetition factor (must match embed)

    Returns:
        List of detected bits (0 or 1)

    Raises:
        ValueError: if n_bits * repeat exceeds the LL2 coefficient capacity.
    """
    arr = np.array(image.convert('RGB'), dtype=np.float64)
    Y = _luminance(arr)

    # Same 2-level Haar DWT as embed_bits; only LL2 is inspected.
    LL, _ = pywt.dwt2(Y, 'haar')
    LL2, _ = pywt.dwt2(LL, 'haar')
    flat = LL2.flatten()

    n_needed = n_bits * repeat
    if n_needed > len(flat):
        raise ValueError(
            f"Cannot extract {n_bits} bits from this image "
            f"(max capacity: {len(flat) // repeat} bits)"
        )

    # QIM detection (vectorized): fractional position on the delta lattice.
    # frac near 0 or 1 → bit 0 (on-lattice); frac near 0.5 → bit 1 (half-lattice).
    frac = (flat[:n_needed] / delta) % 1.0
    raw = ((frac >= 0.25) & (frac < 0.75)).astype(np.int64)

    # Majority vote over the `repeat` consecutive copies of each bit.
    votes = raw.reshape(n_bits, repeat).sum(axis=1)
    return (votes > repeat / 2).astype(int).tolist()
|