ultracompress 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,76 @@
1
name: CI

on:
  push:
    branches: [main, master]
    # Tag pushes are NOT matched by a `branches` filter, so without this the
    # publish job below could never fire: its `refs/tags/v` condition would
    # be evaluated in a workflow run that never happens. List version tags
    # explicitly so pushing `vX.Y.Z` triggers the pipeline.
    tags: ["v*"]
  pull_request:
    branches: [main, master]

jobs:
  test:
    name: Test on Python ${{ matrix.python-version }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.11", "3.12"]

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: pip

      - name: Install package + dev deps
        run: |
          python -m pip install --upgrade pip
          pip install -e ".[dev]"

      - name: Lint (ruff)
        run: ruff check src/ tests/

      - name: Type check (mypy)
        run: mypy src/ --ignore-missing-imports
        continue-on-error: true

      - name: Run tests
        run: pytest tests/ -v --cov=ultracompress_cli --cov-report=term-missing

      - name: Build package
        run: |
          pip install build twine
          python -m build
          twine check dist/*

      - name: Upload dist artifacts
        if: matrix.python-version == '3.12'
        uses: actions/upload-artifact@v4
        with:
          name: dist
          path: dist/
          retention-days: 30

  publish:
    name: Publish to PyPI
    needs: test
    runs-on: ubuntu-latest
    # Runs only for version-tag pushes (reachable via the `tags` trigger above).
    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v')
    permissions:
      id-token: write # for trusted publishing
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Build
        run: |
          python -m pip install --upgrade pip build
          python -m build

      - name: Publish to PyPI via Trusted Publishing
        uses: pypa/gh-action-pypi-publish@release/v1
@@ -0,0 +1,45 @@
1
# Security scanning workflow: static analysis (bandit), dependency CVE
# audits (pip-audit + safety), and secret detection (gitleaks).
# Every scan step is continue-on-error, so findings are reported in the
# run log without blocking the pipeline.
name: Security

on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]
  schedule:
    - cron: "0 6 * * 1" # Weekly Monday 6am UTC

jobs:
  security:
    name: Security scans
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Install
        run: |
          python -m pip install --upgrade pip
          pip install -e "."
          pip install bandit pip-audit safety

      - name: Bandit (static security scan)
        # -ll limits output to medium severity and above
        run: bandit -r src/ -ll
        continue-on-error: true

      - name: pip-audit (dependency CVEs)
        run: pip-audit
        continue-on-error: true

      - name: Safety (dependency CVEs alternative)
        # NOTE(review): `safety check` is deprecated upstream in favor of
        # `safety scan` — confirm before the old subcommand is removed.
        run: safety check --json
        continue-on-error: true

      - name: Check for secrets (gitleaks)
        uses: gitleaks/gitleaks-action@v2
        continue-on-error: true
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -0,0 +1,49 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ share/python-wheels/
20
+ *.egg-info/
21
+ .installed.cfg
22
+ *.egg
23
+ MANIFEST
24
+
25
+ # Virtual envs
26
+ .env
27
+ .venv
28
+ env/
29
+ venv/
30
+ ENV/
31
+
32
+ # Testing
33
+ .pytest_cache/
34
+ .coverage
35
+ .hypothesis/
36
+ htmlcov/
37
+
38
+ # IDEs
39
+ .vscode/
40
+ .idea/
41
+ *.swp
42
+ *.swo
43
+
44
+ # Model downloads (can be huge)
45
+ models/
46
+ eval-results/
47
+ *.safetensors
48
+ *.pt
49
+ *.bin
@@ -0,0 +1,33 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ Licensed under the Apache License, Version 2.0 (the "License");
6
+ you may not use this file except in compliance with the License.
7
+ You may obtain a copy of the License at
8
+
9
+ http://www.apache.org/licenses/LICENSE-2.0
10
+
11
+ Unless required by applicable law or agreed to in writing, software
12
+ distributed under the License is distributed on an "AS IS" BASIS,
13
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ See the License for the specific language governing permissions and
15
+ limitations under the License.
16
+
17
+ ------------------------------------------------------------------------------
18
+
19
+ SCOPE OF THIS LICENSE
20
+
21
+ The Apache-2.0 License above applies to the source code of this CLI
22
+ (`ultracompress-cli`). It does NOT automatically grant any rights to the
23
+ patent-pending compression methodologies described as "UltraCompress" or
24
+ "Fractal Residual Recursion" or "Activation-Aware Row-Overlay Quantization",
25
+ which are the subject of pending U.S. Provisional Patent Applications.
26
+
27
+ Pre-compressed model artifacts distributed via the Hugging Face Hub are
28
+ subject to separate license terms accompanying each artifact. The model
29
+ weights themselves are governed by the upstream teacher model's license
30
+ (e.g., Apache 2.0 for Qwen3, Llama Community License for Llama variants)
31
+ plus any additional patent grant terms specific to this project.
32
+
33
+ Copyright 2026 Missipssa Ounnar.
@@ -0,0 +1,120 @@
1
+ Metadata-Version: 2.4
2
+ Name: ultracompress
3
+ Version: 0.1.0
4
+ Summary: Extreme compression for large language models. Download pre-compressed models from Hugging Face Hub; self-compress support coming soon.
5
+ Project-URL: Homepage, https://sipsalabs.com
6
+ Project-URL: Documentation, https://github.com/mounnar/ultracompress-cli#readme
7
+ Project-URL: Repository, https://github.com/mounnar/ultracompress-cli
8
+ Project-URL: Issues, https://github.com/mounnar/ultracompress-cli/issues
9
+ Author-email: Missipssa Ounnar <micipsa.ounner@gmail.com>
10
+ License: Apache-2.0
11
+ License-File: LICENSE
12
+ Keywords: compression,edge-ai,inference,llm,quantization,transformer
13
+ Classifier: Development Status :: 3 - Alpha
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: Intended Audience :: Science/Research
16
+ Classifier: License :: OSI Approved :: Apache Software License
17
+ Classifier: Operating System :: OS Independent
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
23
+ Requires-Python: >=3.10
24
+ Requires-Dist: click>=8.1.0
25
+ Requires-Dist: huggingface-hub>=0.24.0
26
+ Requires-Dist: pyyaml>=6.0
27
+ Requires-Dist: rich>=13.0.0
28
+ Requires-Dist: safetensors>=0.4.0
29
+ Requires-Dist: tqdm>=4.66.0
30
+ Provides-Extra: dev
31
+ Requires-Dist: mypy>=1.0; extra == 'dev'
32
+ Requires-Dist: pytest-cov>=4.0; extra == 'dev'
33
+ Requires-Dist: pytest>=7.0; extra == 'dev'
34
+ Requires-Dist: ruff>=0.1.0; extra == 'dev'
35
+ Provides-Extra: torch
36
+ Requires-Dist: torch>=2.0.0; extra == 'torch'
37
+ Description-Content-Type: text/markdown
38
+
39
+ # UltraCompress
40
+
41
+ > Extreme compression for large language models. Patent pending — USPTO 64/049,511 + 64/049,517
42
+
43
+ [![PyPI](https://img.shields.io/pypi/v/ultracompress.svg)](https://pypi.org/project/ultracompress/)
44
+ [![Python](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/)
45
+ [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](LICENSE)
46
+
47
+ Run large language models on less hardware.
48
+ UltraCompress compresses modern transformer LLMs by **26–734×** with minimal quality loss. The underlying methods are patent pending; this CLI lets you **download pre-compressed reference models** and run them locally.
49
+
50
+ ## Install
51
+
52
+ ```bash
53
+ pip install ultracompress
54
+ ```
55
+
56
+ ## Quickstart
57
+
58
+ ```bash
59
+ # List pre-compressed models available on the official Hugging Face Hub
60
+ uc list
61
+
62
+ # Download a pre-compressed model (Qwen3-1.7B at 2.798 bpw, ~30% smaller than bnb-nf4)
63
+ uc pull sipsalabs/qwen3-1.7b-uc2p79
64
+
65
+ # Inspect what's in a compressed artifact
66
+ uc info ./models/qwen3-1.7b-uc2p79
67
+
68
+ # Benchmark the compressed model against the fp16 teacher
69
+ uc eval ./models/qwen3-1.7b-uc2p79 --tasks hellaswag --limit 500
70
+ ```
71
+
72
+ ## What's available today (v0.1 — alpha)
73
+
74
+ - `uc list` — browse pre-compressed models from our Hugging Face Hub collection.
75
+ - `uc pull <model-id>` — download a pre-compressed model locally.
76
+ - `uc info <path>` — inspect the compression metadata of an artifact.
77
+ - `uc eval <path> --tasks <list>` — run downstream benchmarks via `lm-eval-harness` on the compressed model.
78
+
79
+ ## What's coming
80
+
81
+ - `uc compress <hf-model-id> --bpw 2.8` — self-compression (private method, coming once patents clear).
82
+ - `uc serve <path>` — inference server with OpenAI-compatible API.
83
+ - `uc export --format gguf` — export to llama.cpp GGUF format.
84
+ - `uc export --format coreml` — export to Apple CoreML for on-device inference.
85
+
86
+ ## Why UltraCompress
87
+
88
+ On a 6-model × 8-method × 500-sample head-to-head benchmark:
89
+
90
+ | Method | Bits per weight | Cohort median T1 retention | Catastrophic failures |
91
+ |---|---:|---:|---:|
92
+ | bitsandbytes int8 | 8.000 | 99.75% | 0/6 |
93
+ | bitsandbytes nf4 | 4.000 | 98.31% | 0/6 |
94
+ | HQQ 4-bit g64 | 4.500 | 97.72% | 0/6 |
95
+ | **UltraCompress 2.8 bpw** | **2.798** | **95.63%** | **0/6** |
96
+ | HQQ 3-bit g64 | 3.500 | 72.46% | 1/6 |
97
+ | HQQ 2-bit g64 | 2.500 | 3.46% | 6/6 |
98
+
99
+ UltraCompress is the only sub-3-bpw method on this cohort that produces zero catastrophic failures.
100
+
101
+ ## Patent status
102
+
103
+ The UltraCompress compression methods are the subject of pending U.S. patent applications. Pre-compressed models are distributed under a separate licensing arrangement described in [LICENSE](LICENSE). The CLI code in this repository is Apache-2.0.
104
+
105
+ ## Citation
106
+
107
+ ```bibtex
108
+ @misc{ounnar2026ultracompress,
109
+ title = {UltraCompress: Extreme Compression for Large Language Models},
110
+ author = {Missipssa Ounnar},
111
+ year = {2026},
112
+ howpublished = {\url{https://mounnar.vercel.app}}
113
+ }
114
+ ```
115
+
116
+ ## Author
117
+
118
+ **Missipssa Ounnar** — [mounnar.vercel.app](https://mounnar.vercel.app) · [github.com/mounnar](https://github.com/mounnar)
119
+
120
+ Built on a dual-RTX-5090 workstation I designed and assembled myself. Patent pending.
@@ -0,0 +1,82 @@
1
+ # UltraCompress
2
+
3
+ > Extreme compression for large language models. Patent pending — USPTO 64/049,511 + 64/049,517
4
+
5
+ [![PyPI](https://img.shields.io/pypi/v/ultracompress.svg)](https://pypi.org/project/ultracompress/)
6
+ [![Python](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/)
7
+ [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](LICENSE)
8
+
9
+ Run large language models on less hardware.
10
+ UltraCompress compresses modern transformer LLMs by **26–734×** with minimal quality loss. The underlying methods are patent pending; this CLI lets you **download pre-compressed reference models** and run them locally.
11
+
12
+ ## Install
13
+
14
+ ```bash
15
+ pip install ultracompress
16
+ ```
17
+
18
+ ## Quickstart
19
+
20
+ ```bash
21
+ # List pre-compressed models available on the official Hugging Face Hub
22
+ uc list
23
+
24
+ # Download a pre-compressed model (Qwen3-1.7B at 2.798 bpw, ~30% smaller than bnb-nf4)
25
+ uc pull sipsalabs/qwen3-1.7b-uc2p79
26
+
27
+ # Inspect what's in a compressed artifact
28
+ uc info ./models/qwen3-1.7b-uc2p79
29
+
30
+ # Benchmark the compressed model against the fp16 teacher
31
+ uc eval ./models/qwen3-1.7b-uc2p79 --tasks hellaswag --limit 500
32
+ ```
33
+
34
+ ## What's available today (v0.1 — alpha)
35
+
36
+ - `uc list` — browse pre-compressed models from our Hugging Face Hub collection.
37
+ - `uc pull <model-id>` — download a pre-compressed model locally.
38
+ - `uc info <path>` — inspect the compression metadata of an artifact.
39
+ - `uc eval <path> --tasks <list>` — run downstream benchmarks via `lm-eval-harness` on the compressed model.
40
+
41
+ ## What's coming
42
+
43
+ - `uc compress <hf-model-id> --bpw 2.8` — self-compression (private method, coming once patents clear).
44
+ - `uc serve <path>` — inference server with OpenAI-compatible API.
45
+ - `uc export --format gguf` — export to llama.cpp GGUF format.
46
+ - `uc export --format coreml` — export to Apple CoreML for on-device inference.
47
+
48
+ ## Why UltraCompress
49
+
50
+ On a 6-model × 8-method × 500-sample head-to-head benchmark:
51
+
52
+ | Method | Bits per weight | Cohort median T1 retention | Catastrophic failures |
53
+ |---|---:|---:|---:|
54
+ | bitsandbytes int8 | 8.000 | 99.75% | 0/6 |
55
+ | bitsandbytes nf4 | 4.000 | 98.31% | 0/6 |
56
+ | HQQ 4-bit g64 | 4.500 | 97.72% | 0/6 |
57
+ | **UltraCompress 2.8 bpw** | **2.798** | **95.63%** | **0/6** |
58
+ | HQQ 3-bit g64 | 3.500 | 72.46% | 1/6 |
59
+ | HQQ 2-bit g64 | 2.500 | 3.46% | 6/6 |
60
+
61
+ UltraCompress is the only sub-3-bpw method on this cohort that produces zero catastrophic failures.
62
+
63
+ ## Patent status
64
+
65
+ The UltraCompress compression methods are the subject of pending U.S. patent applications. Pre-compressed models are distributed under a separate licensing arrangement described in [LICENSE](LICENSE). The CLI code in this repository is Apache-2.0.
66
+
67
+ ## Citation
68
+
69
+ ```bibtex
70
+ @misc{ounnar2026ultracompress,
71
+ title = {UltraCompress: Extreme Compression for Large Language Models},
72
+ author = {Missipssa Ounnar},
73
+ year = {2026},
74
+ howpublished = {\url{https://mounnar.vercel.app}}
75
+ }
76
+ ```
77
+
78
+ ## Author
79
+
80
+ **Missipssa Ounnar** — [mounnar.vercel.app](https://mounnar.vercel.app) · [github.com/mounnar](https://github.com/mounnar)
81
+
82
+ Built on a dual-RTX-5090 workstation I designed and assembled myself. Patent pending.
@@ -0,0 +1,70 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "ultracompress"
7
+ version = "0.1.0"
8
+ description = "Extreme compression for large language models. Download pre-compressed models from Hugging Face Hub; self-compress support coming soon."
9
+ readme = "README.md"
10
+ requires-python = ">=3.10"
11
+ license = { text = "Apache-2.0" }
12
+ authors = [
13
+ { name = "Missipssa Ounnar", email = "micipsa.ounner@gmail.com" }
14
+ ]
15
+ keywords = ["llm", "compression", "quantization", "transformer", "inference", "edge-ai"]
16
+ classifiers = [
17
+ "Development Status :: 3 - Alpha",
18
+ "Intended Audience :: Developers",
19
+ "Intended Audience :: Science/Research",
20
+ "License :: OSI Approved :: Apache Software License",
21
+ "Operating System :: OS Independent",
22
+ "Programming Language :: Python :: 3",
23
+ "Programming Language :: Python :: 3.10",
24
+ "Programming Language :: Python :: 3.11",
25
+ "Programming Language :: Python :: 3.12",
26
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
27
+ ]
28
+ dependencies = [
29
+ "huggingface_hub>=0.24.0",
30
+ "safetensors>=0.4.0",
31
+ "tqdm>=4.66.0",
32
+ "click>=8.1.0",
33
+ "rich>=13.0.0",
34
+ "pyyaml>=6.0",
35
+ ]
36
+
37
+ [project.optional-dependencies]
38
+ torch = [
39
+ "torch>=2.0.0",
40
+ ]
41
+ dev = [
42
+ "pytest>=7.0",
43
+ "pytest-cov>=4.0",
44
+ "ruff>=0.1.0",
45
+ "mypy>=1.0",
46
+ ]
47
+
48
+ [project.scripts]
49
+ uc = "ultracompress_cli.__main__:main"
50
+ ultracompress = "ultracompress_cli.__main__:main"
51
+
52
+ [project.urls]
53
+ Homepage = "https://sipsalabs.com"
54
+ Documentation = "https://github.com/mounnar/ultracompress-cli#readme"
55
+ Repository = "https://github.com/mounnar/ultracompress-cli"
56
+ Issues = "https://github.com/mounnar/ultracompress-cli/issues"
57
+
58
+ [tool.hatch.build.targets.wheel]
59
+ packages = ["src/ultracompress_cli"]
60
+
61
+ [tool.hatch.build.targets.wheel.sources]
62
+ "src/ultracompress_cli" = "ultracompress_cli"
63
+
64
+ [tool.ruff]
65
+ line-length = 100
66
+ target-version = "py310"
67
+
68
+ [tool.ruff.lint]
69
+ select = ["E", "F", "I", "UP"]
70
+ ignore = ["E501"]
@@ -0,0 +1,213 @@
1
+ """Render the `uc demo` output as a 1080p MP4 terminal-style screen recording.
2
+
3
+ Approach: run `uc demo`, capture stdout in real time with timestamps, replay
4
+ into a pyte virtual terminal at video framerate, render each frame as a PNG
5
+ with monospace font on dark background, then stitch into MP4 with ffmpeg.
6
+
7
+ Output: ~60 sec, 1920×1080, H.264, ready to upload to YC's demo video field.
8
+ """
9
+ from __future__ import annotations
10
+ import os, subprocess, time, threading, queue, shutil, tempfile, sys
11
+ from pathlib import Path
12
+ from PIL import Image, ImageDraw, ImageFont
13
+
14
# ----- Config -----
# Virtual terminal geometry (also exported to the child process via
# COLUMNS/LINES so the captured output matches the pyte screen size).
TERM_COLS = 110
TERM_ROWS = 30
W, H = 1920, 1080  # output video resolution (1080p)
FPS = 30
FONT_SIZE = 20
PADDING = 40  # px margin around the rendered terminal area
CHAR_W = 0  # auto — measured from the font at render time
LINE_H = 0  # auto — measured from the font at render time
BG = (12, 14, 18)
FG = (220, 220, 220)
DIM = (130, 130, 130)
ACCENT = (102, 217, 239)  # cyan
GREEN = (102, 217, 102)
RED = (240, 100, 100)
YELLOW = (240, 220, 102)
WHITE = (245, 245, 245)
BLACK = (12, 14, 18)

# Pyte palette for ANSI 16-color
PALETTE = {
    'default': FG,
    'black': (40, 44, 52),
    'red': RED,
    'green': GREEN,
    'yellow': YELLOW,
    'blue': (100, 149, 237),
    'magenta': (199, 146, 234),
    'cyan': ACCENT,
    'white': WHITE,
    'brown': (180, 140, 80),
    'brightblack': (90, 96, 108),
    'brightred': (250, 130, 130),
    'brightgreen': (130, 240, 130),
    'brightyellow': (250, 235, 130),
    'brightblue': (130, 170, 250),
    'brightmagenta': (220, 170, 250),
    'brightcyan': (130, 230, 250),
    'brightwhite': (255, 255, 255),
}

# Prefer an ffmpeg discovered on PATH so the script is not tied to one
# specific Windows machine; fall back to the known local install path.
FFMPEG = shutil.which("ffmpeg") or r"C:\Users\scamd\AppData\Local\Microsoft\WinGet\Packages\Gyan.FFmpeg_Microsoft.Winget.Source_8wekyb3d8bbwe\ffmpeg-8.1-full_build\bin\ffmpeg.exe"

# Try Cascadia Mono first, then Consolas as fallback
FONT_CANDIDATES = [
    "C:/Windows/Fonts/CascadiaMono.ttf",
    "C:/Windows/Fonts/CascadiaCode.ttf",
    "C:/Windows/Fonts/consola.ttf",
    "C:/Windows/Fonts/cour.ttf",
]
FONT_BOLD_CANDIDATES = [
    "C:/Windows/Fonts/CascadiaMonoBold.ttf",
    "C:/Windows/Fonts/CascadiaCodeBold.ttf",
    "C:/Windows/Fonts/consolab.ttf",
    "C:/Windows/Fonts/courbd.ttf",
]
70
+
71
+
72
def find_font(candidates):
    """Return the first path in *candidates* that exists on disk.

    Raises:
        FileNotFoundError: if none of the candidate paths exist.
    """
    existing = (path for path in candidates if Path(path).exists())
    found = next(existing, None)
    if found is None:
        raise FileNotFoundError(f"None of these fonts found: {candidates}")
    return found
77
+
78
+
79
def capture_demo() -> list[tuple[float, bytes]]:
    """Run `uc demo` and capture stdout with timestamps. Returns list of (ts, chunk)."""
    print("Capturing uc demo output...")
    env = os.environ.copy()
    # Pin the terminal geometry to the pyte screen size and force rich into
    # truecolor mode so the captured bytes contain ANSI styling.
    env.update({
        "PYTHONIOENCODING": "utf-8",
        "TERM": "xterm-256color",
        "COLUMNS": str(TERM_COLS),
        "LINES": str(TERM_ROWS),
        "FORCE_COLOR": "1",  # rich auto-detection
        "COLORTERM": "truecolor",
    })

    proc = subprocess.Popen(
        ["uc", "demo"],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        env=env,
        bufsize=0,
    )

    recorded: list[tuple[float, bytes]] = []
    start = time.time()
    # Small unbuffered reads keep each timestamp close to real emission time.
    while data := proc.stdout.read(64):
        recorded.append((time.time() - start, data))
    proc.wait()
    print(f" captured {len(recorded)} chunks over {time.time() - start:.1f}s")
    return recorded
108
+
109
+
110
def render_frames(chunks: list[tuple[float, bytes]], out_dir: Path, total_seconds: float | None = None):
    """Replay chunks into pyte and render frames.

    Feeds each captured (timestamp, bytes) chunk into a pyte virtual terminal
    in timestamp order and renders one PNG per video frame into *out_dir*.

    Args:
        chunks: (seconds-since-start, raw bytes) pairs from capture_demo().
        out_dir: directory receiving frame_00000.png, frame_00001.png, ...
        total_seconds: video length; defaults to last chunk time + 1.5s hold.

    Returns:
        (n_frames, total_seconds).
    """
    import pyte

    screen = pyte.Screen(TERM_COLS, TERM_ROWS)
    stream = pyte.ByteStream(screen)

    if total_seconds is None:
        total_seconds = chunks[-1][0] + 1.5 if chunks else 5.0
    n_frames = int(total_seconds * FPS) + 1

    font_path = find_font(FONT_CANDIDATES)
    bold_path = find_font(FONT_BOLD_CANDIDATES)
    font = ImageFont.truetype(font_path, FONT_SIZE)
    bold = ImageFont.truetype(bold_path, FONT_SIZE)

    # Measure the character cell from the font (CHAR_W/LINE_H are "auto").
    bbox = font.getbbox("M")
    char_w = bbox[2] - bbox[0]
    bbox = font.getbbox("Mg")
    line_h = int((bbox[3] - bbox[1]) * 1.4) + 2

    chunk_idx = 0
    print(f"Rendering {n_frames} frames at {FPS} fps...")

    for f in range(n_frames):
        t = f / FPS
        # Feed all chunks up to time t
        while chunk_idx < len(chunks) and chunks[chunk_idx][0] <= t:
            try:
                stream.feed(chunks[chunk_idx][1])
            except Exception:
                # A malformed escape sequence must not abort the whole render.
                pass
            chunk_idx += 1

        img = Image.new("RGB", (W, H), BG)
        draw = ImageDraw.Draw(img)

        # Draw header bar (traffic-light window chrome + title)
        draw.rectangle([0, 0, W, 36], fill=(28, 32, 40))
        draw.ellipse([18, 12, 30, 24], fill=(255, 95, 86))
        draw.ellipse([38, 12, 50, 24], fill=(255, 189, 46))
        draw.ellipse([58, 12, 70, 24], fill=(39, 201, 63))
        draw.text((W // 2 - 80, 8), "ultracompress demo", fill=DIM, font=font)

        # Render each row of the virtual screen
        for row_idx, line in enumerate(screen.display):
            x = PADDING
            y = PADDING + 36 + row_idx * line_h
            row_data = screen.buffer[row_idx]
            # Rows from screen.display are exactly TERM_COLS wide, so the
            # extra per-character bounds check the original carried was
            # redundant; the loop bound alone is sufficient.
            for col_idx in range(min(len(line), TERM_COLS)):
                ch = line[col_idx]
                if ch == "\x00":
                    ch = " "
                ce = row_data.get(col_idx)
                fg_name = ce.fg if ce else "default"
                bold_flag = ce.bold if ce else False
                fg_color = PALETTE.get(fg_name, FG) if fg_name != "default" else FG
                f_use = bold if bold_flag else font
                if ch.strip():  # nothing to draw for whitespace
                    draw.text((x, y), ch, fill=fg_color, font=f_use)
                x += char_w

        # Save frame (low PNG compression: speed over size for intermediates)
        img.save(out_dir / f"frame_{f:05d}.png", optimize=False, compress_level=1)
        if f % 60 == 0:
            print(f" frame {f}/{n_frames} (t={t:.1f}s)")

    return n_frames, total_seconds
180
+
181
+
182
def main():
    """Capture `uc demo`, render the frames, and encode the final MP4."""
    frames_dir = Path(tempfile.mkdtemp(prefix="uc_demo_render_"))
    try:
        chunks = capture_demo()
        if not chunks:
            print("ERROR: no output captured")
            sys.exit(1)

        n_frames, total = render_frames(chunks, frames_dir)
        print(f"Rendered {n_frames} frames into {frames_dir}")

        # Stitch with ffmpeg
        out_mp4 = Path(__file__).parent / "ultracompress_demo.mp4"
        encode_cmd = [
            FFMPEG,
            "-y",  # overwrite any previous output
            "-framerate", str(FPS),
            "-i", str(frames_dir / "frame_%05d.png"),
            "-c:v", "libx264",
            "-preset", "fast",
            "-crf", "20",
            "-pix_fmt", "yuv420p",  # broad player compatibility
            "-movflags", "+faststart",  # moov atom up front for streaming
            str(out_mp4),
        ]
        print("Encoding MP4...")
        subprocess.run(encode_cmd, check=True)

        size_mb = out_mp4.stat().st_size / 1024 / 1024
        print(f"Wrote {out_mp4} ({size_mb:.1f} MB, {total:.1f}s)")
    finally:
        # Always remove the (potentially large) intermediate frame directory.
        shutil.rmtree(frames_dir, ignore_errors=True)
210
+
211
+
212
+ if __name__ == "__main__":
213
+ main()