pdasc-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- core/__init__.py +10 -0
- core/ascii_converter.py +111 -0
- core/ascii_displayer.py +317 -0
- core/ascii_file_encoding.py +258 -0
- core/audio_player.py +150 -0
- core/generate_color_ramp.py +69 -0
- core/utils.py +26 -0
- core/video_ascii_video.py +220 -0
- core/video_extractor.py +128 -0
- pdasc/__init__.py +0 -0
- pdasc/fonts/CascadiaMono.ttf +0 -0
- pdasc/fonts/font8x8.ttf +0 -0
- pdasc/main.py +296 -0
- pdasc-0.1.0.dist-info/METADATA +16 -0
- pdasc-0.1.0.dist-info/RECORD +26 -0
- pdasc-0.1.0.dist-info/WHEEL +5 -0
- pdasc-0.1.0.dist-info/entry_points.txt +2 -0
- pdasc-0.1.0.dist-info/licenses/LICENSE +21 -0
- pdasc-0.1.0.dist-info/top_level.txt +3 -0
- web/__init__.py +4 -0
- web/image_controller/__init__.py +1 -0
- web/image_controller/app.py +99 -0
- web/image_controller/templates/index.html +383 -0
- web/video_player/__init__.py +1 -0
- web/video_player/app.py +47 -0
- web/video_player/templates/index.html +296 -0
core/__init__.py
ADDED
@@ -0,0 +1,10 @@
+from .ascii_converter import AsciiConverter
+from .ascii_displayer import AsciiDisplayer
+from .ascii_file_encoding import AsciiEncoder, AsciiDecoder
+from .audio_player import AudioPlayer
+from .generate_color_ramp import generate_color_ramp, get_charmap, render_charmap
+from .utils import pack_int24, unpack_int24, pack_int24_chunk, unpack_int24_array, format_file_size
+from .video_ascii_video import VideoAsciiConverter, process_video
+from .video_extractor import extract_video
+
+__all__ = ["AsciiConverter", "AsciiDisplayer", "AsciiEncoder", "AsciiDecoder", "AudioPlayer", "generate_color_ramp", "get_charmap", "render_charmap", "pack_int24", "unpack_int24", "pack_int24_chunk", "unpack_int24_array", "format_file_size", "VideoAsciiConverter", "process_video", "extract_video"]
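Because core/__init__.py re-exports every public class and helper, downstream code can import from the package root instead of the individual modules. A minimal sketch, assuming the wheel is installed so the `core` package is importable and the bundled default font resolves:

    from core import AsciiConverter, AsciiDisplayer, AsciiEncoder, AsciiDecoder

    # Constructor arguments match the defaults defined in core/ascii_converter.py
    converter = AsciiConverter(num_ascii=8, chunk_size=8)
    displayer = AsciiDisplayer(converter)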
core/ascii_converter.py
ADDED
@@ -0,0 +1,111 @@
+from PIL import Image
+import numpy as np
+from .generate_color_ramp import generate_color_ramp, get_charmap
+from numba import njit, prange
+
+@njit(parallel=True, fastmath=True, cache=True)
+def compute_blocks(img: np.ndarray, cs: int, gray_levels: int, color: bool):
+    h, w, _ = img.shape
+    bh, bw = h // cs, w // cs
+
+    char_idx = np.empty((bh, bw), np.int32)
+    colors = np.empty((bh, bw), np.uint32)  # Always allocate
+
+    inv_area = 1.0 / (cs * cs)
+
+    for by in prange(bh):
+        for bx in range(bw):
+            r = g = b = 0.0
+
+            if color:
+                for y in range(cs):
+                    for x in range(cs):
+                        px = img[by*cs + y, bx*cs + x]
+                        r += px[0]
+                        g += px[1]
+                        b += px[2]
+
+                r *= inv_area
+                g *= inv_area
+                b *= inv_area
+            else:
+                # compute grayscale
+                for y in range(cs):
+                    for x in range(cs):
+                        px = img[by*cs + y, bx*cs + x]
+                        r += px[0]
+                        g += px[1]
+                        b += px[2]
+
+                r *= inv_area
+                g *= inv_area
+                b *= inv_area
+
+            # Grayscale -> char index
+            lum = 0.2126*r + 0.7152*g + 0.0722*b
+            gi = int(lum * gray_levels)
+            if gi >= gray_levels:
+                gi = gray_levels - 1
+            char_idx[by, bx] = gi
+
+            if color:
+                # Convert to 8-bit RGB
+                ri = np.uint32(r * 255.0)
+                gi_color = np.uint32(g * 255.0)
+                bi = np.uint32(b * 255.0)
+
+                # Pack RGB -> 0xRRGGBB
+                colors[by, bx] = (ri << 16) | (gi_color << 8) | bi
+            else:
+                colors[by, bx] = 0  # Dummy value when no color
+
+    return char_idx, colors
+
+class AsciiConverter:
+    def __init__(self, num_ascii: int = 8, chunk_size: int = 8, font_path: str = "CascadiaMono.ttf"):
+        self.font_path = font_path
+        self.chunk_size: int = chunk_size
+        self.char_map = get_charmap(generate_color_ramp(font_path=self.font_path), num_ascii)
+        self.num_ascii: int = min(num_ascii, len(self.char_map))
+
+    def regen_charmap(self):
+        self.char_map = get_charmap(generate_color_ramp(font_path=self.font_path), self.num_ascii)
+
+    def quantize_grayscale(self, image: Image.Image) -> Image.Image:
+        img_data = np.array(image, dtype=np.float32) / 255.0
+        height, width, channels = img_data.shape
+        out_data = np.zeros_like(img_data)
+
+        for y in range(height):
+            for x in range(width):
+                in_pix = img_data[y, x]
+                gray = sum([0.2126 * in_pix[0], 0.7152 * in_pix[1], 0.0722 * in_pix[2]])
+                quantized = np.floor(gray * self.num_ascii) / (self.num_ascii - 1)
+                out_data[y, x] = [quantized, quantized, quantized]
+
+        out_data = (out_data * 255).astype(np.uint8)
+
+        return Image.fromarray(out_data, "RGB")
+
+    def get_ascii(self, image: Image.Image, color: bool = True) -> np.ndarray:
+        img = np.asarray(image, dtype=np.float32) / 255.0
+        cs = self.chunk_size
+
+        char_idx, colors = compute_blocks(
+            img,
+            cs,
+            self.num_ascii,
+            color
+        )
+
+        # map char indices to chars
+        char_map_arr = np.array(list(self.char_map), dtype='<U1')
+        chars = char_map_arr[char_idx]
+
+        # Structured array
+        dtype = np.dtype([('char', '<U1'), ('color', np.uint32)])
+        out = np.empty(chars.shape, dtype=dtype)
+        out['char'] = chars
+        out['color'] = colors if color else np.zeros_like(colors, dtype=np.uint32)
+
+        return out
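compute_blocks averages each chunk_size x chunk_size block, maps its luminance (Rec. 709 weights) to an index into the character ramp, and, in color mode, packs the block's mean RGB into a single uint32 as 0xRRGGBB. get_ascii then returns a structured array with fields 'char' and 'color'. A minimal usage sketch, assuming an RGB input image and that the default CascadiaMono.ttf ramp resolves (the image path is hypothetical):

    from PIL import Image
    from core.ascii_converter import AsciiConverter

    converter = AsciiConverter(num_ascii=8, chunk_size=8)
    image = Image.open("photo.png").convert("RGB")   # hypothetical input

    cells = converter.get_ascii(image, color=True)   # dtype [('char','<U1'),('color',uint32)]
    top_left = cells[0, 0]
    packed = int(top_left['color'])                  # unpack 0xRRGGBB back into channels
    r, g, b = (packed >> 16) & 0xFF, (packed >> 8) & 0xFF, packed & 0xFF
    print(top_left['char'], (r, g, b))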
core/ascii_displayer.py
ADDED
@@ -0,0 +1,317 @@
+from PIL import Image
+import numpy as np
+import time
+import sys
+import signal
+import atexit
+from .ascii_converter import AsciiConverter
+from .utils import unpack_int24_array
+from .audio_player import AudioPlayer
+from .ascii_file_encoding import AsciiDecoder
+
+class AsciiDisplayer:
+    def __init__(self, converter: AsciiConverter, debug: bool = False):
+        self.converter: AsciiConverter = converter
+        self._cleanup_done = False
+        self.debug = debug
+
+    def color_text(self, text: str, r: int, g: int, b: int):
+        r = max(min(r, 255), 0)
+        g = max(min(g, 255), 0)
+        b = max(min(b, 255), 0)
+        return f"\033[38;2;{r};{g};{b}m{text}"
+
+    def render_ascii(self, ascii_array: np.ndarray, colored: bool):
+        """
+        Render a structured ASCII array to a colored string for terminal.
+
+        ascii_array: np.ndarray with dtype [('char','<U1'),('color',np.uint32)]
+
+        Returns: str with ANSI color codes (or plain text if not colored)
+        """
+
+        lines = []
+        if colored:
+            for row in ascii_array:
+                chars = np.char.multiply(row['char'], 2)
+                r, g, b = unpack_int24_array(row['color'])
+                line = ""
+                last_color = None
+                for ch, ri, gi, bi in zip(chars, r, g, b):
+                    color = (ri, gi, bi)
+                    if color != last_color:
+                        line += f"\033[38;2;{ri};{gi};{bi}m"
+                        last_color = color
+                    line += ch
+                lines.append(line)
+            return "\033[1;40m" + "\n".join(lines) + "\033[0m"  # set background black and bold at start and reset everything at end
+        else:
+            for row in ascii_array:
+                chars = np.char.multiply(row['char'], 2)
+                line = "".join(chars)
+                lines.append(line)
+            return "\n".join(lines)
+
+    def render_image(self, image: Image.Image, color: bool = True):
+        ascii = self.converter.get_ascii(image, color)
+        frame = self.render_ascii(ascii, color)
+        sys.stdout.write(f"\033[H{frame}")
+        sys.stdout.flush()
+
+    def _cleanup_terminal(self):
+        """Restore terminal to normal state"""
+        if not self._cleanup_done:
+            self._cleanup_done = True
+            # Disable signal handlers during cleanup to prevent interruption
+            signal.signal(signal.SIGINT, signal.SIG_IGN)
+            signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+            # Write cleanup sequences atomically
+            try:
+                cleanup_seq = "\033[0m\033[?25h\033[?1049l"  # reset style, show cursor, old buffer
+                sys.stdout.write(cleanup_seq)
+                sys.stdout.flush()
+            except:
+                # If stdout fails, try stderr as fallback
+                try:
+                    sys.stderr.write(cleanup_seq)
+                    sys.stderr.flush()
+                except:
+                    pass
+
+            # Force terminal back to sane state using stty for linux
+            import subprocess
+            try:
+                subprocess.run(['stty', 'echo', 'icanon'],
+                               stdin=sys.stdin,
+                               stdout=subprocess.DEVNULL,
+                               stderr=subprocess.DEVNULL,
+                               timeout=0.5)
+            except:
+                pass
+
+    def _setup_terminal(self):
+        """Setup terminal and ensure cleanup happens"""
+        print("\033[?1049h\033[?25l\033[H\033[2J", end="")  # seperate buffer, hide cursor, move cursor home, clear
+        sys.stdout.flush()
+
+        self._cleanup_done = False
+
+        # Multiple layers of protection
+        atexit.register(self._cleanup_terminal)
+
+        def signal_handler(sig, frame):
+            self._cleanup_terminal()
+            # Force exit without further exception handling
+            import os
+            os._exit(0)
+
+        signal.signal(signal.SIGINT, signal_handler)
+        signal.signal(signal.SIGTERM, signal_handler)
+
+    def display_image(self, image: Image.Image, color: bool = True):
+        import shutil
+
+        self._setup_terminal()
+
+        try:
+            term_cols, term_rows = shutil.get_terminal_size()
+
+            img_width, img_height = image.size
+            chars_wide = img_width // self.converter.chunk_size
+            chars_tall = img_height // self.converter.chunk_size
+
+            needed_cols = chars_wide * 2
+            needed_rows = chars_tall
+
+            # Scale down if image would be larger than terminal because would cut off
+            if needed_cols > term_cols or needed_rows > (term_rows - 2):
+                scale_factor = min(
+                    term_cols / needed_cols,
+                    (term_rows - 2) / needed_rows
+                )
+                new_width = int(img_width * scale_factor)
+                new_height = int(img_height * scale_factor)
+                image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
+
+            self.render_image(image, color)
+            input()
+        finally:
+            self._cleanup_terminal()
+
+    def display_video(self, video_path: str, play_audio: bool = True, color: bool = True):
+        from core.video_extractor import extract_video
+
+        self._setup_terminal()
+
+        player = None
+
+        try:
+            fps, frame_gen, audio_gen = extract_video(video_path)
+
+            if play_audio and audio_gen is not None:
+                player = AudioPlayer(audio_gen)
+                player.start()
+
+            frame_time = 1.0 / fps
+            start_time = time.time()
+            frame_idx = 0
+
+            for frame in frame_gen:
+                elapsed = time.time() - start_time
+                target_frame = int(elapsed * fps)
+
+                frame_idx += 1
+
+                # Skip frame if behind
+                if frame_idx - 1 < target_frame:
+                    continue
+
+                self.render_image(frame, color)
+
+                if self.debug:
+                    current_time = time.time()
+                    actual_fps = frame_idx / (current_time - start_time)
+                    sys.stdout.write(f"\n\033[2KFPS: {round(actual_fps)}")
+                    sys.stdout.flush()
+
+                # Sleep if extra time to cap fps to video frame rate
+                target_time = start_time + frame_idx * frame_time
+                sleep_time = target_time - time.time()
+                if sleep_time > 0:
+                    time.sleep(sleep_time)
+
+        except KeyboardInterrupt:
+            pass  # Handle Ctrl+C gracefully
+
+        finally:
+            if player:
+                try:
+                    player.stop()
+                except:
+                    pass
+            self._cleanup_terminal()
+
+    def display_asc_file(self, asc_path: str, play_audio: bool = True):
+        """Display a pre-encoded .asc file - BLAZING FAST!"""
+        decoder = AsciiDecoder()
+        decoder.read(asc_path)
+
+        self._setup_terminal()
+
+        player = None
+
+        try:
+            if play_audio and decoder.has_audio and decoder.audio_data:
+                def audio_gen():
+                    if decoder.audio_data:
+                        chunk_size = decoder.audio_rate * 2 * decoder.audio_channels
+                        for i in range(0, len(decoder.audio_data), chunk_size):
+                            chunk = decoder.audio_data[i:i+chunk_size]
+                            valid_size = (len(chunk) // 4) * 4
+                            if valid_size > 0:
+                                audio_np = np.frombuffer(chunk[:valid_size], dtype=np.int16).reshape(-1, decoder.audio_channels)
+                                yield audio_np
+
+                player = AudioPlayer(audio_gen(), samplerate=decoder.audio_rate, channels=decoder.audio_channels)
+                player.start()
+
+            if decoder.is_video:
+                # Video playback - just write pre-rendered strings!
+                frame_time = 1.0 / decoder.fps
+                start_time = time.time()
+
+                for frame_idx, frame_str in enumerate(decoder.frames):
+                    elapsed = time.time() - start_time
+                    target_frame = int(elapsed * decoder.fps)
+
+                    # Skip frame if behind
+                    if frame_idx < target_frame:
+                        continue
+
+                    sys.stdout.write(f"\033[H{frame_str}")
+                    sys.stdout.flush()
+
+                    # Sleep to maintain frame rate
+                    target_time = start_time + (frame_idx + 1) * frame_time
+                    sleep_time = target_time - time.time()
+                    if sleep_time > 0:
+                        time.sleep(sleep_time)
+            else:
+                sys.stdout.write(f"\033[H{decoder.frames[0]}")
+                sys.stdout.flush()
+                input()
+
+        except KeyboardInterrupt:
+            pass
+
+        finally:
+            if player and player.stream:
+                player.stream.stop()
+                player.stream.close()
+            self._cleanup_terminal()
+
+    def display_camera(self, camera_index: int = 0, color: bool = True):
+        """Display live camera feed as ASCII art"""
+        import cv2
+        import os
+
+        # Suppress OpenCV errors temporarily
+        devnull = open(os.devnull, 'w')
+        old_stderr = os.dup(2)
+        os.dup2(devnull.fileno(), 2)
+
+        try:
+            # Try to open camera before setting up terminal for better errors
+            cap = cv2.VideoCapture(camera_index)
+
+            # read test frame
+            ret, test_frame = cap.read()
+        finally:
+            # Restore stderr
+            os.dup2(old_stderr, 2)
+            os.close(old_stderr)
+            devnull.close()
+
+        if not ret or test_frame is None:
+            cap.release()
+            print(f"Error: Could not open camera {camera_index}")
+            print("Make sure a camera is connected and is available")
+            return
+
+        self._setup_terminal()
+
+        # Get camera FPS (default 30)
+        fps = cap.get(cv2.CAP_PROP_FPS)
+        if fps == 0 or fps > 60:
+            fps = 30
+
+        frame_time = 1.0 / fps
+
+        try:
+            last_time = time.time()
+
+            while True:
+                ret, frame = cap.read()
+
+                if not ret:
+                    break
+
+                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+                image = Image.fromarray(frame_rgb)
+
+                self.render_image(image, color)
+
+                elapsed = time.time() - last_time
+                sleep_time = frame_time - elapsed
+                if sleep_time > 0:
+                    time.sleep(sleep_time)
+                last_time = time.time()
+
+        except KeyboardInterrupt:
+            pass  # Handle Ctrl+C gracefully
+
+        finally:
+            cap.release()
+            self._cleanup_terminal()
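render_ascii doubles every character (np.char.multiply(row['char'], 2)) so each block occupies two terminal columns and stays roughly square, and in colored mode it emits a 24-bit ANSI escape only when the cell color changes. The display_* methods switch to the alternate screen buffer, hide the cursor, and restore the terminal on exit or Ctrl+C. A minimal usage sketch with hypothetical media paths; the colored path assumes a terminal with 24-bit color support:

    from PIL import Image
    from core.ascii_converter import AsciiConverter
    from core.ascii_displayer import AsciiDisplayer

    displayer = AsciiDisplayer(AsciiConverter(chunk_size=8), debug=False)

    # Still image: renders once, waits for Enter, then restores the terminal
    displayer.display_image(Image.open("photo.png").convert("RGB"), color=True)

    # Video: frames are extracted, converted, and paced to the source FPS
    # displayer.display_video("clip.mp4", play_audio=True, color=True)

    # Live webcam feed (requires OpenCV and an available camera)
    # displayer.display_camera(camera_index=0, color=True)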
core/ascii_file_encoding.py
ADDED
@@ -0,0 +1,258 @@
+import struct
+import os
+from .utils import format_file_size
+from typing import List
+import zstandard as zstd
+
+class AsciiEncoder:
+    """Encoder for .asc (ASCII Container) file format - stores pre-rendered ANSI strings"""
+
+    MAGIC = b'ASII'
+    VERSION = 2
+
+    # Flag bits
+    FLAG_IS_VIDEO = 1 << 0
+    FLAG_HAS_AUDIO = 1 << 1
+
+    def __init__(self):
+        self.frames: list[str] = []  # Store actual ANSI strings
+        self.fps: float = 30.0
+        self.has_audio: bool = False
+        self.audio_data = None
+        self.audio_rate: int = 44100
+        self.audio_channels: int = 2
+
+    def add_rendered_frame(self, ansi_string: str):
+        """
+        Add a pre-rendered frame
+
+        Args:
+            ansi_string: Complete ANSI string ready to display
+        """
+        self.frames.append(ansi_string)
+
+    def set_audio(self, audio_data: bytes, sample_rate: int = 44100, channels: int = 2):
+        """Set audio data (raw PCM16)"""
+        self.has_audio = True
+        self.audio_data = audio_data
+        self.audio_rate = sample_rate
+        self.audio_channels = channels
+
+    def write(self, output_path: str):
+        """Write encoded data to file"""
+        if not self.frames:
+            raise ValueError("No frames added")
+
+        with open(output_path, 'wb') as f:
+            flags = 0
+            is_video = len(self.frames) > 1
+
+            if is_video:
+                flags |= self.FLAG_IS_VIDEO
+            if self.has_audio:
+                flags |= self.FLAG_HAS_AUDIO
+
+            # Write header (24 bytes)
+            header = struct.pack(
+                '!4sHHfI8s',
+                self.MAGIC,        # Magic number (4 bytes)
+                self.VERSION,      # Version (2 bytes)
+                flags,             # Flags (2 bytes)
+                self.fps,          # FPS (4 bytes)
+                len(self.frames),  # Frame count (4 bytes)
+                b'\x00' * 8        # Reserved (8 bytes)
+            )
+
+            f.write(header)
+
+            frame_bytes = [frame.encode("utf-8") for frame in self.frames]
+            frame_lengths = [len(b) for b in frame_bytes]
+            all_frames_bytes = b"".join(frame_bytes)
+
+            print("Compressing")
+            cctx = zstd.ZstdCompressor(level=5)
+            compressed = cctx.compress(all_frames_bytes)
+
+            print("Writing")
+            for l in frame_lengths:
+                f.write(struct.pack("!I", l))
+
+            # write compressed blob
+            f.write(struct.pack("!I", len(compressed)))
+            f.write(compressed)
+
+            # Write audio if present
+            if self.has_audio and self.audio_data:
+                audio_header = struct.pack(
+                    '!IBI',
+                    len(self.audio_data),  # Audio data size
+                    1,                     # Audio format (1 = PCM16)
+                    self.audio_rate,       # Sample rate
+                )
+                f.write(audio_header)
+                f.write(struct.pack('!B', self.audio_channels))
+                f.write(self.audio_data)
+
+    def encode_image_to_asc(self, image_path: str, output_path: str, color: bool = True, converter=None, displayer=None):
+        """Encode a single image to .asc format"""
+        from PIL import Image
+
+        if not converter or not displayer:
+            from .ascii_converter import AsciiConverter
+            from .ascii_displayer import AsciiDisplayer
+            converter = converter or AsciiConverter()
+            displayer = displayer or AsciiDisplayer(converter)
+
+        # Load and convert image
+        image = Image.open(image_path)
+        ascii_array = converter.get_ascii(image, color)
+
+        # Render to ANSI string
+        ansi_string = displayer.render_ascii(ascii_array, color)
+
+        self.fps = 1.0
+        self.add_rendered_frame(ansi_string)
+        self.write(output_path)
+
+        print(f"Encoded image to {output_path}")
+        print(f"File size: {format_file_size(os.path.getsize(output_path))}")
+
+    def encode_video_to_asc(self, video_path: str, output_path: str,
+                            play_audio: bool = True, color: bool = True,
+                            converter=None, displayer=None):
+        """Encode a video to .asc format"""
+        from core.video_extractor import extract_video
+        import threading
+
+        if not converter:
+            from .ascii_converter import AsciiConverter
+            converter = converter or AsciiConverter()
+        if not displayer:
+            from .ascii_displayer import AsciiDisplayer
+            displayer = displayer or AsciiDisplayer(converter)
+
+        print(f"Encoding {video_path}...")
+
+        # Extract video
+        fps, frame_gen, audio_gen = extract_video(video_path)
+        self.fps = fps
+
+        # Collect audio in background
+        audio_chunks = []
+        audio_thread = None
+
+        if play_audio and audio_gen is not None:
+            def collect_audio():
+                for chunk in audio_gen:
+                    audio_chunks.append(chunk.tobytes())
+
+            audio_thread = threading.Thread(target=collect_audio, daemon=True)
+            audio_thread.start()
+
+        # Encode frames
+        frame_count = 0
+        try:
+            for frame in frame_gen:
+                # Convert to ASCII and render to ANSI string
+                ascii_array = converter.get_ascii(frame, color)
+                ansi_string = displayer.render_ascii(ascii_array, color)
+
+                self.add_rendered_frame(ansi_string)
+                frame_count += 1
+
+                if frame_count % 30 == 0:
+                    print(f"Encoded {frame_count} frames...", end='\r')
+
+            print(f"\nEncoded {frame_count} frames total")
+
+            # Wait for audio
+            if audio_thread:
+                audio_thread.join(timeout=5.0)
+
+            if audio_chunks:
+                audio_data = b''.join(audio_chunks)
+                self.set_audio(audio_data)
+                print(f"Added {format_file_size(len(audio_data))} of audio")
+
+            self.write(output_path)
+
+            file_size = os.path.getsize(output_path)
+            print(f"Saved to {output_path}")
+            print(f"File size: {format_file_size(file_size)}")
+
+        except Exception as e:
+            print(f"\nError during encoding: {e}")
+            raise
+
+
+class AsciiDecoder:
+    """Decoder for .asc (ASCII Container) file format - reads pre-rendered frames"""
+
+    def __init__(self):
+        self.fps = 0.0
+        self.has_audio = False
+        self.is_video = False
+        self.frames = []  # Pre-rendered ANSI strings
+        self.audio_data = None
+        self.audio_rate = 44100
+        self.audio_channels = 2
+
+    def read(self, input_path: str):
+        """Read and decode .asc file with block-compressed frames"""
+        with open(input_path, 'rb') as f:
+            # Read header (24 bytes)
+            header_data = f.read(24)
+            if len(header_data) < 24:
+                raise ValueError("Invalid file: header too short")
+
+            magic, version, flags, fps, frame_count, reserved = struct.unpack('!4sHHfI8s', header_data)
+
+            if magic != b'ASII':
+                raise ValueError(f"Invalid file format: expected 'ASII', got {magic}")
+
+            if version != 2:
+                raise ValueError(f"Unsupported version: {version}")
+
+            # Parse flags
+            self.is_video = bool(flags & AsciiEncoder.FLAG_IS_VIDEO)
+            self.has_audio = bool(flags & AsciiEncoder.FLAG_HAS_AUDIO)
+            self.fps = fps
+
+            frame_lengths = [
+                struct.unpack("!I", f.read(4))[0]
+                for _ in range(frame_count)
+            ]
+
+            compressed_size = struct.unpack("!I", f.read(4))[0]
+            compressed = f.read(compressed_size)
+
+            print("Decompressing")
+            dctx = zstd.ZstdDecompressor()
+            all_frames_bytes = dctx.decompress(compressed)
+            print("Done decompressing")
+
+            self.frames = []
+            idx = 0
+            for length in frame_lengths:
+                frame_bytes = all_frames_bytes[idx:idx+length]
+                self.frames.append(frame_bytes.decode('utf-8'))
+                idx += length
+
+            if self.has_audio:
+                audio_size, audio_format, self.audio_rate = struct.unpack('!IBI', f.read(9))
+                self.audio_channels = struct.unpack('!B', f.read(1))[0]
+
+                if audio_format != 1:
+                    raise ValueError(f"Unsupported audio format: {audio_format}")
+
+                self.audio_data = f.read(audio_size)
+
+    def get_frame(self, index: int) -> str:
+        """Get a pre-rendered frame by index"""
+        if index < 0 or index >= len(self.frames):
+            raise IndexError(f"Frame index {index} out of range")
+        return self.frames[index]
+
+    def get_all_frames(self) -> List[str]:
+        """Get all pre-rendered frames"""
+        return self.frames
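The .asc container begins with a 24-byte header (the ASII magic, version, flags, FPS, frame count, and reserved bytes), followed by one 4-byte length per frame, a 4-byte compressed size plus a single zstd blob holding every pre-rendered ANSI frame, and, when the audio flag is set, a small audio header and raw PCM16 data. A minimal encode/decode round trip, with hypothetical file paths:

    from core.ascii_file_encoding import AsciiEncoder, AsciiDecoder

    # Encode a video into the .asc container (frames and audio are extracted internally)
    encoder = AsciiEncoder()
    encoder.encode_video_to_asc("clip.mp4", "clip.asc", play_audio=True, color=True)

    # Read it back and inspect the decoded metadata and frames
    decoder = AsciiDecoder()
    decoder.read("clip.asc")
    print(decoder.fps, decoder.is_video, decoder.has_audio, len(decoder.get_all_frames()))

For playback, AsciiDisplayer.display_asc_file("clip.asc") writes the pre-rendered strings directly, so no per-frame conversion happens at display time.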