notso-glb 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- notso_glb/__init__.py +38 -0
- notso_glb/__main__.py +6 -0
- notso_glb/analyzers/__init__.py +20 -0
- notso_glb/analyzers/bloat.py +117 -0
- notso_glb/analyzers/bones.py +100 -0
- notso_glb/analyzers/duplicates.py +71 -0
- notso_glb/analyzers/skinned_mesh.py +47 -0
- notso_glb/analyzers/uv_maps.py +59 -0
- notso_glb/cleaners/__init__.py +23 -0
- notso_glb/cleaners/bones.py +49 -0
- notso_glb/cleaners/duplicates.py +110 -0
- notso_glb/cleaners/mesh.py +183 -0
- notso_glb/cleaners/textures.py +116 -0
- notso_glb/cleaners/uv_maps.py +29 -0
- notso_glb/cleaners/vertex_groups.py +34 -0
- notso_glb/cli.py +330 -0
- notso_glb/exporters/__init__.py +8 -0
- notso_glb/exporters/gltf.py +647 -0
- notso_glb/utils/__init__.py +20 -0
- notso_glb/utils/blender.py +49 -0
- notso_glb/utils/constants.py +41 -0
- notso_glb/utils/gltfpack.py +273 -0
- notso_glb/utils/logging.py +421 -0
- notso_glb/utils/naming.py +24 -0
- notso_glb/wasm/__init__.py +32 -0
- notso_glb/wasm/constants.py +8 -0
- notso_glb/wasm/gltfpack.version +1 -0
- notso_glb/wasm/gltfpack.wasm +0 -0
- notso_glb/wasm/py.typed +0 -0
- notso_glb/wasm/runner.py +137 -0
- notso_glb/wasm/runtime.py +244 -0
- notso_glb/wasm/wasi.py +347 -0
- notso_glb-0.1.0.dist-info/METADATA +150 -0
- notso_glb-0.1.0.dist-info/RECORD +36 -0
- notso_glb-0.1.0.dist-info/WHEEL +4 -0
- notso_glb-0.1.0.dist-info/entry_points.txt +3 -0

notso_glb/cleaners/mesh.py
ADDED

@@ -0,0 +1,183 @@
"""Mesh cleanup and decimation functions."""

import bpy
from bpy.types import Modifier, Object

from notso_glb.utils import get_mesh_data, get_view_layer
from notso_glb.utils.constants import BLOAT_THRESHOLDS
from notso_glb.utils.logging import log_warn


def cleanup_mesh_bmesh(obj: Object) -> dict[str, int] | None:
    """
    Clean up mesh using bmesh operations:
    - Remove duplicate vertices (doubles)
    - Dissolve degenerate geometry (zero-area faces, zero-length edges)
    - Remove loose vertices

    Returns dict with cleanup stats or None if failed.
    """
    import bmesh

    mesh = get_mesh_data(obj)
    original_verts = len(mesh.vertices)
    original_faces = len(mesh.polygons)

    bm = bmesh.new()
    bm.from_mesh(mesh)

    stats: dict[str, int] = {
        "doubles_merged": 0,
        "degenerate_dissolved": 0,
        "loose_removed": 0,
    }

    # 1. Remove doubles (merge vertices within threshold)
    merge_dist = 0.0001  # 0.1mm threshold
    verts_before_doubles = len(bm.verts)
    bmesh.ops.remove_doubles(bm, verts=list(bm.verts), dist=merge_dist)
    stats["doubles_merged"] = verts_before_doubles - len(bm.verts)

    # 2. Dissolve degenerate geometry
    degenerate_faces = [f for f in bm.faces if f.calc_area() < 1e-8]
    if degenerate_faces:
        bmesh.ops.delete(bm, geom=degenerate_faces, context="FACES")
        stats["degenerate_dissolved"] += len(degenerate_faces)

    degenerate_edges = [e for e in bm.edges if e.calc_length() < 1e-8]
    if degenerate_edges:
        bmesh.ops.delete(bm, geom=degenerate_edges, context="EDGES")
        stats["degenerate_dissolved"] += len(degenerate_edges)

    # 3. Remove loose vertices (not connected to any face)
    loose_verts = [v for v in bm.verts if not v.link_faces]
    if loose_verts:
        bmesh.ops.delete(bm, geom=loose_verts, context="VERTS")
        stats["loose_removed"] = len(loose_verts)

    # Write back to mesh
    bm.to_mesh(mesh)
    bm.free()

    mesh.update()

    new_verts = len(mesh.vertices)
    new_faces = len(mesh.polygons)

    stats["verts_before"] = original_verts
    stats["verts_after"] = new_verts
    stats["faces_before"] = original_faces
    stats["faces_after"] = new_faces

    return stats


def decimate_mesh(obj: Object, target_verts: int) -> tuple[int, int] | None:
    """
    Apply decimation to reduce mesh to approximately target vertex count.

    Returns (original_verts, new_verts) or None if failed.
    """
    mesh = get_mesh_data(obj)
    original_verts = len(mesh.vertices)
    if original_verts <= target_verts:
        return None

    ratio: float = target_verts / original_verts

    mod: Modifier = obj.modifiers.new(name="AutoDecimate", type="DECIMATE")
    mod.decimate_type = "COLLAPSE"  # ty:ignore[unresolved-attribute] # pyright: ignore[reportAttributeAccessIssue]
    mod.ratio = ratio  # ty:ignore[unresolved-attribute] # pyright: ignore[reportAttributeAccessIssue]
    mod.use_collapse_triangulate = True  # ty:ignore[unresolved-attribute] # pyright: ignore[reportAttributeAccessIssue]

    view_layer = get_view_layer()
    view_layer.objects.active = obj
    try:
        bpy.ops.object.modifier_apply(modifier=mod.name)
        new_verts = len(mesh.vertices)
        return (original_verts, new_verts)
    except Exception as e:  # pragma: no cover - defensive
        obj.modifiers.remove(mod)
        log_warn(f"Failed to decimate {obj.name}: {e}")
        return None


def auto_fix_bloat(
    warnings: list[dict[str, object]],
) -> dict[str, list[dict[str, object]]]:
    """
    Automatically fix bloated meshes using bmesh cleanup + decimation.

    Pipeline:
    1. Run bmesh cleanup on ALL meshes (doubles, degenerate, loose)
    2. Decimate non-skinned props still flagged as BLOATED_PROP

    Returns dict with "cleanup" and "decimation" fix lists.
    """
    results: dict[str, list[dict[str, object]]] = {
        "cleanup": [],
        "decimation": [],
    }

    # Phase 1: BMesh cleanup on ALL meshes
    for obj in bpy.data.objects:
        if obj.type != "MESH":
            continue

        mesh = get_mesh_data(obj)
        verts_before = len(mesh.vertices)
        if verts_before < 10:
            continue

        try:
            stats = cleanup_mesh_bmesh(obj)
            if stats and (
                stats["doubles_merged"] > 0
                or stats["degenerate_dissolved"] > 0
                or stats["loose_removed"] > 0
            ):
                results["cleanup"].append({
                    "object": obj.name,
                    "doubles": stats["doubles_merged"],
                    "degenerate": stats["degenerate_dissolved"],
                    "loose": stats["loose_removed"],
                    "verts_saved": stats["verts_before"] - stats["verts_after"],
                })
        except Exception as e:
            log_warn(f"Cleanup failed for {obj.name}: {e}")

    # Phase 2: Decimate bloated props
    bloated_objects = [
        str(w["object"])
        for w in warnings
        if w["issue"] == "BLOATED_PROP" and w["severity"] == "CRITICAL"
    ]

    for obj_name in bloated_objects:
        obj = bpy.data.objects.get(obj_name)
        if not obj or obj.type != "MESH":
            continue

        is_skinned = any(mod.type == "ARMATURE" for mod in obj.modifiers)
        if is_skinned:
            continue

        mesh = get_mesh_data(obj)
        current_verts = len(mesh.vertices)
        if current_verts <= BLOAT_THRESHOLDS["prop_critical"]:
            continue

        target = int(BLOAT_THRESHOLDS["prop_critical"] * 0.8)

        result = decimate_mesh(obj, target)
        if result:
            orig, new = result
            reduction = ((orig - new) / orig) * 100
            results["decimation"].append({
                "object": obj_name,
                "original": orig,
                "new": new,
                "reduction": reduction,
            })

    return results
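
A minimal usage sketch for the mesh cleaner above (not part of the wheel): it assumes Blender's bpy is importable and a scene is already loaded. The warning dict keys mirror exactly what auto_fix_bloat reads ("object", "issue", "severity"); the object name is a placeholder.

# Hypothetical driver for the cleanup + decimation pipeline above.
# Assumes this runs inside Blender's Python with a scene loaded.
from notso_glb.cleaners.mesh import auto_fix_bloat

# Placeholder warning shaped the way auto_fix_bloat expects it; in the real
# pipeline these come from the bloat analyzer.
bloat_warnings: list[dict[str, object]] = [
    {"object": "Prop_Chair", "issue": "BLOATED_PROP", "severity": "CRITICAL"},
]

fixes = auto_fix_bloat(bloat_warnings)
for entry in fixes["cleanup"]:
    print(f"{entry['object']}: {entry['verts_saved']} verts removed by bmesh cleanup")
for entry in fixes["decimation"]:
    print(f"{entry['object']}: {entry['original']} -> {entry['new']} verts "
          f"({entry['reduction']:.0f}% reduction)")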

notso_glb/cleaners/textures.py
ADDED

@@ -0,0 +1,116 @@
"""Texture resizing functions."""

from __future__ import annotations

from typing import TYPE_CHECKING

import bpy

from notso_glb.utils.logging import bright_cyan
from notso_glb.utils.logging import dim
from notso_glb.utils.logging import log_detail
from notso_glb.utils.logging import log_warn
from notso_glb.utils.logging import magenta

if TYPE_CHECKING:
    from bpy.types import Image

# Images to skip during resize
_SKIP_IMAGES = frozenset(["Render Result", "Viewer Node"])


def _nearest_pot(n: int) -> int:
    """Round to nearest power of two."""
    if n <= 1:
        return 1
    bl = (n - 1).bit_length()
    lower = 1 << (bl - 1)
    upper = 1 << bl
    return lower if (n - lower) < (upper - n) else upper


def _is_power_of_two(n: int) -> bool:
    """Check if n is a power of two."""
    return n > 0 and (n & (n - 1)) == 0


def _should_skip_image(img: Image, max_size: int, force_pot: bool) -> bool:
    """Determine if image should be skipped."""
    if img.name in _SKIP_IMAGES:
        return True
    w, h = img.size[0], img.size[1]
    if w <= max_size and h <= max_size:
        if not force_pot:
            return True
        if _is_power_of_two(w) and _is_power_of_two(h):
            return True
    return False


def _calc_scaled_size(w: int, h: int, max_size: int) -> tuple[int, int]:
    """Calculate new size maintaining aspect ratio."""
    if w > h:
        new_w = min(max_size, w)
        new_h = int(h * (new_w / w))
    else:
        new_h = min(max_size, h)
        new_w = int(w * (new_h / h))
    return new_w, new_h


def _adjust_to_pot(w: int, h: int, max_size: int) -> tuple[int, int]:
    """Adjust dimensions to power-of-two, clamped to max_size."""
    w = _nearest_pot(w)
    h = _nearest_pot(h)
    while w > max_size:
        w //= 2
    while h > max_size:
        h //= 2
    return w, h


def _adjust_to_even(w: int, h: int) -> tuple[int, int]:
    """Adjust dimensions to even numbers."""
    if w % 2 != 0:
        w += 1
    if h % 2 != 0:
        h += 1
    return w, h


def resize_textures(max_size: int = 1024, force_pot: bool = False) -> int:
    """
    Resize all textures larger than max_size.

    Args:
        max_size: Maximum dimension (default 1024)
        force_pot: Force power-of-two dimensions (better GPU compatibility)
    """
    resized = 0

    for img in bpy.data.images:
        if _should_skip_image(img, max_size, force_pot):
            continue

        w, h = img.size[0], img.size[1]
        new_w, new_h = _calc_scaled_size(w, h, max_size)

        if force_pot:
            new_w, new_h = _adjust_to_pot(new_w, new_h, max_size)
        else:
            new_w, new_h = _adjust_to_even(new_w, new_h)

        if new_w == w and new_h == h:
            continue

        try:
            img.scale(new_w, new_h)
            resized += 1
            pot_note = f" {magenta('(POT)')}" if force_pot else ""
            log_detail(
                f"{img.name}: {dim(f'{w}x{h}')} -> {bright_cyan(f'{new_w}x{new_h}')}{pot_note}"
            )
        except Exception as e:
            log_warn(f"Failed to resize {img.name}: {e}")

    return resized
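
A worked example of the sizing helpers above (not part of the wheel). Importing the module pulls in bpy, so this only runs where Blender's Python API is available; the numbers simply trace the arithmetic of _nearest_pot, _calc_scaled_size, and _adjust_to_pot.

# Traces the resize math above; requires an environment where bpy imports.
from notso_glb.cleaners.textures import _adjust_to_pot, _calc_scaled_size, _nearest_pot

print(_nearest_pot(1500))                   # 1024 (476 below vs. 548 above)
print(_nearest_pot(1800))                   # 2048 (closer to the upper power)
print(_calc_scaled_size(4096, 2048, 1024))  # (1024, 512) - aspect ratio kept
print(_adjust_to_pot(1000, 750, 1024))      # (1024, 512) - rounded, clamped to max_size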

notso_glb/cleaners/uv_maps.py
ADDED

@@ -0,0 +1,29 @@
"""UV map cleanup functions."""

from typing import cast

import bpy

from notso_glb.utils import get_mesh_data


def remove_unused_uv_maps(warnings: list[dict[str, object]]) -> int:
    """Remove unused UV maps detected by analyze_unused_uv_maps."""
    removed = 0

    for warn in warnings:
        mesh_name = cast(str, warn["mesh"])
        obj = bpy.data.objects.get(mesh_name)
        if not obj or obj.type != "MESH":
            continue

        mesh = get_mesh_data(obj)
        uv_names = cast(list[str], warn["unused_uvs"])
        for uv_name in uv_names:
            uv_layer = mesh.uv_layers.get(uv_name)
            if uv_layer:
                mesh.uv_layers.remove(uv_layer)
                removed += 1
                print(f" Removed unused UV '{uv_name}' from {obj.name}")

    return removed
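
A minimal sketch of calling the UV cleaner above (not part of the wheel). The keys "mesh" and "unused_uvs" are exactly what remove_unused_uv_maps reads; the mesh and UV layer names are placeholders, and in the real pipeline the list comes from analyze_unused_uv_maps.

# Hypothetical warning list shaped like the analyzer output this function expects.
from notso_glb.cleaners.uv_maps import remove_unused_uv_maps

uv_warnings: list[dict[str, object]] = [
    {"mesh": "Sofa", "unused_uvs": ["UVMap.001", "LightmapUV"]},
]

removed = remove_unused_uv_maps(uv_warnings)
print(f"Removed {removed} unused UV layer(s)")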

notso_glb/cleaners/vertex_groups.py
ADDED

@@ -0,0 +1,34 @@
"""Vertex group cleanup functions."""

import bpy

from notso_glb.utils import get_mesh_data


def clean_vertex_groups() -> int:
    """Remove vertex groups with no weights (empty bone references)."""
    total_removed = 0

    for obj in bpy.data.objects:
        if obj.type != "MESH" or len(obj.vertex_groups) == 0:
            continue

        mesh = get_mesh_data(obj)
        used_groups: set[int] = set()

        for v in mesh.vertices:
            for g in v.groups:
                if g.weight > 0.0001:
                    used_groups.add(g.group)

        unused_names = [
            vg.name for i, vg in enumerate(obj.vertex_groups) if i not in used_groups
        ]

        for name in unused_names:
            vg = obj.vertex_groups.get(name)
            if vg:
                obj.vertex_groups.remove(vg)
                total_removed += 1

    return total_removed
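
A short sketch of running the vertex-group sweep above (not part of the wheel; requires Blender's Python with a scene loaded).

# Drops vertex groups whose weights are all at or below the 0.0001 threshold.
from notso_glb.cleaners.vertex_groups import clean_vertex_groups

dropped = clean_vertex_groups()
print(f"Removed {dropped} empty vertex group(s)")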
notso_glb/cli.py
ADDED

@@ -0,0 +1,330 @@
"""Command-line interface for GLB export optimizer."""

import os
import sys
from enum import Enum
from importlib.metadata import PackageNotFoundError, version
from pathlib import Path
from typing import Annotated

import typer
from rich.console import Console

try:
    __version__ = version("notso-glb")
except PackageNotFoundError:
    __version__ = "unknown"

from notso_glb.utils.constants import DEFAULT_CONFIG
from notso_glb.utils.gltfpack import find_gltfpack

app = typer.Typer(
    name="notso-glb",
    help="Optimize GLB/glTF/blend files for web delivery",
    add_completion=False,
    rich_markup_mode="rich",
    suggest_commands=True,
    no_args_is_help=True,
)
console = Console()


def version_callback(value: bool) -> None:
    if value:
        print(f"notso-glb {__version__}")
        raise typer.Exit()


class ExportFormat(Enum):
    """Output format for glTF export."""

    glb = "glb"
    gltf = "gltf"
    gltf_embedded = "gltf-embedded"


@app.command()
def optimize(
    input_path: Annotated[
        Path,
        typer.Argument(
            help="Input file ([bold green].blend[/], [bold green].glb[/], or [bold green].gltf[/])",
            metavar="FILE",
        ),
    ],
    output: Annotated[
        Path | None,
        typer.Option(
            "--output",
            "-o",
            help="Output path (default: [italic]input_optimized.\\[glb|gltf][/])",
            rich_help_panel="Core Options",
        ),
    ] = DEFAULT_CONFIG["output_path"],
    export_format: Annotated[
        ExportFormat,
        typer.Option(
            "--format",
            "-f",
            help="Output format",
            rich_help_panel="Core Options",
        ),
    ] = ExportFormat.glb,
    use_draco: Annotated[
        bool,
        typer.Option(
            "--draco/--no-draco",
            help="Enable/Disable Draco compression",
            rich_help_panel="Compression & Textures",
        ),
    ] = DEFAULT_CONFIG["use_draco"],
    use_webp: Annotated[
        bool,
        typer.Option(
            "--webp/--no-webp",
            help="Enable/Disable WebP textures",
            rich_help_panel="Compression & Textures",
        ),
    ] = DEFAULT_CONFIG["use_webp"],
    max_texture_size: Annotated[
        int,
        typer.Option(
            help="Max texture size (0=no resize)",
            rich_help_panel="Compression & Textures",
            metavar="PIXELS",
        ),
    ] = DEFAULT_CONFIG["max_texture_size"],
    force_pot: Annotated[
        bool,
        typer.Option(
            "--force-pot/",
            help="Force power-of-two texture dimensions (better GPU compatibility)",
            rich_help_panel="Compression & Textures",
        ),
    ] = DEFAULT_CONFIG["force_pot_textures"],
    analyze_animations: Annotated[
        bool,
        typer.Option(
            "--analyze-animations/--skip-animation-analysis",
            help="Analyze bones for static/animated properties",
            rich_help_panel="Analysis & Optimization",
        ),
    ] = DEFAULT_CONFIG["analyze_animations"],
    check_bloat: Annotated[
        bool,
        typer.Option(
            "--check-bloat/--skip-bloat-check",
            help="Analyze meshes for unreasonable complexity",
            rich_help_panel="Analysis & Optimization",
        ),
    ] = DEFAULT_CONFIG["check_bloat"],
    autofix: Annotated[
        bool,
        typer.Option(
            "--autofix/--stable",
            help="Auto-decimate bloated props, remove unused UVs",
            rich_help_panel="[bold red][EXPERIMENTAL][/]",
        ),
    ] = DEFAULT_CONFIG["experimental_autofix"],
    use_gltfpack: Annotated[
        bool,
        typer.Option(
            "--gltfpack/--no-gltfpack",
            help="Post-process with gltfpack for extra compression (WASM fallback if native unavailable)",
            rich_help_panel="Compression & Textures",
        ),
    ] = True,
    quiet: Annotated[
        bool,
        typer.Option(
            "--quiet/--verbose",
            "-q/-v",
            help="Suppress Blender's verbose output (show only warnings/errors)",
            rich_help_panel="Output",
        ),
    ] = DEFAULT_CONFIG["quiet"],
    version: Annotated[
        bool | None,
        typer.Option(
            "--version",
            "-V",
            callback=version_callback,
            is_eager=True,
            help="Show the version and exit.",
        ),
    ] = None,
) -> None:
    """
    Optimize and export 3D models for the web.
    """
    # Verify input existence before expensive imports
    abs_input_path = os.path.abspath(input_path)
    if not os.path.isfile(abs_input_path):
        console.print(f"[bold red][ERROR][/] File not found: {abs_input_path}")
        raise typer.Exit(code=1)

    ext = os.path.splitext(abs_input_path)[1].lower()

    # Lazy import to keep CLI snappy and avoid bpy issues in help
    from notso_glb.exporters import optimize_and_export

    # Determine if we need to import
    # GLB/glTF files: pass to optimize_and_export for import with timing
    # .blend files: already loaded if running via `blender --python`
    input_for_import: str | None = None
    if ext in (".glb", ".gltf"):
        input_for_import = abs_input_path
    elif ext != ".blend":
        console.print(f"[bold red][ERROR][/] Unsupported format: {ext}")
        console.print(" Supported: .blend, .glb, .gltf")
        raise typer.Exit(code=1)

    # Map CLI format to Blender format
    format_map = {
        "glb": "GLB",
        "gltf": "GLTF_SEPARATE",
        "gltf-embedded": "GLTF_EMBEDDED",
    }
    blender_export_format = format_map.get(export_format.value, "GLB")
    out_ext = ".gltf" if export_format.value.startswith("gltf") else ".glb"

    # Determine output path
    final_output_path: Path
    if output is None:
        base = os.path.splitext(abs_input_path)[0]
        final_output_path = Path(f"{base}_optimized{out_ext}")
    else:
        final_output_path = output.resolve()

    # Run optimization
    result = optimize_and_export(
        output_path=final_output_path,
        export_format=blender_export_format,
        use_draco=use_draco,
        use_webp=use_webp,
        max_texture_size=max_texture_size,
        force_pot_textures=force_pot,
        analyze_animations=analyze_animations,
        check_bloat=check_bloat,
        experimental_autofix=autofix,
        quiet=quiet,
        input_path=input_for_import,
    )

    if not result:
        raise typer.Exit(code=1)

    # Post-process with gltfpack if enabled
    if use_gltfpack:
        from notso_glb.utils.gltfpack import run_gltfpack
        from notso_glb.utils.logging import format_bytes
        from notso_glb.wasm import is_available as wasm_available

        native_available = find_gltfpack() is not None
        wasm_ok = wasm_available()

        if not native_available and not wasm_ok:
            console.print(
                "[bold yellow][WARN][/] gltfpack not found and WASM unavailable, skipping"
            )
        else:
            backend = "native" if native_available else "WASM"
            console.print(f"\n[bold cyan]Running gltfpack ({backend})...[/]")
            original_size = Path(result).stat().st_size

            success, packed_path, msg = run_gltfpack(
                Path(result),
                output_path=Path(result),  # Overwrite original
                texture_compress=True,
                mesh_compress=True,
            )

            if success:
                new_size = packed_path.stat().st_size
                if original_size == 0:
                    reduction = 0.0
                else:
                    reduction = ((original_size - new_size) / original_size) * 100
                console.print(
                    f" [green]gltfpack:[/] {format_bytes(original_size)} -> "
                    f"{format_bytes(new_size)} ([bold green]-{reduction:.0f}%[/])"
                )
            else:
                console.print(f" [bold red][ERROR][/] {msg}")


def main() -> None:
    """Entry point - handles both CLI and Blender UI execution."""
    # Detect execution mode:
    # 1. Direct: ./script.py input.glb
    # 2. Blender: blender --python script.py -- input.glb

    args = sys.argv[1:]

    # Check for '--' separator used in Blender execution
    if "--" in sys.argv:
        idx = sys.argv.index("--")
        args = sys.argv[idx + 1 :]
    elif len(sys.argv) > 0:
        # Check if we are inside Blender (executable check):
        # when run as `blender script.py`, argv[0] is the blender binary.
        executable = os.path.basename(sys.argv[0]).lower()
        if "blender" in executable:
            # Running in Blender without '--' -> UI mode; handled below.
            pass

    # When app() is called directly with a list of args, Typer/Click processes
    # that list; e.g. `notso-glb --help` gives args == ['--help'].
    #
    # "Blender UI mode" (no args, script run inside Blender with a file open):
    # the 'optimize' command requires input_path, so instead of going through
    # the CLI we call optimize_and_export() directly with defaults, relying on
    # the already-loaded bpy.data (the original behavior when no args were given).

    if not args and "blender" in os.path.basename(sys.argv[0]).lower():
        # UI Mode fallback (running inside Blender with an open file, no CLI args)
        from notso_glb.exporters import optimize_and_export

        optimize_and_export(
            output_path=DEFAULT_CONFIG["output_path"],
            use_draco=DEFAULT_CONFIG["use_draco"],
            use_webp=DEFAULT_CONFIG["use_webp"],
            max_texture_size=DEFAULT_CONFIG["max_texture_size"],
            force_pot_textures=DEFAULT_CONFIG["force_pot_textures"],
            analyze_animations=DEFAULT_CONFIG["analyze_animations"],
            check_bloat=DEFAULT_CONFIG["check_bloat"],
            experimental_autofix=DEFAULT_CONFIG["experimental_autofix"],
        )
        return

    if not args:
        # If no arguments provided, print help to stderr preserving colors
        # by invoking the 'optimize' command's help with stdout redirected.
        old_stdout = sys.stdout
        sys.stdout = sys.stderr
        try:
            # invoke 'optimize --help' so we see the command args, not the group help
            app(args=["optimize", "--help"], standalone_mode=False)
        except (SystemExit, typer.Exit):
            pass
        finally:
            sys.stdout = old_stdout
        sys.exit(1)

    # Single-command Typer apps automatically use the command as default
    # No need to prepend 'optimize' - Typer handles this
    app(args=args, standalone_mode=True)


if __name__ == "__main__":
    main()
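
For reference, a hedged sketch of driving the CLI programmatically (not part of the wheel). It reuses the same app(args=..., standalone_mode=...) call pattern that main() uses above and is roughly equivalent to running `notso-glb model.glb -o model_web.glb --draco --webp --max-texture-size 1024` from a shell; the file names are placeholders and the input must exist or the command exits with an error.

# Programmatic invocation of the Typer app defined in notso_glb/cli.py.
# File names are placeholders; standalone_mode=False lets exceptions propagate
# instead of calling sys.exit().
from notso_glb.cli import app

app(
    args=[
        "model.glb",
        "--output", "model_web.glb",
        "--draco",
        "--webp",
        "--max-texture-size", "1024",
    ],
    standalone_mode=False,
)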