model-resolver 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- model_resolver-0.1.0/LICENSE +21 -0
- model_resolver-0.1.0/PKG-INFO +96 -0
- model_resolver-0.1.0/README.md +73 -0
- model_resolver-0.1.0/model_resolver/__init__.py +1 -0
- model_resolver-0.1.0/model_resolver/__main__.py +4 -0
- model_resolver-0.1.0/model_resolver/cli.py +48 -0
- model_resolver-0.1.0/model_resolver/my_glutinit.py +47 -0
- model_resolver-0.1.0/model_resolver/plugin.py +402 -0
- model_resolver-0.1.0/model_resolver/render.py +547 -0
- model_resolver-0.1.0/model_resolver/utils.py +40 -0
- model_resolver-0.1.0/pyproject.toml +43 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 Erwan DAYOT
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: model-resolver
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary:
|
|
5
|
+
License: MIT
|
|
6
|
+
Author: edayot
|
|
7
|
+
Author-email: pro.e.dayot@gmail.com
|
|
8
|
+
Requires-Python: >=3.10,<4.0
|
|
9
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
14
|
+
Requires-Dist: beet (>=0.104.1)
|
|
15
|
+
Requires-Dist: black (>=24.4.2,<25.0.0)
|
|
16
|
+
Requires-Dist: pillow (>=10.3.0,<11.0.0)
|
|
17
|
+
Requires-Dist: pyopengl @ git+https://github.com/mcfletch/pyopengl.git@29b79e8966ba2930a5c44829b02dffc1ca600752
|
|
18
|
+
Requires-Dist: rich (>=13.7.1,<14.0.0)
|
|
19
|
+
Requires-Dist: tqdm (>=4.66.2,<5.0.0)
|
|
20
|
+
Requires-Dist: typer (>=0.12.3,<0.13.0)
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
|
|
23
|
+
# Model Resolver
|
|
24
|
+
|
|
25
|
+
A beet plugin that renders all models in the beet project.
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
## Usage
|
|
29
|
+
|
|
30
|
+
Add the plugin to your pipeline:
|
|
31
|
+
|
|
32
|
+
```yaml
|
|
33
|
+
# beet.yaml
|
|
34
|
+
pipeline:
|
|
35
|
+
(...) # other plugins you may have
|
|
36
|
+
- model_resolver
|
|
37
|
+
|
|
38
|
+
# setup an output directory
|
|
39
|
+
output: build
|
|
40
|
+
|
|
41
|
+
meta:
|
|
42
|
+
model_resolver:
|
|
43
|
+
# load vanilla item models
|
|
44
|
+
load_vanilla: true
|
|
45
|
+
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
Renders are now available in your ctx!
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
## Installation
|
|
52
|
+
|
|
53
|
+
### Windows
|
|
54
|
+
|
|
55
|
+
Install https://visualstudio.microsoft.com/fr/visual-cpp-build-tools/ and add C++ build tools in the installation.
|
|
56
|
+
|
|
57
|
+
### Ubuntu
|
|
58
|
+
|
|
59
|
+
Generally, you don't need to install anything, but if you have an error, you can try to install the following packages:
|
|
60
|
+
|
|
61
|
+
```bash
|
|
62
|
+
sudo apt-get -y install \
|
|
63
|
+
freeglut3-dev \
|
|
64
|
+
libgl1-mesa-dev \
|
|
65
|
+
libxcursor-dev \
|
|
66
|
+
libpulse-dev \
|
|
67
|
+
libxinerama-dev \
|
|
68
|
+
libxrandr-dev \
|
|
69
|
+
libxv-dev \
|
|
70
|
+
mesa-utils \
|
|
71
|
+
libgl1-mesa-glx \
|
|
72
|
+
mesa-common-dev \
|
|
73
|
+
libglapi-mesa \
|
|
74
|
+
libgbm1 \
|
|
75
|
+
libgl1-mesa-dri \
|
|
76
|
+
libsdl1.2-dev \
|
|
77
|
+
libfreetype6-dev \
|
|
78
|
+
xvfb \
|
|
79
|
+
x11-utils
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
This is particularly useful in CI, see [the github action](./.github/workflows/artifact.yml) for an example.
|
|
83
|
+
|
|
84
|
+
### Common installation
|
|
85
|
+
|
|
86
|
+
Using poetry, add this to your pyproject.toml file:
|
|
87
|
+
|
|
88
|
+
```toml
|
|
89
|
+
[tool.poetry.dependencies]
|
|
90
|
+
# (other dependencies ...)
|
|
91
|
+
model-resolver = {git = "https://github.com/edayot/model_resolver.git", branch = "master"}
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
# Model Resolver
|
|
2
|
+
|
|
3
|
+
A beet plugin that renders all models in the beet project.
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
## Usage
|
|
7
|
+
|
|
8
|
+
Add the plugin to your pipeline:
|
|
9
|
+
|
|
10
|
+
```yaml
|
|
11
|
+
# beet.yaml
|
|
12
|
+
pipeline:
|
|
13
|
+
(...) # other plugins you may have
|
|
14
|
+
- model_resolver
|
|
15
|
+
|
|
16
|
+
# setup an output directory
|
|
17
|
+
output: build
|
|
18
|
+
|
|
19
|
+
meta:
|
|
20
|
+
model_resolver:
|
|
21
|
+
# load vanilla item models
|
|
22
|
+
load_vanilla: true
|
|
23
|
+
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
Renders are now available in your ctx!
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
## Installation
|
|
30
|
+
|
|
31
|
+
### Windows
|
|
32
|
+
|
|
33
|
+
Install https://visualstudio.microsoft.com/fr/visual-cpp-build-tools/ and add C++ build tools in the installation.
|
|
34
|
+
|
|
35
|
+
### Ubuntu
|
|
36
|
+
|
|
37
|
+
Generally, you don't need to install anything, but if you have an error, you can try to install the following packages:
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
sudo apt-get -y install \
|
|
41
|
+
freeglut3-dev \
|
|
42
|
+
libgl1-mesa-dev \
|
|
43
|
+
libxcursor-dev \
|
|
44
|
+
libpulse-dev \
|
|
45
|
+
libxinerama-dev \
|
|
46
|
+
libxrandr-dev \
|
|
47
|
+
libxv-dev \
|
|
48
|
+
mesa-utils \
|
|
49
|
+
libgl1-mesa-glx \
|
|
50
|
+
mesa-common-dev \
|
|
51
|
+
libglapi-mesa \
|
|
52
|
+
libgbm1 \
|
|
53
|
+
libgl1-mesa-dri \
|
|
54
|
+
libsdl1.2-dev \
|
|
55
|
+
libfreetype6-dev \
|
|
56
|
+
xvfb \
|
|
57
|
+
x11-utils
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
This is particularly useful in CI, see [the github action](./.github/workflows/artifact.yml) for an example.
|
|
61
|
+
|
|
62
|
+
### Common installation
|
|
63
|
+
|
|
64
|
+
Using poetry, add this to your pyproject.toml file:
|
|
65
|
+
|
|
66
|
+
```toml
|
|
67
|
+
[tool.poetry.dependencies]
|
|
68
|
+
# (other dependencies ...)
|
|
69
|
+
model-resolver = {git = "https://github.com/edayot/model_resolver.git", branch = "master"}
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from model_resolver.plugin import *
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import typer
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from beet import run_beet, ProjectConfig
|
|
4
|
+
from time import perf_counter
|
|
5
|
+
from rich import print
|
|
6
|
+
|
|
7
|
+
app = typer.Typer(
|
|
8
|
+
rich_markup_mode="markdown",
|
|
9
|
+
)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# a simple command
|
|
13
|
+
@app.command()
def main(
    # fmt: off
    load_vanilla: bool = typer.Option(False, help="Load vanilla model"),
    use_cache: bool = typer.Option(False, help="Use cache for model rendering"),
    render_size: int = typer.Option(256, help="Size of the rendered image"),
    load_dir: Path = typer.Option(Path.cwd(), help="Directory where the resourcepack is located"),
    output_dir: Path = typer.Option(Path.cwd() / "build", help="Where you want to save the new resourcepack, with new textures corresponding to the model"),
    minecraft_version: str = typer.Option("latest", help="Minecraft version to use for vanilla models")
    # fmt: on
):
    """
    A simple CLI to render models from a resourcepack, can also load vanilla models.
    """
    t_start = perf_counter()
    # Build an in-memory beet project configuration mirroring the CLI options;
    # the options are forwarded to the plugin through ctx.meta["model_resolver"].
    config = ProjectConfig(
        pipeline=["model_resolver"],
        output=output_dir,
        resource_pack={"load": load_dir, "name": load_dir.name},
        meta={
            "model_resolver": {
                "load_vanilla": load_vanilla,
                "use_cache": use_cache,
                "render_size": render_size,
                "minecraft_version": minecraft_version,
            },
        },
    )
    # Running the pipeline performs all rendering work; no extra step needed
    # inside the context.
    with run_beet(config=config) as ctx:
        pass
    t_end = perf_counter()
    print(f"[green][bold]✔️[/bold] Finished in {t_end - t_start:.2f} seconds [/green]")
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# Run the Typer application when this module is executed as a script.
if __name__ == "__main__":
    app()
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
# fmt: off
|
|
2
|
+
from OpenGL.GLUT import special
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
INITIALIZED = False
|
|
7
|
+
def glutInit( *args ):
    """Initialise the GLUT library and return the surviving argument list.

    Accepts the raw API style ``(count, arg, ...)``, a single string/bytes
    argument, a sequence of arguments, or nothing at all.  The current
    working directory is restored after the call, since the underlying
    glutInit may change it on some platforms.
    """
    # Bug fix: without the ``global`` statement the assignment below created
    # a dead local variable and the module-level INITIALIZED flag was never
    # actually set.
    global INITIALIZED
    INITIALIZED = True
    if args:
        arg,args = args[0],args[1:]
        count = None
        if isinstance(arg, special.integer_types):
            # raw API style, (count, values)
            count = arg
            if count != len(args):
                raise ValueError( """Specified count of %s does not match length (%s) of argument list %s"""%(
                    count, len(args), args,
                ))
        elif isinstance( arg, (bytes,special.unicode)):
            # passing in a sequence of strings as individual arguments
            args = (arg,)+args
            count = len(args)
        else:
            # a single sequence argument holds the whole argv
            args = arg
            count = len(args)
    else:
        count=0
        args = []
    args = [special.as_8_bit(x) for x in args]
    if not count:
        # GLUT requires at least a dummy program name in argv.
        count, args = 1, [special.as_8_bit('foo')]
    holder = (special.ctypes.c_char_p * len(args))()
    for i,arg in enumerate(args):
        holder[i] = arg
    count = special.ctypes.c_int( count )
    import os
    currentDirectory = os.getcwd()
    try:
        # XXX need to check for error condition here...
        special._base_glutInit( special.ctypes.byref(count), holder )
    finally:
        # glutInit may chdir (e.g. on macOS); always restore the cwd.
        os.chdir( currentDirectory )
    # GLUT consumes the arguments it recognises; return what is left.
    return [
        holder[i] for i in range( count.value )
    ]
glutInit.wrappedOperation = special._simple.glutInit
|
|
@@ -0,0 +1,402 @@
|
|
|
1
|
+
from beet import Context, Model, Texture
|
|
2
|
+
from beet.contrib.vanilla import Vanilla
|
|
3
|
+
from beet.core.cache import Cache
|
|
4
|
+
from beet import NamespaceProxyDescriptor
|
|
5
|
+
from rich import print
|
|
6
|
+
from model_resolver.render import Render
|
|
7
|
+
from copy import deepcopy
|
|
8
|
+
from typing import TypedDict
|
|
9
|
+
from PIL import Image
|
|
10
|
+
import json
|
|
11
|
+
from model_resolver.utils import load_textures
|
|
12
|
+
import numpy as np
|
|
13
|
+
import hashlib
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def beet_default(ctx: Context):
    """Beet plugin entrypoint: render every model in the project to a texture.

    Options are read from ``ctx.meta["model_resolver"]``:
    ``load_vanilla``, ``use_cache``, ``render_size``, ``minecraft_version``.
    Rendered images are written back into ``ctx.assets.textures`` under
    ``<namespace>:render/<model>``; intermediate textures/models are removed
    at the end by ``clean_generated``.
    """
    load_vanilla = ctx.meta.get("model_resolver", {}).get("load_vanilla", False)
    use_cache = ctx.meta.get("model_resolver", {}).get("use_cache", False)
    render_size = ctx.meta.get("model_resolver", {}).get("render_size", 1024)
    minecraft_version = ctx.meta.get("model_resolver", {}).get(
        "minecraft_version", "latest"
    )

    vanilla = ctx.inject(Vanilla)
    if not minecraft_version == "latest":
        # Pin the vanilla assets to a specific release.
        vanilla = vanilla.releases[minecraft_version]
    generated_models = set()
    generated_textures = set()

    # Expand "paletted_permutations" atlas sources into concrete textures.
    for atlas in ctx.assets.atlases:
        resolve_atlas(ctx, vanilla, ctx, atlas, generated_textures)
    if load_vanilla:
        for atlas in vanilla.assets.atlases:
            resolve_atlas(ctx, vanilla, vanilla, atlas, generated_textures)
        render_vanilla(ctx, vanilla, generated_models)

    # Invalidate the cache when it is empty or when the render settings
    # differ from the ones used to populate it.
    cache = ctx.cache.get("model_resolver")
    if not "models" in cache.json:
        cache.json["models"] = {}
        cache.json["render_size"] = render_size
        cache.json["minecraft_version"] = minecraft_version
        use_cache = False
    if (
        not cache.json["render_size"] == render_size
        or not cache.json["minecraft_version"] == minecraft_version
    ):
        use_cache = False
        cache.json["render_size"] = render_size
        cache.json["minecraft_version"] = minecraft_version

    models = {}
    for model in set(ctx.assets.models.keys()):
        # Flatten the parent chain, then bake "builtin/generated" item models.
        resolved_model = resolve_model(ctx.assets.models[model], vanilla.assets.models)
        resolved_model = bake_model(
            resolved_model, ctx, vanilla, model, generated_textures
        )
        if not "textures" in resolved_model.data:
            continue
        if model in cache.json["models"] and use_cache:
            img = handle_cache(cache, model, resolved_model, ctx, vanilla)
            if img is not None:
                # load cached image in ctx
                model_name = model.split(":")
                texture_path = f"{model_name[0]}:render/{model_name[1]}"
                ctx.assets.textures[texture_path] = Texture(img)
                continue

        models[model] = resolved_model.data

    # Expand models using animated textures into one model per frame group.
    models = handle_animations(models, ctx, vanilla, generated_textures)

    if len(models) > 0:
        Render(models, ctx, vanilla).render()

    clean_generated(ctx, generated_textures, generated_models)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def handle_cache(cache: Cache, model, resolved_model, ctx, vanilla):
    """Return the cached render for ``model`` if still valid, else ``None``.

    A cache entry is valid when both the hash of the resolved model data and
    the per-texture content hashes match what was recorded when the image
    was rendered.
    """
    entry = cache.json["models"][model]

    current_model_hash = hashlib.sha256(str(resolved_model.data).encode()).hexdigest()
    if current_model_hash != entry["model"]:
        return None

    loaded = load_textures(resolved_model.data["textures"], ctx, vanilla)
    current_texture_hashes = {
        name: hashlib.sha256(loaded[name].tobytes()).hexdigest()
        for name in resolved_model.data["textures"]
    }
    if current_texture_hashes != entry["textures"]:
        return None

    # Everything matches: reuse the previously rendered image.
    return Image.open(cache.get_path(f"{model}.png"))
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def render_vanilla(ctx: Context, vanilla: Vanilla, models: set[str]):
    """Copy every vanilla ``minecraft:*`` model into the project for rendering.

    Models whose parent is ``builtin/entity`` are skipped, and project models
    are never overwritten.  Each copied model name is recorded in *models* so
    it can be cleaned up afterwards.
    """
    source = vanilla.assets.models
    for name in source.match("minecraft:*"):
        if source[name].data.get("parent") == "builtin/entity":
            continue
        if name in ctx.assets.models:
            continue
        ctx.assets.models[name] = source[name]
        models.add(name)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
class Atlas(TypedDict):
    """Schema of a "paletted_permutations" source entry in an atlas file."""

    type: str  # source type, expected to be "paletted_permutations"
    textures: list[str]  # grayscale base textures to recolor
    palette_key: str  # resource path of the reference palette texture
    permutations: dict[str, str]  # variant suffix -> color palette texture path
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def clean_generated(
    ctx: Context, generated_textures: set[str], generated_models: set[str]
):
    """Remove the temporary textures and models that resolution added to ctx."""
    for texture_path in generated_textures:
        if texture_path not in ctx.assets.textures:
            continue
        del ctx.assets.textures[texture_path]
    for model_path in generated_models:
        if model_path not in ctx.assets.models:
            continue
        del ctx.assets.models[model_path]
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def resolve_atlas(
    ctx: Context,
    vanilla: Vanilla,
    used_ctx: Context | Vanilla,
    atlas: str,
    generated_textures: set[str],
):
    """Materialize "paletted_permutations" atlas sources as real textures.

    For every (texture, variant) pair declared by the atlas, recolors the
    grayscale base texture with the variant's color palette and stores the
    result as ``<texture>_<variant>`` in ``ctx.assets.textures``.  Every
    created path is added to *generated_textures* for later cleanup.

    NOTE(review): if a palette/color-palette/grayscale key is present in
    neither ctx nor vanilla, the corresponding local variable is left unbound
    and ``apply_palette`` raises NameError — confirm inputs are always
    resolvable.
    """
    for source in used_ctx.assets.atlases[atlas].data["sources"]:
        # Only paletted permutation sources produce derived textures.
        if source["type"] != "paletted_permutations":
            continue
        source: Atlas
        for texture in source["textures"]:
            for variant, color_palette in source["permutations"].items():
                new_texture_path = f"{texture}_{variant}"
                new_texture_path = resolve_key(new_texture_path)

                # Reference palette: project assets take precedence over vanilla.
                palette_key = resolve_key(source["palette_key"])
                if palette_key in ctx.assets.textures:
                    palette = ctx.assets.textures[palette_key].image
                elif palette_key in vanilla.assets.textures:
                    palette = vanilla.assets.textures[palette_key].image

                color_palette_key = resolve_key(color_palette)
                if color_palette_key in ctx.assets.textures:
                    color_palette = ctx.assets.textures[
                        color_palette_key
                    ].image  # color palette
                elif color_palette_key in vanilla.assets.textures:
                    color_palette = vanilla.assets.textures[
                        color_palette_key
                    ].image  # color palette

                # Grayscale base texture to recolor.
                grayscale_key = resolve_key(texture)
                if grayscale_key in ctx.assets.textures:
                    grayscale = ctx.assets.textures[grayscale_key].image
                elif grayscale_key in vanilla.assets.textures:
                    grayscale = vanilla.assets.textures[grayscale_key].image

                new_texture = apply_palette(grayscale, palette, color_palette)

                ctx.assets.textures[new_texture_path] = Texture(new_texture)
                generated_textures.add(new_texture_path)
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def apply_palette(
    texture: Image.Image, palette: Image.Image, color_palette: Image.Image
) -> Image.Image:
    """Recolor *texture* by substituting colors found in *palette*.

    Each RGB value of *texture* that appears in *palette* is replaced by the
    color at the same position in *color_palette* (alpha is preserved);
    pixels not present in the palette are copied unchanged.

    Performance fix: the palette is scanned once up front into a lookup dict
    instead of being rescanned for every pixel (was O(W*H*P)).  The original
    first-match semantics are preserved: when a color appears several times
    in the palette, the first occurrence in column-major scan order
    (i over width, then j over height) wins — hence ``setdefault``.
    """
    new_image = Image.new("RGBA", texture.size)
    texture = texture.convert("RGBA")
    palette = palette.convert("RGB")
    color_palette = color_palette.convert("RGB")

    color_map: dict[tuple, tuple] = {}
    for i in range(palette.width):
        for j in range(palette.height):
            color_map.setdefault(
                palette.getpixel((i, j)), color_palette.getpixel((i, j))
            )

    for x in range(texture.width):
        for y in range(texture.height):
            pixel = texture.getpixel((x, y))
            color = pixel[:3]
            alpha = pixel[3]
            new_color = color_map.get(color)
            if new_color is not None:
                new_image.putpixel((x, y), new_color + (alpha,))
            else:
                # Color not in the palette: keep the original pixel.
                new_image.putpixel((x, y), pixel)
    return new_image
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def resolve_key(key: str) -> str:
    """Qualify a resource location with the default "minecraft" namespace."""
    if ":" in key:
        return key
    return f"minecraft:{key}"
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def merge_model(child: Model, parent: Model) -> Model:
    """Overlay the properties of *child* on top of its resolved *parent*.

    Follows vanilla model inheritance: textures merge key by key, while
    elements, display entries, overrides, gui_light and ambientocclusion
    are taken from the child when it defines them.

    NOTE(review): parent.data is shallow-copied, so updating "textures"
    mutates the parent's dict — callers deepcopy the parent first
    (see resolve_model).
    """
    merged = parent.data.copy()

    if "textures" in child.data:
        merged["textures"] = {} if "textures" not in merged else merged["textures"]
        merged["textures"].update(child.data["textures"])
    if "elements" in child.data:
        merged["elements"] = child.data["elements"]
    if "display" in child.data:
        # Bug fix: a parent without a "display" section used to raise KeyError.
        merged.setdefault("display", {})
        for key in child.data["display"].keys():
            merged["display"][key] = child.data["display"][key]
    if "ambientocclusion" in child.data:
        merged["ambientocclusion"] = child.data["ambientocclusion"]
    if "overrides" in child.data:
        merged["overrides"] = child.data["overrides"]
    if "gui_light" in child.data:
        merged["gui_light"] = child.data["gui_light"]

    return Model(merged)
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def resolve_model(model: Model, vanilla_models: dict[str, Model]) -> Model:
    """Recursively flatten a model's parent chain into a single model.

    Built-in parents ("builtin/generated", "builtin/entity") stop the
    recursion: those models are returned as-is and handled at bake/render
    time.  The parent is deep-copied before resolution so merging never
    mutates the shared vanilla model data.
    """
    if "parent" not in model.data:
        return model

    parent_key = resolve_key(model.data["parent"])
    if parent_key in [
        "minecraft:builtin/generated",
        "minecraft:builtin/entity",
    ]:
        return model

    parent_copy = deepcopy(vanilla_models[parent_key])
    flattened_parent = resolve_model(parent_copy, vanilla_models)
    return merge_model(model, flattened_parent)
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
def bake_model(
    model: Model,
    ctx: Context,
    vanilla: Vanilla,
    model_name: str,
    generated_textures: set[str],
):
    """Turn a "builtin/generated" item model into a renderable flat model.

    Stacks the model's ``layerN`` textures bottom-to-top onto a single
    16x16 image, registers that image as a ``debug:`` texture (recorded in
    *generated_textures* for cleanup) and returns a generated one-quad item
    model using it.  Any other model is returned unchanged.
    """
    if "parent" in model.data:
        if model.data["parent"] in ["builtin/generated"]:
            if "textures" in model.data:
                textures = load_textures(model.data["textures"], ctx, vanilla)
                # Find the highest layer index.  (Renamed from `max`, which
                # shadowed the builtin.)
                max_layer = 0
                for key in textures.keys():
                    if not key.startswith("layer"):
                        continue
                    index = int(key[5:])
                    if index > max_layer:
                        max_layer = index
                # Composite every layer in order onto a transparent canvas.
                # NOTE(review): a missing intermediate layer (e.g. layer0
                # absent while layer1 exists) would make `texture` None and
                # break paste — confirm layers are always contiguous.
                img = Image.new("RGBA", (16, 16), (0, 0, 0, 0))
                for i in range(max_layer + 1):
                    texture = textures.get(f"layer{i}")
                    img.paste(texture, (0, 0), texture)
                new_texture = f"debug:{model_name.replace(':', '/')}"
                ctx.assets.textures[new_texture] = Texture(img)
                generated_textures.add(new_texture)
                return Model(generate_item_model(new_texture))
    return model
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def generate_item_model(texture: str):
    """Build a flat one-quad model that displays *texture* like a vanilla item.

    The model exposes a single 16x16 north-facing quad plus the standard
    vanilla display transforms (hands, ground, gui, head, fixed).
    """
    flat_quad = {
        "from": [0, 0, 0],
        "to": [16, 16, 0],
        "faces": {"north": {"uv": [0, 0, 16, 16], "texture": "#layer"}},
    }
    display_transforms = {
        "thirdperson_righthand": {
            "rotation": [0, -90, 55],
            "translation": [0, 4, 0.5],
            "scale": [0.85, 0.85, 0.85],
        },
        "thirdperson_lefthand": {
            "rotation": [0, 90, -55],
            "translation": [0, 4, 0.5],
            "scale": [0.85, 0.85, 0.85],
        },
        "firstperson_righthand": {
            "rotation": [0, -90, 25],
            "translation": [1.13, 3.2, 1.13],
            "scale": [0.68, 0.68, 0.68],
        },
        "firstperson_lefthand": {
            "rotation": [0, 90, -25],
            "translation": [1.13, 3.2, 1.13],
            "scale": [0.68, 0.68, 0.68],
        },
        "ground": {"translation": [0, 2, 0], "scale": [0.5, 0.5, 0.5]},
        "gui": {"rotation": [180, 0, 180]},
        "head": {"rotation": [0, 180, 0], "translation": [0, 13, 7]},
        "fixed": {"rotation": [0, 180, 0]},
    }
    return {
        "credit": "Made with Blockbench",
        "textures": {"particle": texture, "layer": texture},
        "elements": [flat_quad],
        "display": display_transforms,
    }
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
def is_animated(texture_path: str, ctx: Context, vanilla: Vanilla):
    """Tell whether *texture_path* has an animation mcmeta in either source."""
    return (
        texture_path in ctx.assets.textures_mcmeta
        or texture_path in vanilla.assets.textures_mcmeta
    )
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
def get_thing(
    path, ctx_proxy: NamespaceProxyDescriptor, vanilla_proxy: NamespaceProxyDescriptor
):
    """Fetch *path* from the project proxy, falling back to the vanilla one.

    Raises:
        ValueError: if *path* exists in neither proxy.
    """
    for proxy in (ctx_proxy, vanilla_proxy):
        if path in proxy:
            return proxy[path]
    raise ValueError(f"Texture {path} not found in ctx or vanilla")
|
|
329
|
+
|
|
330
|
+
|
|
331
|
+
def handle_animations(
    models: dict[str, dict],
    ctx: Context,
    vanilla: Vanilla,
    generated_textures: set[str],
):
    """Expand models that use animated textures into one model per frame group.

    For each model referencing at least one animated texture, every frame of
    every animated texture is extracted into a temporary ``debug:`` texture,
    then the animation cycle (LCM of all per-texture cycle lengths, in ticks)
    is walked tick by tick.  Consecutive ticks showing the same texture set
    are grouped, and each group becomes a new model named
    ``<model>/<group_index>_<tick_count>``; the original model is removed.
    Models without animated textures pass through untouched.
    """
    for model in set(models.keys()):
        if not "textures" in models[model]:
            continue
        textures = models[model]["textures"]
        # Skip models with no animated texture at all (this also guarantees
        # `frametimes` below is non-empty for the models we do process).
        if not any(
            [is_animated(textures[key], ctx, vanilla) for key in textures.keys()]
        ):
            continue
        frametimes = []
        animated_cache = {}
        for key, value in textures.items():
            if not is_animated(value, ctx, vanilla):
                continue
            texture = get_thing(value, ctx.assets.textures, vanilla.assets.textures)
            texture_mcmeta = get_thing(
                value, ctx.assets.textures_mcmeta, vanilla.assets.textures_mcmeta
            )
            frametime = texture_mcmeta.data["animation"].get("frametime", 1)

            img = texture.image
            # generate all possible frames for the animation
            # (animated textures are vertical strips of square frames)
            width = img.width
            height = img.height
            frames = []
            for i in range(height // width):
                cropped = img.crop((0, i * width, width, (i + 1) * width))
                texture_temp_path = f"debug:{model.replace(':', '/')}/{key}/{i}"
                ctx.assets.textures[texture_temp_path] = Texture(cropped)
                generated_textures.add(texture_temp_path)
                frames.append(texture_temp_path)

            # Full cycle length of this texture, in ticks.
            frametimes.append(frametime * len(frames))

            animated_cache[key] = {"frames": frames, "frametime": frametime}
        # Number of ticks before the combined animation state repeats.
        total_number_of_frames = np.lcm.reduce(frametimes)
        L = []
        for tick in range(total_number_of_frames):
            current_textures = {}
            for key, value in animated_cache.items():
                frametime = value[
                    "frametime"
                ]  # the number of ticks a frame is displayed
                frame_index = (tick // frametime) % len(value["frames"])
                frame = value["frames"][frame_index]
                current_textures[key] = frame
            L.append(current_textures)
        # group L into chunks where current_textures are the same
        # for each chunk, create a new model with the textures

        # Run-length encode the per-tick texture assignments:
        # each entry is [texture_mapping, number_of_consecutive_ticks].
        L_grouped = []
        for i in range(len(L)):
            if i == 0:
                L_grouped.append([L[i], 1])
                continue
            if L[i] == L[i - 1]:
                L_grouped[-1][1] += 1
            else:
                L_grouped.append([L[i], 1])
        for i, (current_textures, count) in enumerate(L_grouped):
            # The duration is encoded in the model name for downstream use.
            new_model_path = f"{model}/{i}_{count}"
            new_model = deepcopy(models[model])
            new_model["textures"].update(current_textures)
            models[new_model_path] = new_model
        # The original model is replaced by its per-frame variants.
        del models[model]

    return models
|
|
@@ -0,0 +1,547 @@
|
|
|
1
|
+
from OpenGL.GL import *
|
|
2
|
+
from OpenGL.GLUT import *
|
|
3
|
+
from OpenGL.GLU import *
|
|
4
|
+
from model_resolver.my_glutinit import glutInit
|
|
5
|
+
|
|
6
|
+
from PIL import Image
|
|
7
|
+
|
|
8
|
+
from beet import Context, Texture
|
|
9
|
+
from beet.contrib.vanilla import Vanilla
|
|
10
|
+
|
|
11
|
+
from math import cos, sin, pi
|
|
12
|
+
from rich import print
|
|
13
|
+
import hashlib
|
|
14
|
+
from model_resolver.utils import load_textures
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class RenderError(Exception):
    """Module-specific exception type for rendering failures."""

    pass
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class Render:
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
model = {"model:model":{
|
|
25
|
+
'gui_light': 'side',
|
|
26
|
+
'display': {
|
|
27
|
+
'gui': {'rotation': [30, 225, 0], 'translation': [0, 0, 0], 'scale': [0.625, 0.625, 0.625]},
|
|
28
|
+
'ground': {'rotation': [0, 0, 0], 'translation': [0, 3, 0], 'scale': [0.25, 0.25, 0.25]},
|
|
29
|
+
'fixed': {'rotation': [-90, 0, 0], 'translation': [0, 0, -16], 'scale': [2.001, 2.001, 2.001]},
|
|
30
|
+
'thirdperson_righthand': {'rotation': [75, 45, 0], 'translation': [0, 2.5, 0], 'scale': [0.375, 0.375, 0.375]},
|
|
31
|
+
'firstperson_righthand': {'rotation': [0, 135, 0], 'translation': [0, 0, 0], 'scale': [0.4, 0.4, 0.4]},
|
|
32
|
+
'firstperson_lefthand': {'rotation': [0, 225, 0], 'translation': [0, 0, 0], 'scale': [0.4, 0.4, 0.4]}
|
|
33
|
+
},
|
|
34
|
+
'elements': [
|
|
35
|
+
{
|
|
36
|
+
'from': [0, 0, 0],
|
|
37
|
+
'to': [16, 16, 16],
|
|
38
|
+
'faces': {
|
|
39
|
+
'down': {'texture': '#down', 'cullface': 'down'},
|
|
40
|
+
'up': {'texture': '#up', 'cullface': 'up'},
|
|
41
|
+
'north': {'texture': '#north', 'cullface': 'north'},
|
|
42
|
+
'south': {'texture': '#south', 'cullface': 'south'},
|
|
43
|
+
'west': {'texture': '#west', 'cullface': 'west'},
|
|
44
|
+
'east': {'texture': '#east', 'cullface': 'east'}
|
|
45
|
+
}
|
|
46
|
+
}
|
|
47
|
+
],
|
|
48
|
+
'textures': {
|
|
49
|
+
'particle': 'simpledrawer:block/drawers_wood_side',
|
|
50
|
+
'down': '#bottom',
|
|
51
|
+
'up': '#top',
|
|
52
|
+
'north': '#front',
|
|
53
|
+
'east': '#side',
|
|
54
|
+
'south': '#side',
|
|
55
|
+
'west': '#side',
|
|
56
|
+
'top': 'simpledrawer:block/drawers_wood_side',
|
|
57
|
+
'bottom': 'simpledrawer:block/drawers_wood_side',
|
|
58
|
+
'side': 'simpledrawer:block/drawers_wood_side',
|
|
59
|
+
'front': 'simpledrawer:block/drawers_wood_front'
|
|
60
|
+
},
|
|
61
|
+
'ambientocclusion': False
|
|
62
|
+
}}
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
"""
|
|
66
|
+
|
|
67
|
+
def __init__(self, models: dict[dict], ctx: Context, vanilla: Vanilla):
|
|
68
|
+
self.models = models
|
|
69
|
+
self.ctx = ctx
|
|
70
|
+
self.vanilla = vanilla
|
|
71
|
+
self.size = ctx.meta.get("model_resolver", {}).get("render_size", 1024)
|
|
72
|
+
|
|
73
|
+
self.model_list = list(self.models.keys())
|
|
74
|
+
self.model_list.sort()
|
|
75
|
+
self.current_model_index = 0
|
|
76
|
+
self.textures_bindings = {}
|
|
77
|
+
self.textures_size = {}
|
|
78
|
+
self.textures = load_textures(
|
|
79
|
+
self.models[self.model_list[self.current_model_index]]["textures"],
|
|
80
|
+
self.ctx,
|
|
81
|
+
self.vanilla,
|
|
82
|
+
)
|
|
83
|
+
self.reset_camera()
|
|
84
|
+
self.frame_count = 0
|
|
85
|
+
|
|
86
|
+
def reset_camera(self):
|
|
87
|
+
self.translate = [0, 0, 0]
|
|
88
|
+
self.rotate = [0, 0, 0]
|
|
89
|
+
|
|
90
|
+
def reload(self):
|
|
91
|
+
self.textures_bindings = {}
|
|
92
|
+
self.textures = load_textures(
|
|
93
|
+
self.models[self.model_list[self.current_model_index]]["textures"],
|
|
94
|
+
self.ctx,
|
|
95
|
+
self.vanilla,
|
|
96
|
+
)
|
|
97
|
+
self.generate_textures_bindings()
|
|
98
|
+
|
|
99
|
+
def generate_textures_bindings(self):
|
|
100
|
+
self.textures_bindings = {}
|
|
101
|
+
for key, value in self.textures.items():
|
|
102
|
+
tex_id = glGenTextures(1)
|
|
103
|
+
glBindTexture(GL_TEXTURE_2D, tex_id)
|
|
104
|
+
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
|
|
105
|
+
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
|
|
106
|
+
img_data = value.tobytes("raw", "RGBA")
|
|
107
|
+
glTexImage2D(
|
|
108
|
+
GL_TEXTURE_2D,
|
|
109
|
+
0,
|
|
110
|
+
GL_RGBA,
|
|
111
|
+
value.width,
|
|
112
|
+
value.height,
|
|
113
|
+
0,
|
|
114
|
+
GL_RGBA,
|
|
115
|
+
GL_UNSIGNED_BYTE,
|
|
116
|
+
img_data,
|
|
117
|
+
)
|
|
118
|
+
self.textures_bindings[key] = tex_id
|
|
119
|
+
self.textures_size[key] = value.size
|
|
120
|
+
|
|
121
|
+
def render(self):
    """Run the full GLUT render loop over all models.

    Creates a hidden double-buffered window (rendering itself happens
    off-screen in draw_buffer), configures lighting/material state once,
    then hands control to glutMainLoop; display() drives model advancement
    and calls glutLeaveMainLoop when every model has been rendered.
    """
    glutInit()

    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH)
    glutInitWindowSize(self.size, self.size)
    glutInitWindowPosition(100, 100)
    glutCreateWindow(b"Isometric View")
    # The window is never shown; it only provides a GL context.
    glutHideWindow()
    # Make glutMainLoop() return instead of exiting the process.
    glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_GLUTMAINLOOP_RETURNS)
    glClearColor(0.0, 0.0, 0.0, 0.0)

    # Enable lighting

    # Directional light (w=0.0 means the position is a direction vector).
    glLightfv(GL_LIGHT0, GL_POSITION, [-0.7, -1.0, 0.7, 0.0])
    glLightfv(GL_LIGHT0, GL_DIFFUSE, [1.0, 1.0, 1.0, 1.0])
    glLightfv(GL_LIGHT0, GL_SPECULAR, [0.0, 0.0, 0.0, 1.0])
    glLightModelfv(GL_LIGHT_MODEL_AMBIENT, [0.5, 0.5, 0.5, 1.0])

    # Let glColor* drive the ambient/diffuse material color.
    glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
    glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, [1.0, 1.0, 1.0, 1.0])
    glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50.0)

    # Load textures for the first model and create their GL bindings.
    self.reload()

    glutDisplayFunc(self.display)
    glutReshapeFunc(self.reshape)
    # Idle callback re-triggers display so models render back-to-back.
    glutIdleFunc(self.display)

    glutMainLoop()
|
|
150
|
+
|
|
151
|
+
def cache_in_ctx(self, img: Image.Image):
    """Persist the rendered image and content hashes in the beet cache.

    No-op unless the ``model_resolver.use_cache`` meta option is set.
    Stores a sha256 of the model dict's repr plus one per texture, so a
    later run can detect whether the cached PNG is still valid.

    Args:
        img: The rendered image for the current model.
    """
    use_cache = self.ctx.meta.get("model_resolver", {}).get("use_cache", False)
    if not use_cache:
        return

    current_model = self.model_list[self.current_model_index]
    # Hash the model's str() repr — sensitive to dict ordering, which is
    # stable for a given build since dicts preserve insertion order.
    model_hash = hashlib.sha256(
        str(self.models[current_model]).encode()
    ).hexdigest()

    textures_hash = {}
    for key, value in self.textures.items():
        textures_hash[key] = hashlib.sha256(value.tobytes()).hexdigest()

    cache = self.ctx.cache.get("model_resolver")

    # NOTE(review): assumes cache.json already has a "models" key —
    # presumably initialized elsewhere in the plugin; verify against caller.
    cache.json["models"][current_model] = {
        "model": model_hash,
        "textures": textures_hash,
    }
    save_path = cache.get_path(f"{current_model}.png")
    with open(save_path, "wb") as f:
        img.save(f, "PNG")
|
|
174
|
+
|
|
175
|
+
def display(self):
    """GLUT display/idle callback: render one model, then advance.

    Renders the current model off-screen, stores the result as a
    ``<namespace>:render/<path>`` texture in the context assets, caches
    it, and moves to the next model. Leaves the main loop when all
    models are done, or on any error (which is re-raised after shutdown
    so the failure is not swallowed by the GLUT loop).
    """
    try:
        glClearColor(0.0, 0.0, 0.0, 0.0)
        img = self.draw_buffer()
        # Split "namespace:path" to build the output texture location.
        model_name = self.model_list[self.current_model_index].split(":")
        texture_path = f"{model_name[0]}:render/{model_name[1]}"
        self.ctx.assets.textures[texture_path] = Texture(img)

        self.cache_in_ctx(img)
        self.current_model_index += 1
        if self.current_model_index >= len(self.model_list):
            glutLeaveMainLoop()
            return
        # Prepare textures/camera for the next model before the next frame.
        self.reload()
        self.reset_camera()

        # glutSwapBuffers()
    except BaseException as e:
        # Tear down the loop first so the process can exit, then re-raise.
        glutLeaveMainLoop()
        raise e
|
|
195
|
+
|
|
196
|
+
def reshape(self, width, height):
    """GLUT reshape callback: update viewport and orthographic projection."""
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    # Half-extent of the visible region in world units (x flipped to
    # mirror the scene horizontally).
    half_extent = 8
    glOrtho(half_extent, -half_extent, -half_extent, half_extent, self.size, -self.size)
    glMatrixMode(GL_MODELVIEW)
|
|
204
|
+
|
|
205
|
+
def draw_buffer(self):
    """Render the current model off-screen and return it as a PIL image.

    Builds a framebuffer object with a color texture and a depth
    renderbuffer, draws every element of the current model into it,
    reads the pixels back (RGBA, so the background stays transparent),
    and releases all GL resources before returning.

    Returns:
        Image.Image: The rendered frame, flipped to top-down row order.

    Raises:
        RenderError: If the framebuffer fails its completeness check.
    """

    glClearColor(0.0, 0.0, 0.0, 0.0)  # Set clear color to black with alpha 0
    glEnable(GL_DEPTH_TEST)
    # add ambient light
    glEnable(GL_COLOR_MATERIAL)

    glEnable(GL_NORMALIZE)
    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)

    # Create a framebuffer object (FBO) for off-screen rendering
    fbo = glGenFramebuffers(1)
    glBindFramebuffer(GL_FRAMEBUFFER, fbo)

    # Create a renderbuffer for depth testing
    depth_buffer = glGenRenderbuffers(1)
    glBindRenderbuffer(GL_RENDERBUFFER, depth_buffer)
    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, self.size, self.size)
    glFramebufferRenderbuffer(
        GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depth_buffer
    )

    # Create a texture to render into
    render_texture = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, render_texture)
    glTexImage2D(
        GL_TEXTURE_2D,
        0,
        GL_RGBA,
        self.size,
        self.size,
        0,
        GL_RGBA,
        GL_UNSIGNED_BYTE,
        None,
    )
    glFramebufferTexture2D(
        GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, render_texture, 0
    )

    # Check framebuffer status
    if glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE:
        # Unbind before raising so the default framebuffer is restored.
        glBindFramebuffer(GL_FRAMEBUFFER, 0)
        raise RenderError("Framebuffer is not complete")

    # Render the scene
    glViewport(0, 0, self.size, self.size)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    model = self.models[self.model_list[self.current_model_index]]
    if "elements" in model:
        for element in model["elements"]:
            self.draw_element(element)

    # Read the pixel data, including alpha channel
    pixel_data = glReadPixels(0, 0, self.size, self.size, GL_RGBA, GL_UNSIGNED_BYTE)

    # Create an image from pixel data
    img = Image.frombytes("RGBA", (self.size, self.size), pixel_data)
    # glReadPixels returns rows bottom-up; flip to conventional top-down.
    img = img.transpose(Image.FLIP_TOP_BOTTOM)

    # Release resources
    glDeleteTextures(1, [render_texture])
    glDeleteRenderbuffers(1, [depth_buffer])
    glDeleteFramebuffers(1, [fbo])
    glDisable(GL_COLOR_MATERIAL)
    glDisable(GL_NORMALIZE)
    glDisable(GL_DEPTH_TEST)
    glDisable(GL_LIGHTING)
    glDisable(GL_LIGHT0)

    return img
|
|
278
|
+
|
|
279
|
+
def draw_element(self, element: dict):
    """Draw one model element (a textured cuboid) with GUI display transforms.

    Args:
        element: Minecraft model element dict with "from"/"to" corners,
            optional "rotation", and a "faces" mapping.
    """
    glEnable(GL_TEXTURE_2D)
    from_element = element["from"]
    to_element = element["to"]
    rotation = element.get("rotation", None)

    # Recenter the cuboid around the 16x16x16 model-space midpoint.
    from_element_centered, to_element_centered = self.center_element(
        from_element, to_element
    )

    vertices = self.get_vertices(
        from_element_centered, to_element_centered, rotation
    )

    # transform the vertices
    # "gui" display settings control how the model appears in inventory
    # slots; the fallback matches the vanilla default item transform.
    gui = (
        self.models[self.model_list[self.current_model_index]]
        .get("display", {})
        .get(
            "gui",
            {
                "rotation": [30, 225, 0],
                "translation": [0, 0, 0],
                "scale": [0.625, 0.625, 0.625],
            },
        )
    )
    scale = gui.get("scale", [1, 1, 1])
    translation = gui.get("translation", [0, 0, 0])
    # NOTE(review): `rotation` is rebound here — the element rotation was
    # already consumed by get_vertices above; from here on it holds the
    # GUI display rotation.
    rotation = gui.get("rotation", [0, 0, 0])

    # reset the matrix
    glLoadIdentity()
    # GUI translation is expressed in model units (16 per block).
    glTranslatef(translation[0] / 16, translation[1] / 16, translation[2] / 16)
    glTranslatef(self.translate[0], self.translate[1], self.translate[2])
    glRotatef(-rotation[0], 1, 0, 0)
    glRotatef(rotation[1] + 180, 0, 1, 0)
    glRotatef(rotation[2], 0, 0, 1)
    # Interactive rotation offsets (adjusted by keyboard()).
    glRotatef(self.rotate[0], 1, 0, 0)
    glRotatef(self.rotate[1], 0, 1, 0)
    glRotatef(self.rotate[2], 0, 0, 1)
    glScalef(scale[0], scale[1], scale[2])

    # Collect the distinct texture variables referenced by the faces.
    texture_used = [
        element["faces"].get("down", None),
        element["faces"].get("up", None),
        element["faces"].get("north", None),
        element["faces"].get("south", None),
        element["faces"].get("west", None),
        element["faces"].get("east", None),
    ]
    texture_used = [x["texture"].lstrip("#") for x in texture_used if x is not None]
    texture_used = list(set(texture_used))

    # Bind each texture once and draw every face that uses it.
    for texture in texture_used:
        if texture not in self.textures_bindings:
            continue
        glBindTexture(GL_TEXTURE_2D, self.textures_bindings[texture])
        glColor3f(1.0, 1.0, 1.0)
        # get all the faces with the same texture
        for face, data in element["faces"].items():
            if data["texture"].lstrip("#") == texture:
                self.draw_face(face, data, vertices, from_element, to_element)

    glDisable(GL_TEXTURE_2D)
|
|
344
|
+
|
|
345
|
+
def get_vertices(
|
|
346
|
+
self, from_element: list, to_element: list, rotation: dict | None
|
|
347
|
+
) -> list:
|
|
348
|
+
x1, y1, z1 = from_element
|
|
349
|
+
x2, y2, z2 = to_element
|
|
350
|
+
res = [
|
|
351
|
+
[x1, y1, z1],
|
|
352
|
+
[x2, y1, z1],
|
|
353
|
+
[x2, y2, z1],
|
|
354
|
+
[x1, y2, z1],
|
|
355
|
+
[x1, y2, z2],
|
|
356
|
+
[x2, y2, z2],
|
|
357
|
+
[x2, y1, z2],
|
|
358
|
+
[x1, y1, z2],
|
|
359
|
+
]
|
|
360
|
+
if rotation is None:
|
|
361
|
+
return res
|
|
362
|
+
|
|
363
|
+
origin = rotation["origin"]
|
|
364
|
+
axis = rotation["axis"]
|
|
365
|
+
angle = rotation["angle"]
|
|
366
|
+
angle = angle * pi / 180
|
|
367
|
+
|
|
368
|
+
for point in res:
|
|
369
|
+
x, y, z = point
|
|
370
|
+
x -= origin[0]
|
|
371
|
+
y -= origin[1]
|
|
372
|
+
z -= origin[2]
|
|
373
|
+
if axis == "x":
|
|
374
|
+
y, z = y * cos(angle) - z * sin(angle), y * sin(angle) + z * cos(angle)
|
|
375
|
+
elif axis == "y":
|
|
376
|
+
x, z = x * cos(-angle) - z * sin(-angle), x * sin(-angle) + z * cos(
|
|
377
|
+
-angle
|
|
378
|
+
)
|
|
379
|
+
elif axis == "z":
|
|
380
|
+
x, y = x * cos(angle) - y * sin(angle), x * sin(angle) + y * cos(angle)
|
|
381
|
+
x += origin[0]
|
|
382
|
+
y += origin[1]
|
|
383
|
+
z += origin[2]
|
|
384
|
+
point[0], point[1], point[2] = x, y, z
|
|
385
|
+
return res
|
|
386
|
+
|
|
387
|
+
def center_element(self, from_element: list, to_element: list) -> tuple[list, list]:
    """Shift the box so the model-space center (8, 8, 8) becomes the origin."""
    offset = (8, 8, 8)
    shifted_from = tuple(c - o for c, o in zip(from_element, offset))
    shifted_to = tuple(c - o for c, o in zip(to_element, offset))
    return shifted_from, shifted_to
|
|
398
|
+
|
|
399
|
+
def draw_face(
    self,
    face: str,
    data: dict,
    vertices: tuple,
    from_element: list,
    to_element: list,
):
    """Emit the quad for one face of an element, with UVs and a normal.

    Args:
        face: One of "down"/"up"/"north"/"south"/"east"/"west".
        data: The face dict (optional "uv" in 0..16 units, optional
            "rotation" in {0, 90, 180, 270}, and a "texture" reference).
        vertices: The 8 box corners from get_vertices().
        from_element / to_element: Original element bounds, used only to
            derive default UVs when the face declares none.

    Raises:
        RenderError: On an unknown face name or UV rotation value.
    """

    if "uv" in data:
        uv = data["uv"]
        # Explicit UVs are in 0..16 model units; normalize to 0..1.
        uv = [x / 16 for x in uv]
        rotation = data.get("rotation", 0)

    else:
        uv = self.get_uv(face, from_element, to_element)
        rotation = 0

    # Corner indices (into `vertices`) for this face, in quad winding order.
    match face:
        case "down":
            vertices_order = [7, 6, 1, 0]
        case "up":
            vertices_order = [3, 2, 5, 4]
        case "south":
            vertices_order = [4, 5, 6, 7]
        case "north":
            vertices_order = [2, 3, 0, 1]
        case "east":
            vertices_order = [5, 2, 1, 6]
        case "west":
            vertices_order = [3, 4, 7, 0]
        case _:
            raise RenderError(f"Unknown face {face}")

    # UV rotation is implemented by cycling the vertex order instead of
    # rotating the texture coordinates.
    match rotation:
        case 0:
            pass
        case 90:
            vertices_order = [
                vertices_order[1],
                vertices_order[2],
                vertices_order[3],
                vertices_order[0],
            ]
        case 180:
            vertices_order = [
                vertices_order[2],
                vertices_order[3],
                vertices_order[0],
                vertices_order[1],
            ]
        case 270:
            vertices_order = [
                vertices_order[3],
                vertices_order[0],
                vertices_order[1],
                vertices_order[2],
            ]
        case _:
            raise RenderError(f"Unknown rotation {rotation}")

    rotated_vertices = [vertices[i] for i in vertices_order]
    # Index pairs into `uv` = (u1, v1, u2, v2) for the four quad corners.
    texcoords = [(0, 1), (2, 1), (2, 3), (0, 3)]
    triangulated_vertices = [
        (rotated_vertices[0], rotated_vertices[1], rotated_vertices[2]),
        (rotated_vertices[0], rotated_vertices[2], rotated_vertices[3]),
    ]
    # Cross product of two triangle edges gives the face normal.
    normals = []
    for v0, v1, v2 in triangulated_vertices:
        u = [v1[i] - v0[i] for i in range(3)]
        v = [v2[i] - v0[i] for i in range(3)]
        normal = [
            u[1] * v[2] - u[2] * v[1],
            u[2] * v[0] - u[0] * v[2],
            u[0] * v[1] - u[1] * v[0],
        ]
        normals.append(normal)

    # glUseProgram(self.program)
    # print(glGetError())
    # self.set_uniforms(self.program)

    # NOTE(review): the inner loop shadows the outer `i` and re-emits all
    # four quad corners per triangle normal, so the quad is submitted
    # twice (once per normal). Left unchanged — behavior is intricate and
    # visually tuned; confirm before simplifying.
    glBegin(GL_QUADS)
    for i, (v0, v1, v2) in enumerate(triangulated_vertices):
        normal = normals[i]
        glNormal3fv(normal)

        for i, (uv0, uv1) in enumerate(texcoords):
            glTexCoord2f(uv[uv0], uv[uv1])
            glVertex3fv(rotated_vertices[i])
    glEnd()
    # glUseProgram(0)
|
|
491
|
+
|
|
492
|
+
def get_uv(self, face: str, from_element: list, to_element: list):
|
|
493
|
+
|
|
494
|
+
x1, y1, z1 = from_element
|
|
495
|
+
x2, y2, z2 = to_element
|
|
496
|
+
|
|
497
|
+
div = 16
|
|
498
|
+
|
|
499
|
+
x1, y1, z1 = x1 / div, y1 / div, z1 / div
|
|
500
|
+
x2, y2, z2 = x2 / div, y2 / div, z2 / div
|
|
501
|
+
|
|
502
|
+
match face:
|
|
503
|
+
case "east":
|
|
504
|
+
return (z1, y1, z2, y2)
|
|
505
|
+
case "west":
|
|
506
|
+
return (z1, y1, z2, y2)
|
|
507
|
+
case "up":
|
|
508
|
+
return (x1, z1, x2, z2)
|
|
509
|
+
case "down":
|
|
510
|
+
return (x1, z1, x2, z2)
|
|
511
|
+
case "south":
|
|
512
|
+
return (x1, y1, x2, y2)
|
|
513
|
+
case "north":
|
|
514
|
+
return (x1, y1, x2, y2)
|
|
515
|
+
|
|
516
|
+
def keyboard(self, key, x, y):
    """GLUT keyboard callback.

    ESC leaves the main loop, 'r' cycles to the next model, z/q/s/d pan
    the camera, and i/k, j/l, u/m rotate around the x, y and z axes.
    """
    if key == b"\x1b":
        glutLeaveMainLoop()
    elif key == b"r":
        # increment the current model index on each click, wrapping around
        self.current_model_index = (self.current_model_index + 1) % len(self.models)
        self.reload()
        self.reset_camera()
    else:
        # (target vector, component index, delta) per pan/rotate key.
        adjustments = {
            b"z": (self.translate, 1, 1),
            b"s": (self.translate, 1, -1),
            b"q": (self.translate, 0, -1),
            b"d": (self.translate, 0, 1),
            # use ijklm to rotate the model
            b"i": (self.rotate, 0, 1),
            b"k": (self.rotate, 0, -1),
            b"j": (self.rotate, 1, 1),
            b"l": (self.rotate, 1, -1),
            b"u": (self.rotate, 2, 1),
            b"m": (self.rotate, 2, -1),
        }
        if key in adjustments:
            vector, index, delta = adjustments[key]
            vector[index] += delta

    glutPostRedisplay()
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
from PIL import Image
|
|
2
|
+
from beet import Context
|
|
3
|
+
from beet.contrib.vanilla import Vanilla
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def load_textures(
    textures: dict, ctx: Context, vanilla: Vanilla
) -> dict[str, Image.Image]:
    """Resolve every texture variable in *textures* to a PIL image.

    Variables that cannot be resolved yield a fully transparent 16x16
    placeholder instead of failing.
    """
    resolved = {}
    for key in textures:
        real_path = get_real_key(key, textures)
        if real_path == "__not_found__":
            resolved[key] = Image.new("RGBA", (16, 16), (0, 0, 0, 0))
        else:
            resolved[key] = load_texture(real_path, ctx, vanilla)
    return resolved
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def load_texture(path: str, ctx: Context, vanilla: Vanilla) -> Image.Image:
    """Fetch the texture at *path* as RGBA, preferring pack assets over vanilla.

    A path without a namespace is assumed to be "minecraft:".

    Raises:
        KeyError: If the texture exists in neither source.
    """
    if ":" not in path:
        path = f"minecraft:{path}"
    for source in (ctx.assets.textures, vanilla.assets.textures):
        if path in source:
            img: Image.Image = source[path].image
            return img.convert("RGBA")
    raise KeyError(f"Texture {path} not found")
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def get_real_key(key: str, textures: dict, max_depth: int = 10) -> str:
    """Follow "#"-prefixed texture-variable references to a concrete path.

    Args:
        key: Texture variable name to resolve.
        textures: Mapping of variable name -> either a concrete resource
            path or a "#other_variable" reference.
        max_depth: Maximum number of lookups to perform; guards against
            reference cycles.

    Returns:
        The concrete texture path, or "__not_found__" when the key is
        missing or the reference chain is cyclic/too deep.
    """
    # Iterative rewrite of the original recursion (same lookup budget).
    for _ in range(max_depth):
        if key not in textures:
            return "__not_found__"
        value = textures[key]
        # startswith is safe on empty-string values, unlike value[0]
        # which raised IndexError.
        if value.startswith("#"):
            key = value[1:]
        else:
            return value
    return "__not_found__"
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
[tool.poetry]
|
|
2
|
+
name = "model-resolver"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = ""
|
|
5
|
+
authors = ["edayot <pro.e.dayot@gmail.com>"]
|
|
6
|
+
license = "MIT"
|
|
7
|
+
readme = "README.md"
|
|
8
|
+
|
|
9
|
+
[tool.poetry.dependencies]
|
|
10
|
+
python = "^3.10"
|
|
11
|
+
beet = ">=0.104.1"
|
|
12
|
+
tqdm = "^4.66.2"
|
|
13
|
+
rich = "^13.7.1"
|
|
14
|
+
pyopengl = {git = "https://github.com/mcfletch/pyopengl.git", rev = "29b79e8966ba2930a5c44829b02dffc1ca600752"}
|
|
15
|
+
pillow = "^10.3.0"
|
|
16
|
+
black = "^24.4.2"
|
|
17
|
+
typer = "^0.12.3"
|
|
18
|
+
|
|
19
|
+
[tool.poetry.scripts]
|
|
20
|
+
model_resolver = "model_resolver.cli:app"
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
[build-system]
|
|
24
|
+
requires = ["poetry-core"]
|
|
25
|
+
build-backend = "poetry.core.masonry.api"
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
[tool.beet]
|
|
29
|
+
output="build"
|
|
30
|
+
pipeline=[
|
|
31
|
+
"model_resolver"
|
|
32
|
+
]
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
[tool.beet.resource_pack]
|
|
36
|
+
load = "."
|
|
37
|
+
|
|
38
|
+
[tool.beet.meta.model_resolver]
|
|
39
|
+
load_vanilla = true
|
|
40
|
+
use_cache = false
|
|
41
|
+
render_size = 256
|
|
42
|
+
minecraft_version = "latest"
|
|
43
|
+
|