vuer 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vuer/addons/nerf_vuer/mixins.py +87 -44
- vuer/addons/nerf_vuer/nerf_vuer.py +24 -8
- vuer/addons/nerf_vuer/render_components.py +20 -21
- vuer/addons/nerf_vuer/render_nodes.py +354 -46
- vuer/base.py +25 -19
- vuer/events.py +1 -21
- vuer/schemas.py +127 -24
- vuer/serdes.py +20 -0
- vuer/server.py +27 -7
- vuer/types.py +0 -2
- {vuer-0.0.3.dist-info → vuer-0.0.5.dist-info}/METADATA +6 -4
- vuer-0.0.5.dist-info/RECORD +22 -0
- {vuer-0.0.3.dist-info → vuer-0.0.5.dist-info}/WHEEL +1 -1
- vuer-0.0.3.dist-info/RECORD +0 -21
- {vuer-0.0.3.dist-info → vuer-0.0.5.dist-info}/LICENSE +0 -0
- {vuer-0.0.3.dist-info → vuer-0.0.5.dist-info}/entry_points.txt +0 -0
- {vuer-0.0.3.dist-info → vuer-0.0.5.dist-info}/top_level.txt +0 -0
vuer/addons/nerf_vuer/render_nodes.py
CHANGED

@@ -3,6 +3,10 @@ RenderComponents are view components that respond to camera movements in the frontend
 that returns rendered images
 """
 import base64
+import json
+from abc import abstractmethod
+from collections import deque
+from copy import deepcopy
 from io import BytesIO
 
 import numpy as np
@@ -12,15 +16,26 @@ from PIL import Image
 from torch import Tensor
 from torchtyping import TensorType
 
-from
+from vuer.events import ClientEvent
 
 
-class Chainer:
+class Chainer(list):
     def __init__(self, *fns):
-
+        """Chainer is a function that chains multiple functions together
+
+        Example:
+
+            I have functions fn_1 and fn_2, both are "render nodes" for processing the rendered output from the nerf.
+            I can use Chainer to chain them together:
+
+                chained = Chainer(fn_1, fn_2)
+                chained(**pipeline) gives fn_1(fn_2(**pipeline))
+
+        """
+        super().__init__(fns)
 
     def thunk(self, **pipe):
-        for fn in self
+        for fn in self:
             try:
                 pipe = fn(**pipe)
             except Exception as e:
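Note that as thunk() is written, the nodes run in list order: the first function passed to Chainer consumes the pipeline first, and each node returns an updated dict that feeds the next. A minimal standalone sketch of the pattern (the node names here are made up):

    # Standalone sketch of the Chainer pattern above (made-up node names):
    # each render node takes the pipeline as keyword arguments and returns
    # an updated dict, which feeds the next node.
    class Chainer(list):
        def __init__(self, *fns):
            super().__init__(fns)

        def __call__(self, **pipe):
            for fn in self:  # list order: the first node runs first
                pipe = fn(**pipe)
            return pipe

    def double(x, **rest):
        return {"x": x * 2, **rest}

    def shift(x, **rest):
        return {"x": x + 1, **rest}

    chained = Chainer(double, shift)
    print(chained(x=3))  # {'x': 7} -- double runs first, then shift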
@@ -32,54 +47,344 @@ class Chainer:
         return self.thunk(**pipe)
 
 
-
-
-
+class TrajectoryCache(deque):
+    """
+    A cache to capture the camera trajectories.
+    """
+
+    def __init__(self, **kwargs):
+        self.selections = deque(**kwargs)
+        super().__init__(**kwargs)
+
+    async def click_handler(self, event: ClientEvent, _):
+        camera_pose = deepcopy(self[-1])
+        self.selections.append(camera_pose)
+        print(camera_pose)
+
+    async def cache_camera(self, event: ClientEvent, _):
+        # do NOT mutate this object
+        camera_pose = deepcopy(event.value["camera"])
+        self.append(camera_pose)
+
+
+class Singleton(deque):
+    def __init__(self, sequence=None):
+        super().__init__(sequence or [], maxlen=1)
+        self.__post_init__()
+
+    @abstractmethod
+    def __post_init__(self):
+        pass
+
+
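Singleton is just a deque capped at one element, so it always holds the most recent value; subclasses hook __post_init__ to set up side caches. The eviction behavior in a few lines:

    from collections import deque

    cache = deque(maxlen=1)
    cache.append("frame_1")
    cache.append("frame_2")  # silently evicts "frame_1"
    print(list(cache))       # ['frame_2']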
+class RGBA(Singleton):
+    @staticmethod
+    def rgb(raw_rgb: TensorType["hwc"], **pipeline):
+        # only needed for PNG images
+        # image_c = torch.cat([raw_rgb, raw_accumulation], dim=-1)
+        # encoded = b64jpg(image_c)
+        encoded = b64jpg(raw_rgb)
+        return {"rgb": encoded, "raw_rgb": raw_rgb, **pipeline}
+
+    @staticmethod
+    def alpha(raw_accumulation: Tensor, alpha_threshold=None, **pipeline):
+        if alpha_threshold is not None:
+            raw_accumulation[raw_accumulation < alpha_threshold] = 0
+
+        return {
+            "alpha": b64jpg(raw_accumulation),
+            "raw_accumulation": raw_accumulation,
+            **pipeline,
+        }
+
+    @staticmethod
+    def depth(raw_depth: TensorType["hwc"], raw_accumulation, settings, **pipeline):
+        # con
+        cmap = _get_colormap(**settings)
+
+        alphaThreshold = settings.get("alphaThreshold", None)
+
+        if alphaThreshold is None:
+            mask = None
+        else:
+            mask = (raw_accumulation > alphaThreshold).squeeze().cpu()
+
+        depthmap = cmap(raw_depth.squeeze().cpu().numpy(), mask)[:, :, :3]
+        depthmap = torch.Tensor(depthmap).to(raw_depth.device)
+
+        return {
+            "depth": b64png(depthmap),
+            "raw_depth": raw_depth,
+            "raw_accumulation": raw_accumulation,
+            **pipeline,
+        }
+
+    def cache(self, raw_rgb, raw_accumulation, **pipeline):
+        rgba = torch.cat([raw_rgb, 255 * raw_accumulation], dim=-1).clip(0, 255).cpu().numpy().astype(np.uint8)
+        self.append(rgba)
+
+        return {
+            "raw_rgb": raw_rgb,
+            "raw_accumulation": raw_accumulation,
+            **pipeline,
+        }
+
+    def cache_depth(self, raw_depth, raw_accumulation, **pipeline):
+        depth_a = torch.cat([raw_depth, 255 * raw_accumulation], dim=-1).clip(0, 255).cpu().numpy().astype(np.uint8)
+        self.append(depth_a)
+
+        return {
+            "raw_depth": raw_depth,
+            "raw_accumulation": raw_accumulation,
+            **pipeline,
+        }
+
+
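cache() and cache_depth() pack a color channel and the accumulation into a single H x W x 4 uint8 frame. A sketch of that packing, assuming (as the 255 * scaling suggests) RGB already in [0, 255] and accumulation in [0, 1]:

    import numpy as np
    import torch

    h, w = 4, 4
    raw_rgb = torch.rand(h, w, 3) * 255     # color, assumed in [0, 255]
    raw_accumulation = torch.rand(h, w, 1)  # opacity, assumed in [0, 1]

    rgba = (
        torch.cat([raw_rgb, 255 * raw_accumulation], dim=-1)
        .clip(0, 255)
        .cpu()
        .numpy()
        .astype(np.uint8)
    )
    print(rgba.shape, rgba.dtype)  # (4, 4, 4) uint8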
+def _get_colormap(colormap, invert=False, useClip=True, clip: tuple = None, gain=1.0, offset=0.0, normalize=False, **_):
+    """
+    https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
+    get color map from matplotlib
+    returns color_map function with signature (x, mask=None),
+    where mask is the mask-in for the colormap.
+
+    """
+    import matplotlib.cm as cm
+
+    cmap = cm.get_cmap(colormap + "_r" if invert else colormap)
+
+    def map_color(x, mask=None):
+
+        if normalize:
+            if mask is None or mask.sum() == 0:
+                min, max = x.min(), x.max()
+            else:
+                min, max = x[mask].min(), x[mask].max()
 
+            x -= min
+            x /= max - min + 1e-6
+            x[x < 0] = 0
 
-
-
-        encoded = b64jpg(image_c)
-        return {"rgb": encoded, "raw_rgb": raw_rgb, "raw_accumulation": raw_accumulation, **pipeline}
+        if useClip and clip is not None:
+            x = x.clip(*clip)
 
+        if offset is not None:
+            x -= offset
 
-
-
-        raw_accumulation[raw_accumulation < alpha_threshold] = 0
+        if gain is not None:
+            x *= gain
 
-
-            "alpha": b64jpg(raw_accumulation),
-            "raw_accumulation": raw_accumulation,
-            **pipeline,
-        }
+        return cmap(x)
 
+    return map_color
 
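_get_colormap wraps a matplotlib colormap with optional masked min-max normalization, clipping, offset, and gain. A short usage sketch of the same steps (cm.get_cmap matches the code above, though newer matplotlib releases deprecate it in favor of the matplotlib.colormaps registry):

    import matplotlib.cm as cm
    import numpy as np

    depth = np.random.rand(8, 8).astype(np.float32) * 10
    mask = depth > 2.0  # e.g. pixels with enough accumulated density

    cmap = cm.get_cmap("viridis")

    # masked min-max normalization, mirroring map_color(normalize=True)
    lo, hi = depth[mask].min(), depth[mask].max()
    x = (depth - lo) / (hi - lo + 1e-6)
    x[x < 0] = 0

    rgba = cmap(x)        # H x W x 4 floats in [0, 1]
    rgb = rgba[:, :, :3]  # the depth() node keeps only the RGB channels
    print(rgb.shape)      # (8, 8, 3)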
-def monochrome(image, colormap, normalize, clip, gain, alpha_np, **pipeline):
-    # assert image.shape[-1] == 1 and colormap is not None, "Invalid colormap for depth"
-    # cmap = get_colormap(colormap, normalize=normalize, clip=clip, gain=gain)
-    # # Need to ignore nans and infs
-    # mask = ~torch.isnan(image) & ~torch.isinf(image)
-    # mask = mask.squeeze().cpu().numpy()
-    # image_c = cmap(image.cpu().numpy(), mask=mask)[:, :, 0, :]
-    # # Set alphas
-    # image_c[:, :, 3] = alpha_np.squeeze()
-    # # Set unmasked pixels to 0 RGBA
-    # image_c[~mask] = 0
-    # return image_c
-    pass
 
+def nan_prune(t):
+    """Drop all rows containing any nan:"""
+    t = t[~torch.any(t.isnan(), dim=-1)]
+    return t
 
-
-
-
-
-
-
-
-
-
-
-
+
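nan_prune uses a boolean row mask: any row of the flattened feature matrix with at least one NaN is dropped before fitting the PCA. Concretely:

    import torch

    t = torch.tensor([[1.0, 2.0], [float("nan"), 3.0], [4.0, 5.0]])
    pruned = t[~torch.any(t.isnan(), dim=-1)]
    print(pruned)  # tensor([[1., 2.], [4., 5.]])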
+class PCA:
+    proj = None
+
+    def clear(self):
+        self.proj = None
+
+    def __call__(self, raw_features, dim=3, center=True):
+        """no batch dimension."""
+        *shape, c = raw_features.shape
+        feat_flat = raw_features.reshape(-1, c)
+
+        if self.proj is None:
+            print("Computing the PCA")
+            feat_nan_free = nan_prune(feat_flat)
+            # we can not use this u because it potentially has the nans removed.
+            u, diag, self.proj = torch.pca_lowrank(feat_nan_free, q=dim, center=center)
+        else:
+            print("Using cached PCA")
+
+        u = raw_features @ self.proj
+        return u
+
+
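PCA fits a projection once with torch.pca_lowrank and reuses it for every subsequent frame, so the false-color features stay stable as the camera moves. A sketch of that reuse:

    import torch

    feats = torch.randn(1000, 512)  # flattened H*W x C feature rows (NaNs pruned)
    u, s, proj = torch.pca_lowrank(feats, q=3, center=True)

    # later frames reuse the cached projection instead of re-fitting
    new_frame = torch.randn(64, 64, 512)
    projected = new_frame @ proj    # shape: 64 x 64 x 3
    print(projected.shape)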
+class FeatA(Singleton):
+    """Cache for Feature and Alpha Channel. Contains two caches, self, and low_res_cache.
+    The low-res cache is for the low-res feature map.
+
+    Self is the flat cache.
+    """
+
+    def __post_init__(self):
+        self.pca = PCA()
+        self.low_res_cache = deque(maxlen=1)
+        self.features_cache = deque(maxlen=1)
+
+    def features_pca(self, raw_features, raw_accumulation, alpha=None, **pipeline):
+        # print("feat_pca.shape:", raw_features.shape)
+        feat_pca = self.pca(raw_features, dim=3)
+        feat_pca_flat = feat_pca.reshape(-1, 3)
+
+        # these should be configurations
+        low = torch.nanquantile(feat_pca_flat, 0.02, dim=0)
+        top = torch.nanquantile(feat_pca_flat, 0.98, dim=0)
+
+        feat_pca_normalized = 0.02 + 0.96 * (feat_pca - low) / (top - low)
+        feat_pca_normalized.clip_(0, 1)
+
+        if alpha is None:
+            alpha = b64jpg(raw_accumulation)
+
+        return {
+            "features": b64jpg(feat_pca_normalized),
+            "features_pca": feat_pca_normalized,
+            "alpha": alpha,
+            "raw_features": raw_features,
+            "raw_accumulation": raw_accumulation,
+            **pipeline,
+        }
+
+    def probe_feature(self, raw_features, **pipeline):
+        return {"raw_features": raw_features, **pipeline}
+
+    def features_heatmap(self, raw_features, raw_accumulation, settings, **pipeline):
+
+        print("raw_features.shape:", raw_features.shape)
+        print(settings)
+
+        x, y = settings.pop("xy", [None, None])
+        size = settings.pop("size", None)
+        n = max(1, int(size))
+
+        # remove clip so that lerf_text_map does not complain about hashability in the lru_cache.
+        clip = settings.pop("clip", None)
+
+        h, w = raw_features.shape[:2]
+        i, j = int(x * h), int(y * h)
+        feat_probe = raw_features[i : i + n, j : j + n, :].to(raw_accumulation.device)
+        feat_probe = feat_probe.mean(dim=[0, 1])
+        feat_probe /= feat_probe.norm(dim=-1, keepdim=True)
+
+        raw_features_cuda = raw_features.to(raw_accumulation.device)
+        raw_features_cuda /= raw_features_cuda.norm(dim=-1, keepdim=True)
+        raw_features_cuda.shape
+        # heatmap
+        heatmap = raw_features_cuda @ feat_probe
+        del raw_features_cuda
+        mask = ~torch.isnan(heatmap)
+
+        cmap = _get_colormap(clip=clip, **settings)
+        heatmap_rgb = cmap(heatmap.cpu(), mask.cpu())[:, :, :3]
+        heatmap_rgb = torch.FloatTensor(heatmap_rgb).to(raw_accumulation.device)
+
+        # mask_float = mask.float()[..., None] * raw_accumulation
+        mask_float = raw_accumulation
+        print("raw_accumulation", raw_accumulation.shape)
+
+        return {
+            "heatmap": b64jpg(heatmap_rgb),
+            "heatmap_mask": b64jpg(mask_float),
+            "heatmap_rgb": heatmap_rgb,
+            "raw_heatmap_mask": mask_float,
+            "raw_accumulation": raw_accumulation,
+            **pipeline,
+        }
+
+    def cache_heatmap(self, heatmap_rgb, raw_heatmap_mask, **pipeline):
+        heat_alpha = torch.cat([heatmap_rgb, 255 * raw_heatmap_mask], dim=-1).clip(0, 255).cpu().numpy().astype(np.uint8)
+        self.append(heat_alpha)
+        # remove those from the pipeline
+        return {**pipeline}
+
+    def cache_pca(self, features_pca, raw_accumulation, **pipeline):
+        fpca_cuda = features_pca.to(raw_accumulation.device)
+        feat_alpha = torch.cat([fpca_cuda, 255 * raw_accumulation], dim=-1).clip(0, 255).cpu().numpy().astype(np.uint8)
+        self.append(feat_alpha)
+        return {"raw_accumulation": raw_accumulation, **pipeline}
+
+    def cache_features(self, raw_features, **pipeline):
+        """this cache is very expensive."""
+        self.features_cache.append(raw_features)
+        return {"raw_features": raw_features, **pipeline}
+
+    def cache_low_res(self, raw_features, **pipeline):
+        from einops import rearrange
+
+        h, w = raw_features.shape[:2]
+        size = h * w - torch.isnan(raw_features).any(dim=-1).sum()
+        ratio = float(120 / size) ** 0.5
+        feat = rearrange(raw_features, "h w c -> 1 c h w")
+        try:
+            feat = torch.nn.functional.upsample(feat, scale_factor=ratio, mode="bilinear")
+            feat = rearrange(feat, "1 c h w -> h w c")
+            feat_flat = feat[~torch.isnan(feat).any(dim=-1)]
+            self.low_res_cache.append(feat_flat)
+            return {
+                # note: remove [ raw_features, and features_pca ] from the flow to conserve memory
+                **pipeline,
+            }
+        except Exception as e:
+            print("upsample failed", e)
+            return pipeline
+
+    def reset_pca(self):
+        self.pca.clear()
+
+
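features_heatmap averages an n x n patch of features around the clicked pixel, L2-normalizes both the probe and the feature map, and takes a matrix product; with unit vectors that product is the per-pixel cosine similarity. The core computation, sketched:

    import torch

    feat_map = torch.randn(64, 64, 512)

    # average a small patch around the clicked pixel, then normalize
    i, j, n = 10, 20, 3
    probe = feat_map[i : i + n, j : j + n, :].mean(dim=[0, 1])
    probe = probe / probe.norm(dim=-1, keepdim=True)

    # unit-normalize the features; the matmul is then cosine similarity
    feats = feat_map / feat_map.norm(dim=-1, keepdim=True)
    heatmap = feats @ probe  # 64 x 64 scores in [-1, 1]
    print(heatmap.shape)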
+class CLIPQueryMap(Singleton):
+    """Cache for Feature and Alpha Channel. Contains two caches, self, and low_res_cache.
+    The low-res cache is for the low-res feature map.
+
+    Self is the flat cache."""
+
+    def set_query_vector(self, query_vector):
+        self.query_vector = query_vector
+
+    def text_heatmap(self, raw_features, raw_accumulation, settings, **pipeline):
+        from instant_feature.viewer.nerf_vuer.clip.clip_heatmap import get_lerf_text_map
+
+        print("raw_features.shape:", raw_features.shape)
+        print(settings)
+
+        # remove clip so that lerf_text_map does not complain about hashability in the lru_cache.
+        clip = settings.pop("clip", None)
+
+        text_map = get_lerf_text_map(
+            **settings,
+            device=raw_accumulation.device,
+        )
+
+        raw_features_cuda = raw_features.to(raw_accumulation.device)
+        heatmap, mask = text_map(raw_features_cuda)
+        del raw_features_cuda
+
+        heatmap_normalized = heatmap
+        # heatmap_normalized = deepcopy(raw_features)
+        # heatmap_normalized.clip_(0, 1)
+        print("heatmap_normalized.shape:", heatmap_normalized.shape)
+
+        # final_mask = raw_accumulation * mask.float()[..., None]
+
+        cmap = _get_colormap(clip=clip, **settings)
+        heatmap_rgb = cmap(heatmap_normalized.cpu(), mask.cpu())[:, :, :3]
+        heatmap_rgb = torch.FloatTensor(heatmap_rgb).to(raw_accumulation.device)
+
+        mask_float = mask.float()[..., None]
+
+        return {
+            "heatmap": b64jpg(heatmap_rgb),
+            "heatmap_mask": b64jpg(mask_float),
+            "raw_heatmap": heatmap_rgb,
+            "raw_heatmap_mask": mask_float,
+            "raw_accumulation": raw_accumulation,
+            **pipeline,
+        }
+
+    def cache(self, raw_heatmap, raw_heatmap_mask, **pipeline):
+        print("cache")
+        feat_alpha = torch.cat([raw_heatmap, 255 * raw_heatmap_mask], dim=-1).clip(0, 255).cpu().numpy().astype(np.uint8)
+        print("done caching")
+        self.append(feat_alpha)
+        return {**pipeline}
 
 
 def b64jpg(image: Tensor, quality: int = 90):
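text_heatmap depends on get_lerf_text_map from the external instant_feature package, which is not part of this diff. The general shape of such a text-query map, sketched with a hypothetical encode_text stand-in (the real LERF relevancy score is more involved):

    import torch

    def encode_text(prompt: str) -> torch.Tensor:
        # hypothetical stand-in for a CLIP text encoder; returns a unit vector
        torch.manual_seed(abs(hash(prompt)) % 2**31)
        v = torch.randn(512)
        return v / v.norm()

    def text_map(raw_features: torch.Tensor, prompt: str):
        query = encode_text(prompt)
        feats = raw_features / raw_features.norm(dim=-1, keepdim=True)
        heatmap = feats @ query  # cosine similarity per pixel
        mask = ~torch.isnan(heatmap)
        return heatmap, mask

    heatmap, mask = text_map(torch.randn(64, 64, 512), "a red chair")
    print(heatmap.shape)  # torch.Size([64, 64])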
@@ -93,8 +398,11 @@ def b64jpg(image: Tensor, quality: int = 90):
     image = image[:, :, :3]
 
     image *= 255
-
-
+
+    if isinstance(image, np.ndarray):
+        image_np = image.astype(np.uint8)
+    else:
+        image_np = image.cpu().numpy().astype(np.uint8)
 
     C = image_np.shape[-1]
     if C == 1:
@@ -119,7 +427,7 @@ def b64png(image: Tensor):
     rgb_pil = Image.fromarray(image_np)
     rgb_pil.save(buff, format="PNG")
     img64 = base64.b64encode(buff.getbuffer().tobytes()).decode("utf-8")
-    return img64
+    return "data:image/png;base64," + img64
 
 
 def b64png_depth(depth):
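The b64png change prepends the data: scheme, so the returned string can be used directly as an <img> src or a frontend texture URL instead of raw base64. The round trip in isolation:

    import base64
    from io import BytesIO

    import numpy as np
    from PIL import Image

    image_np = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)

    buff = BytesIO()
    Image.fromarray(image_np).save(buff, format="PNG")
    img64 = base64.b64encode(buff.getbuffer().tobytes()).decode("utf-8")

    data_uri = "data:image/png;base64," + img64
    print(data_uri[:40])  # ready to drop into an <img> src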
vuer/base.py
CHANGED

@@ -15,13 +15,13 @@ async def default_handler(request, ws):
     print(msg)
 
 
-async def websocket_handler(request, handler):
-    print(
+async def websocket_handler(request, handler, **ws_kwargs):
+    print("New connection!!!")
 
-    ws = web.WebSocketResponse()
+    ws = web.WebSocketResponse(**ws_kwargs)
     await ws.prepare(request)
 
-    print(
+    print("Socket stored")
 
     try:
         await handler(request, ws)
@@ -30,18 +30,18 @@ async def websocket_handler(request, handler):
         print("Connection reset")
 
     except CancelledError as exp:
-        print(f
+        print(f"WebSocket Canceled")
 
     except Exception as exp:
-        print(f
+        print(f"Error:\n{exp}\n{traceback.print_exc()}")
 
     finally:
         await ws.close()
-        print(
+        print("WebSocket connection closed")
 
 
 async def handle_file_request(request, root):
-    filename = request.match_info[
+    filename = request.match_info["filename"]
     filepath = Path(root) / filename
 
     if not filepath.is_file():
@@ -64,20 +64,26 @@ class Server:
 
         default = aiohttp_cors.ResourceOptions(
             allow_credentials=True,
-            expose_headers="*",
+            expose_headers="*",
+            allow_headers="*",
+            allow_methods="*",
         )
-        cors_config = {k: default for k in self.cors.split(
+        cors_config = {k: default for k in self.cors.split(",")}
 
         self.cors_context = aiohttp_cors.setup(self.app, defaults=cors_config)
 
-    def _route(
+    def _route(
+        self,
+        path: str,
+        handler: callable,
+        method: str = "GET",
+    ):
         route = self.app.router.add_resource(path).add_route(method, handler)
         self.cors_context.add(route)
 
     def _socket(self, path: str, handler: callable):
-        ws_handler = partial(websocket_handler, handler=handler)
-
-        self.cors_context.add(route)
+        ws_handler = partial(websocket_handler, handler=handler, max_msg_size=self.WEBSOCKET_MAX_SIZE)
+        self._route(path, ws_handler, method="GET")
 
     def _add_task(self, fn):
         loop = asyncio.get_event_loop()
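Forwarding **ws_kwargs into web.WebSocketResponse lets _socket raise aiohttp's default 4 MB WebSocket message cap via max_msg_size, which matters once rendered frames ship as base64 strings. (The value of Server.WEBSOCKET_MAX_SIZE is not shown in this diff.) The relevant aiohttp call, sketched:

    from aiohttp import web

    async def ws_handler(request):
        # max_msg_size overrides aiohttp's 4 MB default; 0 disables the limit
        ws = web.WebSocketResponse(max_msg_size=2**28)
        await ws.prepare(request)
        async for msg in ws:
            await ws.send_str(msg.data)  # echo the text frame back
        return ws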
@@ -85,7 +91,7 @@ class Server:
 
     def _static(self, path, root):
         _fn = partial(handle_file_request, root=root)
-        self.
+        self._route(f"{path}/{{filename}}", _fn, method="GET")
 
     def run(self):
         async def init_server():
@@ -94,7 +100,7 @@ class Server:
             site = web.TCPSite(runner, self.host, self.port)
             await site.start()
 
-            print(f
+            print(f"Serving on http://{self.host}:{self.port}")
 
         event_loop = asyncio.get_event_loop()
 
@@ -102,8 +108,8 @@ class Server:
         event_loop.run_forever()
 
 
-if __name__ ==
+if __name__ == "__main__":
     app = Server()
-    app._route(
-    app._static(
+    app._route("", websocket_handler)
+    app._static("/static", handle_file_request, root=".")
     app.run()
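In _static, the doubled braces in the f-string render to a literal {filename} placeholder, which aiohttp binds and exposes through request.match_info. The same route registration in plain aiohttp, sketched:

    from pathlib import Path

    from aiohttp import web

    async def serve_file(request):
        filename = request.match_info["filename"]  # bound by {filename}
        filepath = Path(".") / filename
        if not filepath.is_file():
            raise web.HTTPNotFound()
        return web.FileResponse(filepath)

    app = web.Application()
    path = "/static"
    # f"{path}/{{filename}}" renders to "/static/{filename}"
    app.router.add_resource(f"{path}/{{filename}}").add_route("GET", serve_file)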
vuer/events.py
CHANGED

@@ -1,4 +1,5 @@
 from vuer.schemas import Element
+from vuer.serdes import serializer
 
 
 class Event:
@@ -54,27 +55,6 @@ NULL = NullEvent()
     # instance = cls.__new__(cls)
     # return cls.__init__(instance, data)
 
-from typing import Sequence
-
-
-def serializer(data):
-    if hasattr(data, "serialize"):
-        return data.serialize()
-
-    if isinstance(data, str):
-        # return Text(data)
-        return data
-
-    # this could be dangerous.
-    if isinstance(data, Sequence):
-        return [serializer(d) for d in data]
-
-    # this could be dangerous
-    if isinstance(data, dict):
-        return {k: serializer(v) for k, v in data.items()}
-
-    NotImplementedError(f"Cannot serialize {data}")
-
 
 class ServerEvent(Event):  # , metaclass=Meta):
     def __init__(self, data, etype=None, **kwargs):