vuer 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -1,7 +1,8 @@
 from collections import defaultdict
+from copy import deepcopy
 from dataclasses import dataclass
 from functools import wraps
-from typing import Dict, Union, List, DefaultDict, Optional, Generator
+from typing import Dict, Union, List, DefaultDict, Optional, Generator, Callable, Sequence
 
 from instant_feature.cameras.cameras import Cameras
 from instant_feature.viewer.se3 import rotation_matrix
@@ -106,7 +107,16 @@ def process_aabb(render_gen):
         render,
         **kwargs,
     ):
-        if render.get("use_aabb", None):
+
+        if settings.get("use_aabb", None):
+            # use_aabb = settings.get("use_aabb", None)
+            # aabb_min = settings.get("aabb_min", None)
+            # aabb_max = settings.get("aabb_max", None)
+            scene_box = AABB(
+                aabb_min=Vector3(**settings.get("aabb_min", None)),
+                aabb_max=Vector3(**settings.get("aabb_max", None)),
+            )
+        elif render.get("use_aabb", None):
             scene_box = AABB(
                 aabb_min=Vector3(**render.get("aabb_min", None)),
                 aabb_max=Vector3(**render.get("aabb_max", None)),
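The new branch gives the per-request settings AABB priority over the per-layer render AABB. Below is a minimal standalone sketch of that precedence rule; the Vector3 and AABB dataclasses here are simplified stand-ins for the project's own types, not its actual implementation.

from dataclasses import dataclass
from typing import Optional


@dataclass
class Vector3:
    x: float = 0.0
    y: float = 0.0
    z: float = 0.0


@dataclass
class AABB:
    aabb_min: Vector3
    aabb_max: Vector3


def pick_scene_box(settings: dict, render: dict) -> Optional[AABB]:
    # settings takes precedence over render, mirroring the if/elif above.
    for source in (settings, render):
        if source.get("use_aabb", None):
            return AABB(
                aabb_min=Vector3(**source["aabb_min"]),
                aabb_max=Vector3(**source["aabb_max"]),
            )
    return None


box = pick_scene_box(
    settings={"use_aabb": True, "aabb_min": {"x": -1, "y": -1, "z": -1}, "aabb_max": {"x": 1, "y": 1, "z": 1}},
    render={},
)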
@@ -114,35 +124,32 @@ def process_aabb(render_gen):
         else:
             scene_box = None
 
-        # if settings.get("use_aabb", None):
-        #     use_aabb = settings.get("use_aabb", None)
-        #     aabb_min = settings.get("aabb_min", None)
-        #     aabb_max = settings.get("aabb_max", None)
-
         async for event in render_gen(*args, aabb=scene_box, settings=settings, **kwargs):
             yield event
 
     return wrap_gen
 
 
-def _se3(position, rotation, scale, **rest) -> Dict[str, Union[Vector3, EulerDeg, float]]:
-    return dict(
-        position=Vector3(**position),
-        rotation=EulerDeg(**rotation).to_rad(),
-        scale=scale,
-        **rest,
-    )
+def _se3(position=None, rotation=None, scale=None, **rest) -> Dict[str, Union[Vector3, EulerDeg, float]]:
+    """
+    Add handling of undefined parameters.
+    """
+    output = rest
+    if position:
+        output["position"] = Vector3(**position)
+    if rotation:
+        output["rotation"] = EulerDeg(**rotation).to_rad()
+    if isinstance(scale, dict):
+        output["scale"] = Vector3(**scale)
+    elif scale:
+        output["scale"] = scale
+    return output
 
 
 # Need more encapsulated handling. Transformations should be propagated down.
 def process_world(render_gen):
     # @wraps(render_gen)
     async def wrap_gen(*args, camera, world, settings, **kwargs):
-        # position = Vector3(**world["position"])
-        # # sets the unit to "deg" internally.
-        # rotation = EulerDeg(**world["rotation"])
-        # scale = world["scale"]
-
         async for event in render_gen(
             *args,
             parent=_se3(**world),
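The rewritten _se3 only emits the keys that were actually supplied, so callers may pass partial world dictionaries. A rough sketch of the resulting behaviour, using plain dictionaries and math.radians as stand-ins for the project's Vector3 and EulerDeg types:

import math


def se3_sketch(position=None, rotation=None, scale=None, **rest):
    # Mirrors the new _se3: undefined parameters are simply omitted from the output.
    output = dict(rest)
    if position:
        output["position"] = position  # Vector3(**position) in the real code
    if rotation:
        # EulerDeg(**rotation).to_rad() in the real code
        output["rotation"] = {k: math.radians(v) for k, v in rotation.items()}
    if isinstance(scale, dict):
        output["scale"] = scale        # Vector3(**scale) for per-axis scale
    elif scale:
        output["scale"] = scale        # scalar scale passes through unchanged
    return output


# Only the keys that were supplied appear in the result.
print(se3_sketch(rotation={"x": 90.0, "y": 0.0, "z": 0.0}))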
@@ -205,6 +212,17 @@ def _lie_action(ray_bundle, position: Vector3, rotation: Euler, scale: float, **
     ray_bundle.origins /= scale
 
 
+def _transformation(rotation: Euler, position: Vector3, scale: Union[float, Vector3], **_) -> torch.Tensor:
+    rot_mat = torch.from_numpy(rotation_matrix(*rotation))
+    transform = torch.eye(4)
+    if isinstance(scale, Vector3):
+        transform[:3, :3] = rot_mat @ torch.DoubleTensor(scale).diag()
+    else:
+        transform[:3, :3] = rot_mat * scale
+    transform[:3, 3] = torch.FloatTensor(position)
+    return transform
+
+
 def collect_rays(render_bundle):
     # @wraps(render_bundle)
     async def ray_gen(
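_transformation packs rotation, scale (scalar or per-axis), and translation into a single 4x4 homogeneous matrix. A self-contained sketch of the same construction; the project's rotation_matrix helper is replaced here with a plain 3x3 rotation argument, so this is an illustration rather than the library's code.

import torch


def make_transform(rotation_3x3: torch.Tensor, position, scale) -> torch.Tensor:
    # Homogeneous 4x4: upper-left 3x3 holds rotation times scale, last column holds translation.
    transform = torch.eye(4)
    if isinstance(scale, (list, tuple)):  # per-axis scale, like the Vector3 branch above
        transform[:3, :3] = rotation_3x3 @ torch.tensor(scale, dtype=torch.float32).diag()
    else:                                 # scalar scale
        transform[:3, :3] = rotation_3x3 * scale
    transform[:3, 3] = torch.tensor(position, dtype=torch.float32)
    return transform


T = make_transform(torch.eye(3), position=[1.0, 2.0, 3.0], scale=2.0)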
@@ -212,24 +230,30 @@ def collect_rays(render_bundle):
         camera: Cameras,
         parent: Dict = None,
         settings: Dict = None,
-        # position: Vector3 = None,
-        # # use degree convention on the front end
-        # rotation: EulerDeg = None,
-        # # Non-scalar scale is current not supported.
-        # scale: Union[Vector3, float] = None,
         aabb: Optional[AABB] = None,
         **kwargs,
     ) -> Dict[str, torch.Tensor]:
         assert len(camera) == 1
-        # todo: to correctly handle things, we need to transform the camera instead.
-        ray_bundle = camera.generate_rays(camera_indices=0, aabb_box=aabb)
 
-        ray_bundle = ray_bundle.to(camera.device)
+        parent2world = _transformation(**parent)
+
+        if settings and "rotation" in settings:
+            settings2parent = _transformation(**settings)
+            settings2world = parent2world @ settings2parent
+            world2setting = torch.linalg.inv(settings2world)
+
+        else:
+            world2setting = torch.linalg.inv(parent2world)
+
+        camera2world = torch.eye(4)
+        camera2world[:3, :] = camera.camera_to_worlds[0]
+        camera2setting = world2setting @ camera2world
 
-        # note: Ge: this needs to be moved up the stack.
-        _lie_action(ray_bundle, **parent)
-        # apply render specific transformations
-        _lie_action(ray_bundle, **settings)
+        camera = deepcopy(camera)
+        camera.camera_to_worlds[0] = camera2setting[:3]
+
+        ray_bundle = camera.generate_rays(camera_indices=0, aabb_box=aabb)
+        ray_bundle = ray_bundle.to(camera.device)
 
         # timing is not accurate
         # with torch.no_grad(), logger.time("rendering", fmt=lambda s: f"{1000 * s:.1f}ms"):
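Ray generation now re-poses the camera instead of transforming the finished ray bundle: the camera-to-world matrix is mapped into the settings frame via inv(settings2world) @ camera2world. A small sketch of that composition, with identity 4x4 matrices standing in for real poses:

import torch

# Assume these are 4x4 homogeneous transforms (see _transformation above).
parent2world = torch.eye(4)
settings2parent = torch.eye(4)
camera2world = torch.eye(4)

settings2world = parent2world @ settings2parent
world2settings = torch.linalg.inv(settings2world)

# Express the camera pose in the settings frame, then keep only the 3x4 block
# that camera_to_worlds stores.
camera2settings = world2settings @ camera2world
camera_to_worlds_row = camera2settings[:3]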
@@ -242,22 +266,27 @@ def collector(
     return ray_gen
 
 
-# from .render_nodes import rgb, alpha, chainer
-# this is not a mix-in
-
-
 def collector(
     pipe: Chainer = None,
     channels: List[str] = None,
-    # nodes=List[RenderNode], # tuple(rgb, alpha),
+    channel_prefix: Union[str, Callable] = None,
 ):
     """
     Decorator for processing the rendered images.
+
+    Params:
+    pipe: this is the chain of operations that forms the processing flow.
+    channels: keys sent over the wire to the client. We use whitelisting to recude bandwidth.
+    layer_prefix: when this is set, the output channels are attached a prefix, to accommodate
+        conflicting keys in the pipeline.
+        Also takes in a function, to allow more dynamics generation of prefixes
     """
 
     def decorator(async_render):
         # @wraps(async_render)
+        # adding self argument to allow passing into the prefix function
         async def render_event(
+            self,
             *args,
             camera,
             pipe=pipe,
@@ -266,11 +295,8 @@ def collector(
 
             outputs = defaultdict(list)
 
-            async for output_chunk in async_render(*args, channels=channels, camera=camera, **kwargs):
+            async for output_chunk in async_render(self, *args, channels=channels, camera=camera, **kwargs):
                 signal = yield None
-                # not needed. Outer can simply delete.
-                # if signal == "TERMINATE":
-                #     return
 
                 # collect the results
                 for key, value in output_chunk.items():
@@ -283,11 +309,24 @@ def collector(
                 height=camera.height,
                 prefix="raw_",
             )
-
             # pipe = chainer(*nodes)
-            flow = pipe(**flow)
-            # filter out the non-requested channels
-            flow = {k: v for k, v in flow.items() if k in channels}
+            flow = pipe(
+                **flow,
+                render=kwargs["render"],
+                settings=kwargs["settings"],
+            )
+
+            # info: do NOT mutate channel_prefix. This is inside the closure and is shared between function calls.
+            prefix = channel_prefix
+            if callable(channel_prefix):
+                prefix = channel_prefix(self)
+
+            if prefix:
+                # select channels, and prefix with
+                flow = {prefix + k: v for k, v in flow.items() if k in channels}
+            else:
+                # filter out the non-requested channels
+                flow = {k: v for k, v in flow.items() if k in channels}
 
             yield ServerEvent(
                 etype="RENDER",
@@ -296,6 +335,10 @@ def collector(
                 data={**flow},
             )
 
+        # holds the current pipe.
+        render_event._pipeline = pipe
+        render_event._channels = channels
+
         return render_event
 
     return decorator
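The new channel_prefix option renames the whitelisted output channels so that two layers sharing pipeline keys do not collide on the client; it may be a string or a callable of the owning layer. A sketch of the selection-and-prefix step on a plain dictionary:

from typing import Callable, Optional, Union


def select_channels(flow: dict, channels: list, channel_prefix: Union[str, Callable, None] = None, owner=None) -> dict:
    # channel_prefix may be a literal string or a function of the owning layer.
    prefix = channel_prefix(owner) if callable(channel_prefix) else channel_prefix
    if prefix:
        return {prefix + k: v for k, v in flow.items() if k in channels}
    return {k: v for k, v in flow.items() if k in channels}


flow = {"rgb": "...", "alpha": "...", "raw_rgb": "..."}
print(select_channels(flow, channels=["rgb", "alpha"], channel_prefix="heatmap_"))
# {'heatmap_rgb': '...', 'heatmap_alpha': '...'}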
@@ -1,4 +1,5 @@
 from asyncio import sleep
+from collections import defaultdict
 from copy import deepcopy
 
 import numpy as np
@@ -17,13 +18,17 @@ from vuer.schemas import (
 class RenderVuer(Vuer):
     device = "cuda:0"
 
-    def __init__(self, render: Render = None, scene: Scene = None, **args):
-        super().__init__()
+    WEBSOCKET_MAX_SIZE = 2**28
+
+    def __init__(self, render: Render = None, scene: Scene = None, **kwargs):
+        super().__init__(**kwargs)
 
         self.scene = scene
         self.render = render
 
-        self.spawn(self.on_connect, start=True)
+        self.handlers = defaultdict(dict)
+
+        self.spawn(self.on_connect)
 
     async def on_connect(self, ws_id):
         # from ml_logger import logger
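RenderVuer now forwards its remaining keyword arguments to the base Vuer constructor and exposes WEBSOCKET_MAX_SIZE (2**28 bytes, i.e. 256 MiB) as a class attribute. A generic sketch of this pattern; the names Base, Derived, and MAX_MESSAGE_SIZE are illustrative placeholders, not part of the vuer API:

class Base:
    def __init__(self, **options):
        self.options = options          # the base class owns generic configuration


class Derived(Base):
    MAX_MESSAGE_SIZE = 2**28            # 256 MiB class-level cap, overridable per subclass

    def __init__(self, renderer=None, **kwargs):
        super().__init__(**kwargs)      # pass unrecognized options straight through
        self.renderer = renderer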
@@ -40,8 +45,6 @@ class RenderVuer(Vuer):
 
         self.set @ scene
 
-        cprint("Need to allow for mulitple event handlers", color="yellow")
-
         # need to explicitly terminate the coroutine
         while ws_id in self.ws:
             await sleep(0)
@@ -51,11 +54,24 @@ class RenderVuer(Vuer):
             if not event:
                 continue
 
+            if event.etype in self.handlers:
+                handlers = self.handlers[event.etype]
+                for fn_factory in handlers.values():
+                    # todo: see if we want to add throttling here.
+                    # also pass in an event handler.
+                    # Use an arrow function to avoid exposing the server instance.
+                    my_task = self.spawn_task(fn_factory(event, lambda e: self @ e))
+                    await sleep(0.0)
+
             if event == "CAMERA_MOVE":
                 value = event.value
                 self.clear()
 
-                world = value.pop("world")
+                world = value.pop("world", None)
+                if world is None:
+                    print("initial camera movement does not contain world params.")
+                    continue
+
                 camera = value.pop("camera")
                 render_params = value.pop("render", {})
                 # initially the values are non.
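The new self.handlers registry maps an event type to named handler factories; each factory receives the event plus a small send callback (lambda e: self @ e) so it never touches the server instance directly. A standalone asyncio sketch of the same dispatch pattern; the Dispatcher class and add_handler method are hypothetical illustrations, not API added by this diff.

import asyncio
from collections import defaultdict


class Dispatcher:
    def __init__(self):
        # etype -> {handler_name: factory(event, send) returning a coroutine}
        self.handlers = defaultdict(dict)

    def add_handler(self, etype, name, factory):
        self.handlers[etype][name] = factory

    async def dispatch(self, etype, event, send):
        for factory in self.handlers[etype].values():
            asyncio.create_task(factory(event, send))
            await asyncio.sleep(0)  # let the freshly spawned task start


async def send_to_client(message):
    print("sending", message)


async def on_camera_move(event, send):
    await send({"ack": event})


async def main():
    dispatcher = Dispatcher()
    dispatcher.add_handler("CAMERA_MOVE", "ack", on_camera_move)
    await dispatcher.dispatch("CAMERA_MOVE", {"x": 1.0}, send=send_to_client)


asyncio.run(main())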
@@ -88,7 +104,7 @@ class RenderVuer(Vuer):
                 async for render_response in self.render.render(
                     camera=quick_cam,
                     world=world,
-                    render=render_params,
+                    render=deepcopy(render_params),
                     # other params
                     chunk_size=8096,
                     to_cpu="features",
@@ -107,7 +123,7 @@ class RenderVuer(Vuer):
                 async for render_response in self.render.render(
                     camera=camera,
                     world=world,
-                    render=render_params,
+                    render=deepcopy(render_params),
                     # other params
                     chunk_size=8096,
                     to_cpu="features",
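Passing deepcopy(render_params) shields the caller's dictionary from in-place mutation inside the render pipeline; without the copy, the second render call would see whatever the first call modified. A tiny illustration of the hazard:

from copy import deepcopy


def render(params):
    params["use_aabb"] = True        # a pipeline stage mutating its input
    return params


original = {"use_aabb": False}
render(deepcopy(original))           # original is untouched
render(original)                     # original["use_aabb"] is now True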
@@ -1,12 +1,11 @@
 from typing import List, Generator
 
 from vuer.addons.nerf_vuer.mixins import collector, process_aabb, process_world, collect_rays, chunk_rays
-from vuer.addons.nerf_vuer.render_nodes import rgb, Chainer, alpha
-from vuer.addons.nerf_vuer.control_components import Controls
+from vuer.addons.nerf_vuer.render_nodes import Chainer, RGBA
 from vuer.events import ServerEvent
 from vuer.schemas import SceneElement
 
-from instant_feature.viewer.neko.constants.default_settings import RENDER_DEFAULT, RGB_DEFAULT
+from instant_feature.viewer.nerf_vuer.constants.default_settings import RENDER_DEFAULT, RGB_DEFAULT
 
 
 # =============== Leva Components ===============
@@ -55,12 +54,6 @@ class Render(SceneElement):
             **kwargs,
         )
 
-    # def serialize(self):
-    #     obj = super().serialize()
-    #     # if self.settings:
-    #     #     obj["settings"] = self.settings
-    #     return obj
-
     def __post_init__(self):
         self._fields = {}
         for child in self.children:
@@ -90,21 +83,30 @@ class Render(SceneElement):
 class RenderLayer(SceneElement):
     tag = "RenderLayer"
 
+    channel = "rgb"
+    alphaChannel = "alpha"
+    displacementMap = None
+    distance = 10.1
+
+    settings = RGB_DEFAULT
+
     def __init__(
         self,
-        channel="rgb",
-        alphaChannel="alpha",
+        channel=None,
+        alphaChannel=None,
         displacementMap=None,
         geometry="plane",
-        settings=RGB_DEFAULT,
+        settings=None,
+        distance=None,
        **kwargs,
     ):
        super().__init__(
-            channel=channel,
-            alphaChannel=alphaChannel,
-            displacementMap=displacementMap,
+            channel=channel or self.channel,
+            alphaChannel=alphaChannel or self.alphaChannel,
+            displacementMap=displacementMap or self.displacementMap,
             geometry=geometry,
-            settings=settings,
+            settings=settings or self.settings,
+            distance=distance or self.distance,
             **kwargs,
         )
 
@@ -112,7 +114,7 @@ class RenderLayer(SceneElement):
         self._render = _render
 
     @collector(
-        pipe=Chainer(rgb, alpha),
+        pipe=Chainer(RGBA.rgb, RGBA.alpha),
         channels=["rgb", "alpha"],
     )
     @process_aabb
@@ -120,10 +122,7 @@ class RenderLayer(SceneElement):
     @collect_rays
     @chunk_rays
     def render(self, ray_bundle, **kwargs) -> Generator[ServerEvent, None, None]:
-        import torch
-
-        with torch.no_grad():
-            return self._render(ray_bundle=ray_bundle, **kwargs)
+        return self._render(ray_bundle=ray_bundle, **kwargs)
 
 
 class Heatmap(RenderLayer):
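The RenderLayer defaults above moved to class attributes, with __init__ falling back to them via "value or self.attr", so subclasses such as Heatmap can override a default without redefining __init__. One caveat of this pattern: falsy arguments (0, empty string, False) also fall back to the class default. A compact sketch:

class Layer:
    channel = "rgb"        # class-level defaults, easy to override in subclasses
    distance = 10.1

    def __init__(self, channel=None, distance=None):
        self.channel = channel or self.channel
        self.distance = distance or self.distance


class HeatmapLayer(Layer):
    channel = "heatmap"    # subclass override without touching __init__


print(Layer().channel)             # rgb
print(HeatmapLayer().channel)      # heatmap
print(Layer(distance=0).distance)  # 10.1 — falsy values fall back to the default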