vispy-0.12.0-cp310-cp310-win_amd64.whl → vispy-0.14.0-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vispy/app/backends/_glfw.py +1 -1
- vispy/app/backends/_qt.py +78 -53
- vispy/color/color_array.py +8 -1
- vispy/color/colormap.py +4 -24
- vispy/geometry/meshdata.py +76 -38
- vispy/geometry/tests/test_meshdata.py +72 -0
- vispy/gloo/gl/_constants.py +3 -3
- vispy/gloo/program.py +1 -1
- vispy/gloo/texture.py +9 -4
- vispy/scene/cameras/base_camera.py +4 -0
- vispy/scene/cameras/panzoom.py +4 -1
- vispy/scene/cameras/perspective.py +6 -0
- vispy/scene/cameras/tests/test_perspective.py +37 -0
- vispy/scene/canvas.py +9 -5
- vispy/scene/events.py +9 -0
- vispy/scene/node.py +18 -1
- vispy/scene/tests/test_visuals.py +113 -0
- vispy/scene/visuals.py +5 -1
- vispy/util/gallery_scraper.py +8 -0
- vispy/util/tests/test_gallery_scraper.py +2 -0
- vispy/version.py +2 -3
- vispy/visuals/__init__.py +1 -0
- vispy/visuals/filters/__init__.py +3 -2
- vispy/visuals/filters/base_filter.py +120 -0
- vispy/visuals/filters/markers.py +28 -0
- vispy/visuals/filters/mesh.py +61 -6
- vispy/visuals/filters/tests/test_primitive_picking_filters.py +70 -0
- vispy/visuals/instanced_mesh.py +152 -0
- vispy/visuals/line/dash_atlas.py +46 -41
- vispy/visuals/markers.py +63 -33
- vispy/visuals/mesh.py +2 -2
- vispy/visuals/tests/test_instanced_mesh.py +50 -0
- vispy/visuals/tests/test_markers.py +6 -0
- vispy/visuals/tests/test_mesh.py +17 -0
- vispy/visuals/text/_sdf_cpu.cp310-win_amd64.pyd +0 -0
- vispy/visuals/text/_sdf_cpu.pyx +21 -23
- vispy/visuals/tube.py +1 -1
- vispy/visuals/visual.py +142 -1
- vispy/visuals/volume.py +19 -10
- {vispy-0.12.0.dist-info → vispy-0.14.0.dist-info}/LICENSE.txt +1 -1
- {vispy-0.12.0.dist-info → vispy-0.14.0.dist-info}/METADATA +7 -6
- {vispy-0.12.0.dist-info → vispy-0.14.0.dist-info}/RECORD +44 -40
- {vispy-0.12.0.dist-info → vispy-0.14.0.dist-info}/WHEEL +1 -1
- {vispy-0.12.0.dist-info → vispy-0.14.0.dist-info}/top_level.txt +0 -0
vispy/app/backends/_glfw.py
CHANGED
@@ -250,7 +250,7 @@ class CanvasBackend(BaseCanvasBackend):
                 raise ValueError('fullscreen must be <= %s'
                                  % len(monitor))
             monitor = monitor[p.fullscreen]
-            use_size = glfw.get_video_mode(monitor)[:2]
+            use_size = glfw.get_video_mode(monitor)[0][:2]
             if use_size != tuple(p.size):
                 logger.debug('Requested size %s, will be ignored to '
                              'use fullscreen mode %s' % (p.size, use_size))
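The one-line fix above tracks the pyGLFW API, where get_video_mode() returns a VideoMode structure whose first field is the monitor size rather than a flat (width, height, ...) tuple. A minimal standalone sketch of the assumed behaviour (not part of the diff; needs the glfw package and a display):

import glfw

glfw.init()
monitor = glfw.get_primary_monitor()
mode = glfw.get_video_mode(monitor)
# VideoMode(size, bits, refresh_rate): the (width, height) pair is mode[0],
# also reachable as mode.size, which is what the patched line indexes.
width, height = mode[0][:2]
print(width, height)
glfw.terminate()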
vispy/app/backends/_qt.py
CHANGED
@@ -22,6 +22,7 @@ known to cause unpredictable behavior and segfaults.
 from __future__ import division
 
 from time import sleep, time
+import math
 import os
 import sys
 import atexit
@@ -410,17 +411,10 @@ class QtBaseCanvasBackend(BaseCanvasBackend):
             # either not PyQt5 backend or no parent window available
             pass
 
-        #
-        #
-
-
-        if sys.platform == 'darwin':
-            if PYQT6_API:
-                self.setAttribute(QtCore.Qt.WidgetAttribute.WA_AcceptTouchEvents)
-                self.grabGesture(QtCore.Qt.GestureType.PinchGesture)
-            else:
-                self.setAttribute(QtCore.Qt.WA_AcceptTouchEvents)
-                self.grabGesture(QtCore.Qt.PinchGesture)
+        # QNativeGestureEvent does not keep track of last or total
+        # values like QGestureEvent does
+        self._native_gesture_scale_values = []
+        self._native_gesture_rotation_values = []
 
     def screen_changed(self, new_screen):
         """Window moved from one display to another, resize canvas.
@@ -563,50 +557,81 @@ class QtBaseCanvasBackend(BaseCanvasBackend):
     def keyReleaseEvent(self, ev):
         self._keyEvent(self._vispy_canvas.events.key_release, ev)
 
+    def _handle_native_gesture_event(self, ev):
+        if self._vispy_canvas is None:
+            return
+        t = ev.gestureType()
+        # this is a workaround for what looks like a Qt bug where
+        # QNativeGestureEvent gives the wrong local position.
+        # See: https://bugreports.qt.io/browse/QTBUG-59595
+        try:
+            pos = self.mapFromGlobal(ev.globalPosition().toPoint())
+        except AttributeError:
+            # globalPos is deprecated in Qt6
+            pos = self.mapFromGlobal(ev.globalPos())
+        pos = pos.x(), pos.y()
+
+        if t == QtCore.Qt.NativeGestureType.BeginNativeGesture:
+            self._vispy_canvas.events.touch(
+                type='gesture_begin',
+                pos=_get_event_xy(ev),
+            )
+        elif t == QtCore.Qt.NativeGestureType.EndNativeGesture:
+            self._native_touch_total_rotation = []
+            self._native_touch_total_scale = []
+            self._vispy_canvas.events.touch(
+                type='gesture_end',
+                pos=_get_event_xy(ev),
+            )
+        elif t == QtCore.Qt.NativeGestureType.RotateNativeGesture:
+            angle = ev.value()
+            last_angle = (
+                self._native_gesture_rotation_values[-1]
+                if self._native_gesture_rotation_values
+                else None
+            )
+            self._native_gesture_rotation_values.append(angle)
+            total_rotation_angle = math.fsum(self._native_gesture_rotation_values)
+            self._vispy_canvas.events.touch(
+                type="gesture_rotate",
+                pos=pos,
+                rotation=angle,
+                last_rotation=last_angle,
+                total_rotation_angle=total_rotation_angle,
+            )
+        elif t == QtCore.Qt.NativeGestureType.ZoomNativeGesture:
+            scale = ev.value()
+            last_scale = (
+                self._native_gesture_scale_values[-1]
+                if self._native_gesture_scale_values
+                else None
+            )
+            self._native_gesture_scale_values.append(scale)
+            total_scale_factor = math.fsum(self._native_gesture_scale_values)
+            self._vispy_canvas.events.touch(
+                type="gesture_zoom",
+                pos=pos,
+                last_scale=last_scale,
+                scale=scale,
+                total_scale_factor=total_scale_factor,
+            )
+        # QtCore.Qt.NativeGestureType.PanNativeGesture
+        # Qt6 docs seem to imply this is only supported on Wayland but I have
+        # not been able to test it.
+        # Two finger pan events are anyway converted to scroll/wheel events.
+        # On macOS, more fingers are usually swallowed by the OS (by spaces,
+        # mission control, etc.).
+
     def event(self, ev):
         out = super(QtBaseCanvasBackend, self).event(ev)
-
-
-
-
-
-
-
-
-        if t == qt_event_types.Gesture:
-            pinch_gesture = QtCore.Qt.GestureType.PinchGesture if PYQT6_API else QtCore.Qt.PinchGesture
-            gesture = ev.gesture(pinch_gesture)
-            if gesture:
-                (x, y) = _get_qpoint_pos(gesture.centerPoint())
-                scale = gesture.scaleFactor()
-                last_scale = gesture.lastScaleFactor()
-                rotation = gesture.rotationAngle()
-                self._vispy_canvas.events.touch(
-                    type="pinch",
-                    pos=(x, y),
-                    last_pos=None,
-                    scale=scale,
-                    last_scale=last_scale,
-                    rotation=rotation,
-                    total_rotation_angle=gesture.totalRotationAngle(),
-                    total_scale_factor=gesture.totalScaleFactor(),
-                )
-        # General touch event.
-        elif t == qt_event_types.TouchUpdate:
-            if qt_lib == 'pyqt6' or qt_lib == 'pyside6':
-                points = ev.points()
-                # These variables are lists of (x, y) coordinates.
-                pos = [_get_qpoint_pos(p.position()) for p in points]
-                lpos = [_get_qpoint_pos(p.lastPosition()) for p in points]
-            else:
-                points = ev.touchPoints()
-                # These variables are lists of (x, y) coordinates.
-                pos = [_get_qpoint_pos(p.pos()) for p in points]
-                lpos = [_get_qpoint_pos(p.lastPos()) for p in points]
-            self._vispy_canvas.events.touch(type='touch',
-                                            pos=pos,
-                                            last_pos=lpos,
-                                            )
+
+        # QNativeGestureEvent is Qt 5+
+        if (
+            (QT5_NEW_API or PYSIDE6_API or PYQT6_API)
+            and isinstance(ev, QtGui.QNativeGestureEvent)
+        ):
+            self._handle_native_gesture_event(ev)
+
         return out
 
     def _keyEvent(self, func, ev):
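With this backend rework, macOS trackpad pinch and rotate gestures reach the canvas as touch events typed 'gesture_begin', 'gesture_zoom', 'gesture_rotate' and 'gesture_end' instead of the old 'pinch' type. A minimal sketch of listening for them (attribute names taken from the hunk above; the canvas setup is ordinary vispy usage, not code from this release):

from vispy import app

canvas = app.Canvas(keys='interactive')


@canvas.events.touch.connect
def on_touch(event):
    # scale/rotation are per-event deltas; the backend also accumulates them
    # into total_scale_factor / total_rotation_angle with math.fsum.
    if event.type == 'gesture_zoom':
        print('zoom delta:', event.scale)
    elif event.type == 'gesture_rotate':
        print('rotation delta:', event.rotation)


if __name__ == '__main__':
    canvas.show()
    app.run()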
vispy/color/color_array.py
CHANGED
@@ -21,7 +21,7 @@ def _string_to_rgb(color):
     if not color.startswith('#'):
         if color.lower() not in _color_dict:
             raise ValueError('Color "%s" unknown' % color)
-        color = _color_dict[color]
+        color = _color_dict[color.lower()]
     assert color[0] == '#'
     # hex color
     color = color[1:]
@@ -163,6 +163,13 @@ class ColorArray(object):
         """Helper to get the class name once it's been created"""
         return cls.__name__
 
+    def __array__(self, dtype=None):
+        """Get a standard numpy array representing RGBA."""
+        rgba = self.rgba
+        if dtype is not None:
+            rgba = rgba.astype(dtype)
+        return rgba
+
     def __len__(self):
         return self._rgba.shape[0]
 
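The new __array__ hook lets a ColorArray be handed straight to numpy. A small sketch of what that enables (example values are mine, not from the diff):

import numpy as np
from vispy.color import ColorArray

colors = ColorArray(['red', '#00ff00'])
rgba = np.asarray(colors)                     # (2, 4) RGBA via __array__
rgba32 = np.asarray(colors, dtype=np.float32)
print(rgba.shape, rgba32.dtype)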
|
vispy/color/colormap.py
CHANGED
@@ -1092,17 +1092,13 @@ _colormaps = dict(
 )
 
 
-def get_colormap(name, *args, **kwargs):
-    """Obtain a colormap.
+def get_colormap(name):
+    """Obtain a colormap by name.
 
     Parameters
     ----------
     name : str | Colormap
         Colormap name. Can also be a Colormap for pass-through.
-    *args:
-        Deprecated.
-    **kwargs
-        Deprecated.
 
     Examples
     --------
@@ -1111,18 +1107,10 @@ def get_colormap(name, *args, **kwargs):
 
     .. versionchanged: 0.7
 
-        Additional args/kwargs are no longer accepted. Colormap
-        no longer created on the fly.
-        (``CubeHelixColormap``), ``single_hue`` (``SingleHue``), ``hsl``
-        (``HSL``), ``husl`` (``HSLuv``), ``diverging`` (``Diverging``), or
-        ``RdYeBuCy`` (``RedYellowBlueCyan``) colormap you must import and
-        instantiate it directly from the ``vispy.color.colormap`` module.
+        Additional args/kwargs are no longer accepted. Colormap instances are
+        no longer created on the fly.
 
     """
-    if args or kwargs:
-        warnings.warn("Creating a Colormap instance with 'get_colormap' is "
-                      "no longer supported. No additional arguments or "
-                      "keyword arguments should be passed.", DeprecationWarning)
     if isinstance(name, BaseColormap):
         return name
 
@@ -1130,14 +1118,6 @@ def get_colormap(name, *args, **kwargs):
         raise TypeError('colormap must be a Colormap or string name')
     if name in _colormaps:  # vispy cmap
         cmap = _colormaps[name]
-        if name in ("cubehelix", "single_hue", "hsl", "husl", "diverging", "RdYeBuCy"):
-            warnings.warn(
-                f"Colormap '{name}' has been deprecated since vispy 0.7. "
-                f"Please import and create 'vispy.color.colormap.{cmap.__class__.__name__}' "
-                "directly instead.",
-                DeprecationWarning,
-                stacklevel=2,
-            )
 
     elif has_matplotlib():  # matplotlib cmap
         try:
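After this cleanup get_colormap is a plain registry lookup, and parameterized colormaps are instantiated directly from vispy.color.colormap. A short usage sketch (assuming the built-in 'viridis' entry):

from vispy.color import get_colormap
from vispy.color.colormap import CubeHelixColormap

cmap = get_colormap('viridis')        # name lookup only, no extra arguments
custom = CubeHelixColormap()          # parameterized maps are built directly
print(cmap.map([0.0, 0.5, 1.0]))      # RGBA rows for scalar inputs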
vispy/geometry/meshdata.py
CHANGED
@@ -19,6 +19,56 @@ def _fix_colors(colors):
     return colors
 
 
+def _compute_face_normals(vertices):
+    if vertices.shape[1:] != (3, 3):
+        raise ValueError("Expected (N, 3, 3) array of vertices repeated on"
+                         f" the triangle corners, got {vertices.shape}.")
+    edges1 = vertices[:, 1] - vertices[:, 0]
+    edges2 = vertices[:, 2] - vertices[:, 0]
+    return np.cross(edges1, edges2)
+
+
+def _repeat_face_normals_on_corners(normals):
+    if normals.shape[1:] != (3,):
+        raise ValueError("Expected (F, 3) array of face normals, got"
+                         f" {normals.shape}.")
+    n_corners_in_face = 3
+    new_shape = (normals.shape[0], n_corners_in_face, normals.shape[1])
+    return np.repeat(normals, n_corners_in_face, axis=0).reshape(new_shape)
+
+
+def _compute_vertex_normals(face_normals, faces, vertices):
+    if face_normals.shape[1:] != (3,):
+        raise ValueError("Expected (F, 3) array of face normals, got"
+                         f" {face_normals.shape}.")
+    if faces.shape[1:] != (3,):
+        raise ValueError("Expected (F, 3) array of face vertex indices, got"
+                         f" {faces.shape}.")
+    if vertices.shape[1:] != (3,):
+        raise ValueError("Expected (N, 3) array of vertices, got"
+                         f" {vertices.shape}.")
+
+    vertex_normals = np.zeros_like(vertices)
+    n_corners_in_triangle = 3
+    face_normals_repeated_on_corners = np.repeat(face_normals,
+                                                 n_corners_in_triangle,
+                                                 axis=0)
+    # NOTE: The next line is equivalent to
+    #
+    #     vertex_normals[self._faces.ravel()] += face_normals_repeated_on_corners
+    #
+    # except that it accumulates the values from the right hand side at
+    # repeated indices on the left hand side, instead of overwritting them,
+    # like in the above.
+    np.add.at(vertex_normals, faces.ravel(), face_normals_repeated_on_corners)
+
+    norms = np.sqrt((vertex_normals**2).sum(axis=1))
+    nonzero_norms = norms > 0
+    vertex_normals[nonzero_norms] /= norms[nonzero_norms][:, None]
+
+    return vertex_normals
+
+
 class MeshData(object):
     """
     Class for storing and operating on 3D mesh data.
@@ -141,7 +191,7 @@ class MeshData(object):
             self._compute_edges(indexed='faces')
             return self._edges_indexed_by_faces
         else:
-            raise
+            raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
 
     def set_faces(self, faces):
         """Set the faces
@@ -191,7 +241,7 @@ class MeshData(object):
                 self._vertices[self.get_faces()]
             return self._vertices_indexed_by_faces
         else:
-            raise
+            raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
 
     def get_bounds(self):
         """Get the mesh bounds
@@ -234,7 +284,7 @@ class MeshData(object):
             if verts is not None:
                 self._vertices_indexed_by_faces = verts
             else:
-                raise
+                raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
 
         if reset_normals:
             self.reset_normals()
@@ -293,22 +343,19 @@ class MeshData(object):
         normals : ndarray
             The normals.
         """
+        if indexed not in (None, 'faces'):
+            raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
+
         if self._face_normals is None:
-
-            self._face_normals =
-                                 v[:, 2] - v[:, 0])
+            vertices = self.get_vertices(indexed='faces')
+            self._face_normals = _compute_face_normals(vertices)
 
-        if indexed is None:
-
-
-
-
-                norms[:] = self._face_normals[:, np.newaxis, :]
-                self._face_normals_indexed_by_faces = norms
-            return self._face_normals_indexed_by_faces
-        else:
-            raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
+        if indexed == 'faces' and self._face_normals_indexed_by_faces is None:
+            self._face_normals_indexed_by_faces = \
+                _repeat_face_normals_on_corners(self._face_normals)
+
+        return (self._face_normals if indexed is None
+                else self._face_normals_indexed_by_faces)
 
     def get_vertex_normals(self, indexed=None):
         """Get vertex normals
@@ -326,29 +373,20 @@ class MeshData(object):
         normals : ndarray
             The normals.
         """
+        if indexed not in (None, 'faces'):
+            raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
+
         if self._vertex_normals is None:
-
-
-
-
-
-                faces = vertFaces[vindex]
-                if len(faces) == 0:
-                    self._vertex_normals[vindex] = (0, 0, 0)
-                    continue
-                norms = faceNorms[faces]  # get all face normals
-                norm = norms.sum(axis=0)  # sum normals
-                renorm = (norm**2).sum()**0.5
-                if renorm > 0:
-                    norm /= renorm
-                self._vertex_normals[vindex] = norm
+            face_normals = self.get_face_normals()
+            faces = self.get_faces()
+            vertices = self.get_vertices()
+            self._vertex_normals = _compute_vertex_normals(face_normals, faces,
+                                                           vertices)
 
         if indexed is None:
             return self._vertex_normals
         elif indexed == 'faces':
             return self._vertex_normals[self.get_faces()]
-        else:
-            raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
 
     def get_vertex_colors(self, indexed=None):
         """Get vertex colors
@@ -373,7 +411,7 @@ class MeshData(object):
                 self._vertex_colors[self.get_faces()]
             return self._vertex_colors_indexed_by_faces
         else:
-            raise
+            raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
 
     def get_vertex_values(self, indexed=None):
         """Get vertex colors
@@ -398,7 +436,7 @@ class MeshData(object):
                 self._vertex_values[self.get_faces()]
            return self._vertex_values_indexed_by_faces
         else:
-            raise
+            raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
 
     def set_vertex_colors(self, colors, indexed=None):
         """Set the vertex color array
@@ -488,7 +526,7 @@ class MeshData(object):
                 self._face_colors.reshape(Nf, 1, 4)
             return self._face_colors_indexed_by_faces
         else:
-            raise
+            raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
 
     def set_face_colors(self, colors, indexed=None):
         """Set the face color array
@@ -611,7 +649,7 @@ class MeshData(object):
             raise Exception("MeshData cannot generate edges--no faces in "
                             "this data.")
         else:
-            raise
+            raise ValueError("Invalid indexing mode. Accepts: None, 'faces'")
 
     def save(self):
         """Serialize this mesh to a string appropriate for disk storage
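The np.add.at call above is the heart of the rewritten vertex-normal computation; as the code comment notes, it accumulates contributions at repeated indices where fancy-index assignment keeps only one. A standalone illustration (example values are mine):

import numpy as np

idx = np.array([0, 0, 2])            # index 0 appears twice, like a shared vertex
vals = np.array([1.0, 2.0, 3.0])

a = np.zeros(3)
a[idx] += vals                       # buffered: only one write to index 0 survives
print(a)                             # [2. 0. 3.]

b = np.zeros(3)
np.add.at(b, idx, vals)              # unbuffered: both contributions are summed
print(b)                             # [3. 0. 3.]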
vispy/geometry/tests/test_meshdata.py
CHANGED
@@ -31,4 +31,76 @@ def test_meshdata():
     assert_array_equal(square_edges, mesh.get_edges())
 
 
+def test_vertex_normals_indexed_none():
+    dtype_float = np.float32
+    dtype_int = np.int64
+    vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
+                        dtype=dtype_float)
+    faces = np.array([[0, 2, 1], [0, 3, 2], [0, 1, 3]], dtype=dtype_int)
+    mesh = MeshData(vertices=vertices, faces=faces)
+    vertex_normals_unnormalized = np.array(
+        [[-1, -1, -1], [0, -1, -1], [-1, 0, -1], [-1, -1, 0]],
+        dtype=dtype_float)
+    norms = np.sqrt((vertex_normals_unnormalized**2).sum(axis=1,
+                                                         keepdims=True))
+    expected_vertex_normals = vertex_normals_unnormalized / norms
+
+    computed_vertex_normals = mesh.get_vertex_normals(indexed=None)
+
+    assert_array_equal(expected_vertex_normals, computed_vertex_normals)
+
+
+def test_vertex_normals_indexed_faces():
+    dtype_float = np.float32
+    dtype_int = np.int64
+    vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
+                        dtype=dtype_float)
+    faces = np.array([[0, 2, 1], [0, 3, 2], [0, 1, 3]], dtype=dtype_int)
+    mesh = MeshData(vertices=vertices, faces=faces)
+    vertex_normals_unnormalized = np.array(
+        [[-1, -1, -1], [0, -1, -1], [-1, 0, -1], [-1, -1, 0]],
+        dtype=dtype_float)
+    norms = np.sqrt((vertex_normals_unnormalized**2).sum(axis=1,
+                                                         keepdims=True))
+    vertex_normals = vertex_normals_unnormalized / norms
+    expected_vertex_normals = vertex_normals[faces]
+
+    computed_vertex_normals = mesh.get_vertex_normals(indexed="faces")
+
+    assert_array_equal(expected_vertex_normals, computed_vertex_normals)
+
+
+def test_face_normals_indexed_none():
+    dtype_float = np.float32
+    dtype_int = np.int64
+    vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
+                        dtype=dtype_float)
+    faces = np.array([[0, 2, 1], [0, 3, 2], [0, 1, 3]], dtype=dtype_int)
+    mesh = MeshData(vertices=vertices, faces=faces)
+    expected_face_normals = np.array([[0, 0, -1], [-1, 0, 0], [0, -1, 0]],
+                                     dtype=dtype_float)
+
+    computed_face_normals = mesh.get_face_normals(indexed=None)
+
+    assert_array_equal(expected_face_normals, computed_face_normals)
+
+
+def test_face_normals_indexed_faces():
+    dtype_float = np.float32
+    dtype_int = np.int64
+    vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
+                        dtype=dtype_float)
+    faces = np.array([[0, 2, 1], [0, 3, 2], [0, 1, 3]], dtype=dtype_int)
+    mesh = MeshData(vertices=vertices, faces=faces)
+    expected_face_normals = np.array([
+        [[0, 0, -1], [0, 0, -1], [0, 0, -1]],
+        [[-1, 0, 0], [-1, 0, 0], [-1, 0, 0]],
+        [[0, -1, 0], [0, -1, 0], [0, -1, 0]]],
+        dtype=dtype_float)
+
+    computed_face_normals = mesh.get_face_normals(indexed="faces")
+
+    assert_array_equal(expected_face_normals, computed_face_normals)
+
+
 run_tests_if_main()
vispy/gloo/gl/_constants.py
CHANGED
@@ -326,7 +326,7 @@ GL_ZERO = Enum('GL_ZERO', 0)
 
 
 ENUM_MAP = {}
-for ob in list(globals().
-    if
+for var_name, ob in list(globals().items()):
+    if var_name.startswith('GL_'):
         ENUM_MAP[int(ob)] = ob
-del ob
+del ob, var_name
vispy/gloo/program.py
CHANGED
@@ -502,7 +502,7 @@ class Program(GLObject):
         sizes = [a.size for a in attrs]
         if not all(s == sizes[0] for s in sizes[1:]):
            msg = '\n'.join([f'{str(a)}: {a.size}' for a in attrs])
-            raise RuntimeError('All attributes must have the same size, got:\n{msg}')
+            raise RuntimeError(f'All attributes must have the same size, got:\n{msg}')
 
         attrs_with_div = [a for a in attributes if a not in attrs]
         if attrs_with_div:
vispy/gloo/texture.py
CHANGED
@@ -33,17 +33,22 @@ def convert_dtype_and_clip(data, dtype, copy=False):
     else:
         # to reduce copying, we clip into a pre-generated array of the right dtype
         new_data = np.empty_like(data, dtype=dtype)
-
+        # allow "unsafe" casting here as we're explicitly clipping to the
+        # range of the new dtype - this was a default before numpy 1.25
+        np.clip(data, new_min, new_max, out=new_data, casting="unsafe")
         return new_data
 
 
-def downcast_to_32bit_if_needed(data, copy=False):
+def downcast_to_32bit_if_needed(data, copy=False, dtype=None):
     """Downcast to 32bit dtype if necessary."""
-    dtype
+    if dtype is None:
+        dtype = data.dtype
+    dtype = np.dtype(dtype)
     if dtype.itemsize > 4:
         warnings.warn(
             f"GPUs can't support dtypes bigger than 32-bit, but got '{dtype}'. "
-            "Precision will be lost due to downcasting to 32-bit."
+            "Precision will be lost due to downcasting to 32-bit.",
+            stacklevel=2,
         )
 
     size = min(dtype.itemsize, 4)
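Two things stand out in the texture hunk: np.clip now writes into the pre-allocated buffer with explicit casting="unsafe" (per the comment, the permissive behaviour older numpy applied by default), and downcast_to_32bit_if_needed gained an optional dtype override. A standalone sketch of the clipping part (example values are mine):

import numpy as np

data = np.array([-10.0, 0.5, 300.0])
out = np.empty_like(data, dtype=np.uint8)
# Clip in float, then cast into the uint8 buffer; without casting="unsafe"
# a strict-casting numpy refuses the float64 -> uint8 write through out=.
np.clip(data, 0, 255, out=out, casting="unsafe")
print(out)  # [  0   0 255]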
vispy/scene/cameras/base_camera.py
CHANGED
@@ -133,6 +133,8 @@ class BaseCamera(Node):
         viewbox.events.mouse_release.connect(self.viewbox_mouse_event)
         viewbox.events.mouse_move.connect(self.viewbox_mouse_event)
         viewbox.events.mouse_wheel.connect(self.viewbox_mouse_event)
+        viewbox.events.gesture_zoom.connect(self.viewbox_mouse_event)
+        viewbox.events.gesture_rotate.connect(self.viewbox_mouse_event)
         viewbox.events.resize.connect(self.viewbox_resize_event)
         # todo: also add key events! (and also on viewbox (they're missing)
 
@@ -144,6 +146,8 @@ class BaseCamera(Node):
         viewbox.events.mouse_release.disconnect(self.viewbox_mouse_event)
         viewbox.events.mouse_move.disconnect(self.viewbox_mouse_event)
         viewbox.events.mouse_wheel.disconnect(self.viewbox_mouse_event)
+        viewbox.events.gesture_zoom.disconnect(self.viewbox_mouse_event)
+        viewbox.events.gesture_rotate.disconnect(self.viewbox_mouse_event)
         viewbox.events.resize.disconnect(self.viewbox_resize_event)
 
     @property
vispy/scene/cameras/panzoom.py
CHANGED
@@ -207,7 +207,10 @@ class PanZoomCamera(BaseCamera):
             center = self._scene_transform.imap(event.pos)
             self.zoom((1 + self.zoom_factor)**(-event.delta[1] * 30), center)
             event.handled = True
-
+        elif event.type == 'gesture_zoom':
+            center = self._scene_transform.imap(event.pos)
+            self.zoom(1 - event.scale, center)
+            event.handled = True
         elif event.type == 'mouse_move':
             if event.press_event is None:
                 return
vispy/scene/cameras/perspective.py
CHANGED
@@ -62,6 +62,12 @@ class PerspectiveCamera(BaseCamera):
             if self._distance is not None:
                 self._distance *= s
             self.view_changed()
+        elif event.type == 'gesture_zoom':
+            s = 1 - event.scale
+            self._scale_factor *= s
+            if self._distance is not None:
+                self._distance *= s
+            self.view_changed()
 
     @property
     def scale_factor(self):
vispy/scene/cameras/tests/test_perspective.py
CHANGED
@@ -82,4 +82,41 @@ def test_panzoom_center():
     assert v.camera.center == (-12.8, -12.8, 0)
 
 
+@requires_application()
+def test_panzoom_gesture_zoom():
+    with TestingCanvas(size=(120, 200)) as canvas:
+        view = canvas.central_widget.add_view()
+        imdata = io.load_crate().astype('float32') / 255
+        scene.visuals.Image(imdata, parent=view.scene)
+        view.camera = scene.PanZoomCamera(aspect=1)
+
+        assert view.camera.rect.size == (1, 1)
+
+        canvas.events.touch(
+            type="gesture_zoom",
+            pos=(60, 100),
+            scale=-1.0,
+        )
+
+        assert view.camera.rect.size == (2, 2)
+
+
+@requires_application()
+def test_turntable_gesture_zoom():
+    with TestingCanvas(size=(120, 200)) as canvas:
+        view = canvas.central_widget.add_view()
+        imdata = io.load_crate().astype('float32') / 255
+        scene.visuals.Image(imdata, parent=view.scene)
+        view.camera = scene.TurntableCamera()
+
+        initial_scale_factor = view.camera.scale_factor
+        canvas.events.touch(
+            type="gesture_zoom",
+            pos=(60, 100),
+            scale=-1.0,
+        )
+
+        assert view.camera.scale_factor == 2 * initial_scale_factor
+
+
 run_tests_if_main()