neuromeka-vfm 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuromeka_vfm/__init__.py +0 -2
- neuromeka_vfm/generate_mesh.py +122 -0
- {neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/METADATA +89 -73
- {neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/RECORD +8 -8
- {neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/WHEEL +1 -1
- neuromeka_vfm/grasp_gen.py +0 -79
- {neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/entry_points.txt +0 -0
- {neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/licenses/LICENSE +0 -0
- {neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/top_level.txt +0 -0
neuromeka_vfm/__init__.py
CHANGED
@@ -2,7 +2,6 @@ from .pose_estimation import PoseEstimation, FoundationPoseClient
 from .upload_mesh import upload_mesh
 from .segmentation import Segmentation, NrmkRealtimeSegmentation
 from .compression import STRATEGIES as SEGMENTATION_COMPRESSION_STRATEGIES
-from .grasp_gen import GraspPoseGeneration

 __all__ = [
     "PoseEstimation",
@@ -11,5 +10,4 @@ __all__ = [
     "Segmentation",
     "NrmkRealtimeSegmentation",
     "SEGMENTATION_COMPRESSION_STRATEGIES",
-    "GraspPoseGeneration",
 ]
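Since `GraspPoseGeneration` is no longer exported, code that must run against both 0.1.5 and 0.1.6 can guard the import. A minimal sketch (not an official pattern from the package):

```python
# Hypothetical compatibility shim for the removed export.
try:
    from neuromeka_vfm import GraspPoseGeneration  # available up to 0.1.5
except ImportError:  # 0.1.6+: the wrapper class was removed
    GraspPoseGeneration = None  # fall back to neuromeka_vfm.point_cloud_utils
```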
neuromeka_vfm/generate_mesh.py
ADDED
@@ -0,0 +1,122 @@
+"""
+Utility to generate simple parametric meshes (currently rectangular box) as binary STL.
+
+Design goals
+- Units: meters
+- Origin: object center at (0, 0, 0)
+- Axes: faces aligned to +/-X, +/-Y, +/-Z
+- Output: binary STL saved to /opt/meshes (docker volume mount)
+
+Usage (programmatic):
+    from backend.generate_mesh import write_box_stl
+    path = write_box_stl("custom_box.stl", width=0.054, depth=0.097, height=0.054)
+
+CLI (optional):
+    python -m backend.generate_mesh box custom_box.stl 0.054 0.097 0.054
+"""
+
+from __future__ import annotations
+
+import struct
+import sys
+from pathlib import Path
+from typing import Iterable, Tuple
+
+MESH_DIR = Path("/opt/meshes")
+
+
+def _ensure_dir(path: Path) -> None:
+    path.parent.mkdir(parents=True, exist_ok=True)
+
+
+def _pack_triangle(normal: Iterable[float], v1: Iterable[float], v2: Iterable[float], v3: Iterable[float]) -> bytes:
+    """Pack one triangle (normal + 3 vertices) into binary STL record."""
+    return struct.pack(
+        "<12fH",
+        *normal,
+        *v1,
+        *v2,
+        *v3,
+        0,  # attribute byte count
+    )
+
+
+def _box_triangles(width: float, depth: float, height: float):
+    """Generate normals and vertices for a box centered at origin."""
+    hx, hy, hz = width / 2.0, depth / 2.0, height / 2.0
+    # 8 vertices
+    p = [
+        (-hx, -hy, -hz),
+        (hx, -hy, -hz),
+        (hx, hy, -hz),
+        (-hx, hy, -hz),
+        (-hx, -hy, hz),
+        (hx, -hy, hz),
+        (hx, hy, hz),
+        (-hx, hy, hz),
+    ]
+    # Each face: two triangles (ccw when looking from outside)
+    faces = [
+        ((-1, 0, 0), (0, 3, 7, 4)),  # -X
+        ((1, 0, 0), (1, 2, 6, 5)),  # +X
+        ((0, -1, 0), (0, 1, 5, 4)),  # -Y
+        ((0, 1, 0), (3, 2, 6, 7)),  # +Y
+        ((0, 0, -1), (0, 1, 2, 3)),  # -Z
+        ((0, 0, 1), (4, 5, 6, 7)),  # +Z
+    ]
+    for normal, idx in faces:
+        if len(idx) == 4:
+            a, b, c, d = idx
+            # two triangles: (a,b,c) and (a,c,d)
+            yield normal, p[a], p[b], p[c]
+            yield normal, p[a], p[c], p[d]
+        else:
+            raise ValueError("Face index must have 4 vertices.")
+
+
+def write_box_stl(filename: str, width: float, depth: float, height: float) -> Path:
+    """
+    Create a rectangular box STL.
+
+    Args:
+        filename: output file name (placed under /opt/meshes). If only a name is
+            given, it is resolved relative to MESH_DIR.
+        width, depth, height: box dimensions in meters (must be > 0).
+
+    Returns:
+        Path to the written STL file.
+    """
+    if width <= 0 or depth <= 0 or height <= 0:
+        raise ValueError("width, depth, height must be positive.")
+
+    out_path = Path(filename)
+    if not out_path.is_absolute():
+        out_path = MESH_DIR / out_path
+    _ensure_dir(out_path)
+
+    triangles = list(_box_triangles(width, depth, height))
+    header = b"rect_box_stl" + b"\0" * (80 - len("rect_box_stl"))
+    with out_path.open("wb") as f:
+        f.write(header)
+        f.write(struct.pack("<I", len(triangles)))
+        for tri in triangles:
+            f.write(_pack_triangle(*tri))
+    return out_path
+
+
+def _cli(args: list[str]) -> int:
+    if len(args) != 5 or args[0].lower() != "box":
+        print("Usage: python -m backend.generate_mesh box <filename> <width> <depth> <height>")
+        return 1
+    _, fname, w, d, h = args
+    try:
+        path = write_box_stl(fname, float(w), float(d), float(h))
+    except Exception as exc:  # noqa: BLE001
+        print(f"Error: {exc}")
+        return 1
+    print(f"STL written to: {path}")
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(_cli(sys.argv[1:]))
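Note that the module docstring refers to `backend.generate_mesh`, but in the installed wheel the file ships as `neuromeka_vfm/generate_mesh.py` (see the RECORD diff below), so the importable path is `neuromeka_vfm.generate_mesh`. A small usage sketch that also sanity-checks the binary STL layout (the output filename is a placeholder):

```python
import tempfile
from pathlib import Path

from neuromeka_vfm.generate_mesh import write_box_stl

# Use an absolute path so the /opt/meshes default (a Docker volume) is not required.
out = Path(tempfile.gettempdir()) / "demo_box.stl"
path = write_box_stl(str(out), width=0.054, depth=0.097, height=0.054)

# Binary STL: 80-byte header + 4-byte triangle count + 50 bytes per triangle.
# A box is 6 faces x 2 triangles = 12 triangles -> 84 + 12 * 50 = 684 bytes.
assert path.stat().st_size == 84 + 12 * 50
```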
{neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: neuromeka_vfm
-Version: 0.1.5
+Version: 0.1.6
 Summary: Client utilities for Neuromeka VFM FoundationPose RPC (upload meshes, call server)
 Author: Neuromeka
 License: MIT License
@@ -47,50 +47,50 @@ Dynamic: license-file

 # neuromeka_vfm

-
+A lightweight client SDK for communicating with Segmentation (SAM2, Grounding DINO) and Pose Estimation (NVIDIA FoundationPose) servers over RPC/ZeroMQ. It also provides SSH/SFTP utilities to upload mesh files to the host.

 - Website: http://www.neuromeka.com
-- Source code: https://github.com/neuromeka-robotics/neuromeka_vfm
 - PyPI package: https://pypi.org/project/neuromeka_vfm/
 - Documents: https://docs.neuromeka.com

-## Usable via the Web UI (VFM Tester)
-
-- VFM Tester (Web UI): https://gitlab.com/neuromeka-group/nrmkq/nrmk_vfm_tester
-
-
 ## Installation
+
 ```bash
 pip install neuromeka_vfm
 ```

-## Python API (
+## Python API (usage by example)

-
-
+- Client PC: the machine running your application with this package installed.
+- Host PC: the machine running Segmentation and Pose Estimation Docker servers. If you run Docker locally, use `localhost`.

 ### Segmentation
+
 ```python
 from neuromeka_vfm import Segmentation

 seg = Segmentation(
-    hostname="192.168.10.63",
+    hostname="192.168.10.63",
     port=5432,
     compression_strategy="png", # none | png | jpeg | h264
 )

-#
+# Register using an image prompt
 seg.add_image_prompt("drug_box", ref_rgb)
-seg.register_first_frame(
-
-
+seg.register_first_frame(
+    frame=first_rgb,
+    prompt="drug_box", # ID string
+    use_image_prompt=True,
+)

-#
-seg.register_first_frame(
-
-
+# Register using a text prompt
+seg.register_first_frame(
+    frame=first_rgb,
+    prompt="box .", # Text prompt (must end with " .")
+    use_image_prompt=False,
+)

-#
+# SAM2 tracking on the registered mask(s)
 resp = seg.get_next(next_rgb)
 if isinstance(resp, dict) and resp.get("result") == "ERROR":
     print(f"Tracking error: {resp.get('message')}")
@@ -98,7 +98,7 @@ if isinstance(resp, dict) and resp.get("result") == "ERROR":
 else:
     masks = resp

-# Segmentation
+# Segmentation settings / model selection (nrmk_realtime_segmentation v0.2+)
 caps = seg.get_capabilities()["data"]
 current = seg.get_config()["data"]
 seg.set_config(
@@ -126,15 +126,26 @@ seg.set_config(
     }
 )

-#
+# Remove an object (v0.2+, only when use_legacy=False)
 seg.remove_object("cup_0")

-
 seg.close()
 ```

-
-
+Additional Segmentation APIs and behaviors
+
+- `benchmark=True` in the constructor enables timing counters (`call_time`, `call_count`) for `add_image_prompt`, `register_first_frame`, and `get_next`.
+- `switch_compression_strategy()` lets you change the compression strategy at runtime.
+- `register_first_frame()` returns `True`/`False` and raises `ValueError` if image prompts are missing when `use_image_prompt=True`.
+- `register_first_frame()` accepts a list of prompt IDs when `use_image_prompt=True`.
+- `get_next()` returns `None` if called before registration; it can also return the server error dict when available.
+- `reset()` performs a server-side reset, while `finish()` clears only local state.
+- Exposed state: `tracking_object_ids`, `current_frame_masks`, `invisible_object_ids`.
+- Backward-compat alias: `NrmkRealtimeSegmentation`.
+
+#### Segmentation v0.2 config summary (defaults/choices)
+`seg.get_capabilities()` can differ depending on server configuration. The following reflects v0.2 defaults.
+
 ```yaml
 grounding_dino:
   backbone:
@@ -187,70 +198,82 @@ sam2:
     default: false
 ```

-#### Segmentation v0.2
-
-- SAM2 `
-
-
-
+#### Segmentation v0.2 notes and changes
+
+- If SAM2 VRAM estimation fails, `seg.get_next()` may return `{"result":"ERROR"}`. Handle the error and call `reset` before re-registering.
+- `compile=True` can slow down first-frame registration and `reset`.
+- CPU offloading is most effective when both `offload_state_to_cpu=True` and `offload_video_to_cpu=True` are set (legacy mode does not support `offload_video_to_cpu`).
+- `remove_object` is supported only when `use_legacy=False`.
+- GroundingDINO added the Swin-B backbone and fixed prompt-token merge issues.

 ### Pose Estimation

-**Mesh
+**Mesh upload**: Upload the mesh file (STL) to `/opt/meshes/` on the host PC. You can also use SSH directly.
+
 ```python
 from neuromeka_vfm import upload_mesh
+
 upload_mesh(
     host="192.168.10.63",
     user="user",
-    password="pass",
-    local="mesh/my_mesh.stl", #
-    remote="/opt/meshes/my_mesh.stl", #
+    password="pass",
+    local="mesh/my_mesh.stl", # local mesh path
+    remote="/opt/meshes/my_mesh.stl", # host mesh path (Docker volume)
 )
 ```

-
+Initialization
+
 ```python
 from neuromeka_vfm import PoseEstimation

-pose = PoseEstimation(host="192.168.10.72", port=5557)
+pose = PoseEstimation(host="192.168.10.72", port=5557)

 pose.init(
     mesh_path="/app/modules/foundation_pose/mesh/my_mesh.stl",
-    apply_scale=1.0,
+    apply_scale=1.0,
     track_refine_iter=3,
     min_n_views=40,
-    inplane_step=60
+    inplane_step=60,
 )
 ```
-
--
-
-
-
--
--
--
--
--
--
-
-
-
+
+- mesh_path: path to the mesh file (STL/OBJ). Initialization fails if missing.
+- apply_scale: scalar applied after loading the mesh.
+  - STL in meters: 1.0 (no scaling)
+  - STL in centimeters: 0.01 (1 cm -> 0.01 m)
+  - STL in millimeters: 0.001 (1 mm -> 0.001 m)
+- force_apply_color: if True, forces a solid color when the mesh lacks color data.
+- apply_color: RGB tuple (0-255) used when `force_apply_color=True`.
+- est_refine_iter: number of refinement iterations during registration (higher = more accurate, slower).
+- track_refine_iter: number of refinement iterations per frame during tracking.
+- min_n_views: minimum number of sampled camera views (affects rotation candidates).
+- inplane_step: in-plane rotation step in degrees (smaller = more candidates).
+
+Registration and tracking
+
 ```python
-#
+# Registration (server defaults when iteration is omitted, check_vram=True pre-checks VRAM)
 register_resp = pose.register(rgb=rgb0, depth=depth0, mask=mask0, K=cam_K, check_vram=True)

-#
+# Tracking (optionally limit search area with bbox_xywh)
 track_resp = pose.track(rgb=rgb1, depth=depth1, K=cam_K, bbox_xywh=bbox_xywh)
+
 pose.close()
 ```
-- cam_K: camera intrinsic
-- A GPU VRAM overflow error occurs when the RGB resolution is large, min_n_views is large, or inplane_step is small.
-- With register check_vram=True, a VRAM overflow pre-check prevents a shutdown.

+- cam_K: camera intrinsics.
+- Large RGB resolution, large `min_n_views`, or small `inplane_step` can cause GPU VRAM errors.
+- `check_vram=True` in `register` performs a pre-check to prevent server shutdown due to OOM.
+- `iteration` in `register`/`track` can override the server default if provided.
+- `reset()` resets the server state; `reset_object()` reuses the cached mesh to rebuild the rotation grid.
+- Default host/port can come from `FPOSE_HOST` and `FPOSE_PORT` environment variables.
+- Backward-compat alias: `FoundationPoseClient`.
+
+<!--
+## Benchmark

-
-Measured with the servers running locally. Blank cells are items not yet measured.
+Measured on local servers. Empty cells are not yet measured.

 **RTX 5060**
 | Task | Prompt | None (s) | JPEG (s) | PNG (s) | h264 (s) |
@@ -269,17 +292,10 @@ pose.close()
 | SAM2 | - | | | | |
 | FoundationPose registration | - | 0.4 | - | | |
 | FoundationPose track | - | 0.03 | | | |
+-->

-
-| Task | Prompt | None (s) | JPEG (s) | PNG (s) | h264 (s) |
-| --- | --- | --- | --- | --- | --- |
-| Grounding DINO | text (human . cup .) | | | | |
-| DINOv2 | image prompt | | | | |
-| SAM2 | - | | | | |
-| FoundationPose registration | - | 0.4 | - | | |
-| FoundationPose track | - | 0.03 | | | |
+## Release notes

-
-- 0.1.
-- 0.1.
-- 0.1.0: Initial public release. Includes the FoundationPose RPC client, real-time segmentation client, and SSH-based mesh upload CLI/API.
+- 0.1.2: Improved success detection for Segmentation responses (`result`/`success`/`status`), fixed image prompt registration/usage, added `check_vram` to PoseEstimation `register`.
+- 0.1.1: Improved resource cleanup in PoseEstimation/Segmentation, use server defaults when iteration is omitted, added pose demo example.
+- 0.1.0: Initial public release. Includes FoundationPose RPC client, real-time segmentation client, SSH-based mesh upload CLI/API.
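The README documents the Segmentation and Pose Estimation clients separately. As a rough illustration only, a combined first-frame flow might look like the sketch below; the host addresses, the placeholder `rgb0`/`depth0`/`cam_K` arrays, the omitted `pose.init` parameters, and the mask extraction step are all assumptions, not part of the published docs:

```python
import numpy as np

from neuromeka_vfm import PoseEstimation, Segmentation

# Placeholder frames; in practice these come from a real RGB-D camera.
rgb0 = np.zeros((480, 640, 3), dtype=np.uint8)
depth0 = np.zeros((480, 640), dtype=np.float32)
cam_K = np.array([[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]])

seg = Segmentation(hostname="192.168.10.63", port=5432, compression_strategy="png")
seg.register_first_frame(frame=rgb0, prompt="box .", use_image_prompt=False)
resp = seg.get_next(rgb0)
if isinstance(resp, dict) and resp.get("result") == "ERROR":
    raise RuntimeError(resp.get("message"))

# Assumption: a single-object mask can be pulled out of the segmentation
# response; the exact structure depends on the server version.
mask0 = np.asarray(resp).squeeze().astype(bool)

pose = PoseEstimation(host="192.168.10.72", port=5557)
pose.init(mesh_path="/app/modules/foundation_pose/mesh/my_mesh.stl", apply_scale=1.0)
register_resp = pose.register(rgb=rgb0, depth=depth0, mask=mask0, K=cam_K, check_vram=True)

seg.close()
pose.close()
```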
{neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
-neuromeka_vfm/__init__.py,sha256=
+neuromeka_vfm/__init__.py,sha256=h5ODdWFgN7a9TBzF6Qfdyx5VxUr2hG0pFTwq57jEvDo,422
 neuromeka_vfm/compression.py,sha256=d2xOz4XBJZ60pPSXwQ5LPYwhpsaNORvNoY_0CUiAvt0,5191
-neuromeka_vfm/
+neuromeka_vfm/generate_mesh.py,sha256=HV2dUfVXROPQ9kDDPmkwdn5E5gelWIQDsPsZuvZxI6E,3634
 neuromeka_vfm/pickle_client.py,sha256=Iw2fpxdnKB20oEUgsd0rJlvzOd5JhetphpKkF9qQcX0,591
 neuromeka_vfm/point_cloud_utils.py,sha256=ZnCh8Xg6pLGoyi5ufZkz59HzE9RuRdihE8z-XNYT1PA,13261
 neuromeka_vfm/pose_estimation.py,sha256=3MUVhL0nMcpHApZDAzutS7fINPHcb-tu_WoXvNGU33E,2625
@@ -8,9 +8,9 @@ neuromeka_vfm/segmentation.py,sha256=8kmMut_gNJ3wa9F0l7iEYFNqHJzHJ5KPBzs7vSiwjqg
 neuromeka_vfm/upload_mesh.py,sha256=aW5G9aE5OeiDN5pEVKDzMeV538U-I2iRYZvVZTfGsr4,2728
 neuromeka_vfm/examples/__init__.py,sha256=dEhb0FqhpEGNmg0pMunmrTlViIcxvd95fYEjZ49IOTQ,37
 neuromeka_vfm/examples/pose_demo.py,sha256=zq1Z0_kxQc4CB-ltfwm_oMoC7JLoN5GyeE3C6jKGQKw,13658
-neuromeka_vfm-0.1.
-neuromeka_vfm-0.1.
-neuromeka_vfm-0.1.
-neuromeka_vfm-0.1.
-neuromeka_vfm-0.1.
-neuromeka_vfm-0.1.
+neuromeka_vfm-0.1.6.dist-info/licenses/LICENSE,sha256=40cBWxFahhu0p_EB0GhU8oVIifVNmH1o2fZtx0bIif8,1076
+neuromeka_vfm-0.1.6.dist-info/METADATA,sha256=zHBuQ5sHDJjOdY96dmxLiwtE8OHIRJg0VcyzUB0rV48,10628
+neuromeka_vfm-0.1.6.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+neuromeka_vfm-0.1.6.dist-info/entry_points.txt,sha256=Wl4XqiUt_GLQ08oTJtsYjLW0iYxZ52ysVd1-cN0kYP4,72
+neuromeka_vfm-0.1.6.dist-info/top_level.txt,sha256=uAH_yXikUvxXTSEnUC0M8Zl5ggxbnkYtXlmTfEG8MUk,14
+neuromeka_vfm-0.1.6.dist-info/RECORD,,
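Each RECORD entry is `path,sha256=<hash>,size`, where the hash is the urlsafe-base64 SHA-256 of the file with padding stripped, per the wheel spec. A small sketch for checking an entry against an installed file (the path argument is a placeholder):

```python
import base64
import hashlib
from pathlib import Path

def record_hash(path: str) -> str:
    """Compute a wheel-RECORD-style hash: urlsafe base64 of SHA-256, no '=' padding."""
    digest = hashlib.sha256(Path(path).read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the 0.1.6 wheel, this should return
# "HV2dUfVXROPQ9kDDPmkwdn5E5gelWIQDsPsZuvZxI6E" for neuromeka_vfm/generate_mesh.py:
# print(record_hash("site-packages/neuromeka_vfm/generate_mesh.py"))
```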
neuromeka_vfm/grasp_gen.py
DELETED
@@ -1,79 +0,0 @@
-from typing import Tuple
-
-import numpy as np
-import trimesh
-
-from . import point_cloud_utils
-
-
-class GraspPoseGeneration:
-    """
-    Wrapper class for point cloud utilities used in grasp pose workflows.
-    """
-
-    def knn_points(self, X: np.ndarray, K: int, norm: int):
-        return point_cloud_utils.knn_points(X=X, K=K, norm=norm)
-
-    def point_cloud_outlier_removal(
-        self, obj_pc: np.ndarray, threshold: float = 0.014, K: int = 20
-    ) -> Tuple[np.ndarray, np.ndarray]:
-        return point_cloud_utils.point_cloud_outlier_removal(
-            obj_pc=obj_pc, threshold=threshold, K=K
-        )
-
-    def point_cloud_outlier_removal_with_color(
-        self,
-        obj_pc: np.ndarray,
-        obj_pc_color: np.ndarray,
-        threshold: float = 0.014,
-        K: int = 20,
-    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
-        return point_cloud_utils.point_cloud_outlier_removal_with_color(
-            obj_pc=obj_pc,
-            obj_pc_color=obj_pc_color,
-            threshold=threshold,
-            K=K,
-        )
-
-    def depth_and_segmentation_to_point_clouds(
-        self,
-        depth_image: np.ndarray,
-        segmentation_mask: np.ndarray,
-        fx: float,
-        fy: float,
-        cx: float,
-        cy: float,
-        rgb_image: np.ndarray = None,
-        target_object_id: int = 1,
-        remove_object_from_scene: bool = False,
-    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
-        return point_cloud_utils.depth_and_segmentation_to_point_clouds(
-            depth_image=depth_image,
-            segmentation_mask=segmentation_mask,
-            fx=fx,
-            fy=fy,
-            cx=cx,
-            cy=cy,
-            rgb_image=rgb_image,
-            target_object_id=target_object_id,
-            remove_object_from_scene=remove_object_from_scene,
-        )
-
-    def filter_colliding_grasps(
-        self,
-        scene_pc: np.ndarray,
-        grasp_poses: np.ndarray,
-        gripper_collision_mesh: trimesh.Trimesh,
-        collision_threshold: float = 0.002,
-        num_collision_samples: int = 2000,
-    ) -> np.ndarray:
-        return point_cloud_utils.filter_colliding_grasps(
-            scene_pc=scene_pc,
-            grasp_poses=grasp_poses,
-            gripper_collision_mesh=gripper_collision_mesh,
-            collision_threshold=collision_threshold,
-            num_collision_samples=num_collision_samples,
-        )
-
-
-__all__ = ["GraspPoseGeneration"]
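`GraspPoseGeneration` was a thin, stateless wrapper, so code that used it can call the underlying `point_cloud_utils` functions directly; the module remains in the wheel per the RECORD above. A minimal sketch of an equivalent call, with a placeholder point cloud:

```python
import numpy as np

from neuromeka_vfm import point_cloud_utils

obj_pc = np.random.rand(1000, 3)  # placeholder Nx3 point cloud in meters

# Previously: GraspPoseGeneration().point_cloud_outlier_removal(obj_pc)
# The removed wrapper's type hint promises a pair of arrays (e.g. filtered
# points plus auxiliary output; check point_cloud_utils for the exact meaning).
filtered_pc, aux = point_cloud_utils.point_cloud_outlier_removal(
    obj_pc=obj_pc, threshold=0.014, K=20
)
```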
{neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/entry_points.txt
File without changes

{neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/licenses/LICENSE
File without changes

{neuromeka_vfm-0.1.5.dist-info → neuromeka_vfm-0.1.6.dist-info}/top_level.txt
File without changes