willow-runtime 5.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Willow Dynamics
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,133 @@
1
+ Metadata-Version: 2.4
2
+ Name: willow-runtime
3
+ Version: 5.1.0
4
+ Summary: Willow 5 Runtime: Action Recognition, Retargeting, & Edge Evaluator
5
+ Author: Willow Dynamics
6
+ License: MIT
7
+ Project-URL: Homepage, https://willowdynamics.com
8
+ Requires-Python: >=3.8
9
+ Description-Content-Type: text/markdown
10
+ License-File: LICENSE.txt
11
+ Requires-Dist: numpy>=1.21.0
12
+ Requires-Dist: numba>=0.56.0
13
+ Requires-Dist: requests>=2.25.0
14
+ Dynamic: license-file
15
+
16
+ # Willow 5 Runtime SDK
17
+
18
+ The official Python SDK for **Willow Dynamics**.
19
+
20
+ This SDK acts as the bridge between the Willow Cloud Oracle and your local environment. It enables **Zero-Shot Action Recognition**, **Kinematic Retargeting**, and **Physics Evaluation** on edge devices, cloud pipelines, and simulation clusters.
21
+
22
+ It is designed to be dependency-light (No AWS/Boto3 required) and privacy-first (Models run in ephemeral RAM).
23
+
24
+ ---
25
+
26
+ ## Installation
27
+
28
+ ```bash
29
+ pip install willow-runtime
30
+ ```
31
+
32
+ ---
33
+
34
+ ## 1. Quick Start: Model Provisioning
35
+
36
+ Securely fetch your proprietary action models from the Willow Cloud.
37
+
38
+ ```python
39
+ from willow import WillowClient
40
+
41
+ # Initialize with your API Key
42
+ client = WillowClient(api_key="YOUR_WILLOW_API_KEY")
43
+
44
+ # Option A: Stream directly to RAM (Secure / Cloud / Ephemeral)
45
+ # The model never touches the physical disk.
46
+ model = client.get_model("tactical-reload-v1")
47
+
48
+ # Option B: Download to Disk (Offline / Edge / Air-Gapped)
49
+ client.download_model("tactical-reload-v1", "./models/reload.int8")
50
+ ```
51
+
52
+ ---
53
+
54
+ ## 2. Usage: Active Streaming (Real-Time)
55
+
56
+ Best for **Robotics**, **Webcams**, or **Smart Gyms** where frames arrive one by one. The `step()` method manages an internal sliding window buffer for zero-latency triggering.
57
+
58
+ ```python
59
+ from willow import WillowClient, WillowDetector
60
+
61
+ # 1. Load Model
62
+ client = WillowClient(api_key="YOUR_API_KEY")
63
+ model = client.get_model("tactical-reload-v1")
64
+ detector = WillowDetector(model)
65
+
66
+ # 2. The Real-Time Loop
67
+ # Assume 'get_next_frame()' returns a (75, 3) numpy array of skeletal data
68
+ while True:
69
+ current_skeleton, timestamp_ms = get_next_frame()
70
+
71
+ # .step() executes in <2ms on modern CPUs
72
+ event = detector.step(current_skeleton, timestamp_ms)
73
+
74
+ if event:
75
+ print(f"!!! ACTION DETECTED !!!")
76
+ print(f"Timestamp: {event['end_ms']}ms | Confidence: {event['confidence']:.2f}")
77
+
78
+ # Trigger Actuator / Robot / UI here
79
+ ```
80
+
81
+ ---
82
+
83
+ ## 3. Usage: Batch Analysis (Passive)
84
+
85
+ Best for **Data Science**, **Historical Video Processing**, or **Cloud ETL** pipelines.
86
+
87
+ ```python
88
+ from willow import WillowClient, WillowDetector
89
+ import numpy as np
90
+
91
+ # 1. Load Model
92
+ client = WillowClient(api_key="YOUR_API_KEY")
93
+ model = client.get_model("golf-swing-v4")
94
+ detector = WillowDetector(model)
95
+
96
+ # 2. Load Data (e.g., from an uploaded video file processed by MediaPipe)
97
+ # Shape: (Frames, 75, 3)
98
+ full_sequence = np.load("recording_data.npy")
99
+ timestamps = [t * 33 for t in range(len(full_sequence))]
100
+
101
+ # 3. Detect All Occurrences
102
+ matches = detector.detect(full_sequence, timestamps)
103
+
104
+ print(f"Found {len(matches)} events:")
105
+ for m in matches:
106
+ print(f" - {m['start_ms']}ms to {m['end_ms']}ms (Conf: {m['confidence']:.2f})")
107
+ ```
108
+
109
+ ---
110
+
111
+ ## 4. Usage: Sim-to-Real & Robotics
112
+
113
+ Best for **Reinforcement Learning (RL)**, **NVIDIA Isaac Sim**, and **Humanoid Teleoperation**. Bridges the gap between Computer Vision coordinates and Robotics standards.
114
+
115
+ ```python
116
+ from willow import CoordinateBridge, KinematicRetargeter
117
+
118
+ # 1. Convert Coordinate Space
119
+ # MediaPipe (Y-Down) -> ROS/Isaac Sim (Z-Up)
120
+ ros_ready_sequence = CoordinateBridge.to_ros_z_up(raw_skeleton_sequence)
121
+
122
+ # 2. Extract Joint Angles for RL Training
123
+ # Returns dictionary of 1D angle arrays (e.g., "right_elbow_flexion")
124
+ joint_angles = KinematicRetargeter.extract_joint_angles(ros_ready_sequence)
125
+
126
+ print(f"Extracted {len(joint_angles)} joint features for simulation training.")
127
+ ```
128
+
129
+ ---
130
+
131
+ ## License
132
+
133
+ MIT License. Copyright (c) 2026 Willow Dynamics.
@@ -0,0 +1,118 @@
1
+ # Willow 5 Runtime SDK
2
+
3
+ The official Python SDK for **Willow Dynamics**.
4
+
5
+ This SDK acts as the bridge between the Willow Cloud Oracle and your local environment. It enables **Zero-Shot Action Recognition**, **Kinematic Retargeting**, and **Physics Evaluation** on edge devices, cloud pipelines, and simulation clusters.
6
+
7
+ It is designed to be dependency-light (No AWS/Boto3 required) and privacy-first (Models run in ephemeral RAM).
8
+
9
+ ---
10
+
11
+ ## Installation
12
+
13
+ ```bash
14
+ pip install willow-runtime
15
+ ```
16
+
17
+ ---
18
+
19
+ ## 1. Quick Start: Model Provisioning
20
+
21
+ Securely fetch your proprietary action models from the Willow Cloud.
22
+
23
+ ```python
24
+ from willow import WillowClient
25
+
26
+ # Initialize with your API Key
27
+ client = WillowClient(api_key="YOUR_WILLOW_API_KEY")
28
+
29
+ # Option A: Stream directly to RAM (Secure / Cloud / Ephemeral)
30
+ # The model never touches the physical disk.
31
+ model = client.get_model("tactical-reload-v1")
32
+
33
+ # Option B: Download to Disk (Offline / Edge / Air-Gapped)
34
+ client.download_model("tactical-reload-v1", "./models/reload.int8")
35
+ ```
36
+
37
+ ---
38
+
39
+ ## 2. Usage: Active Streaming (Real-Time)
40
+
41
+ Best for **Robotics**, **Webcams**, or **Smart Gyms** where frames arrive one by one. The `step()` method manages an internal sliding window buffer for zero-latency triggering.
42
+
43
+ ```python
44
+ from willow import WillowClient, WillowDetector
45
+
46
+ # 1. Load Model
47
+ client = WillowClient(api_key="YOUR_API_KEY")
48
+ model = client.get_model("tactical-reload-v1")
49
+ detector = WillowDetector(model)
50
+
51
+ # 2. The Real-Time Loop
52
+ # Assume 'get_next_frame()' returns a (75, 3) numpy array of skeletal data
53
+ while True:
54
+ current_skeleton, timestamp_ms = get_next_frame()
55
+
56
+ # .step() executes in <2ms on modern CPUs
57
+ event = detector.step(current_skeleton, timestamp_ms)
58
+
59
+ if event:
60
+ print(f"!!! ACTION DETECTED !!!")
61
+ print(f"Timestamp: {event['end_ms']}ms | Confidence: {event['confidence']:.2f}")
62
+
63
+ # Trigger Actuator / Robot / UI here
64
+ ```
65
+
66
+ ---
67
+
68
+ ## 3. Usage: Batch Analysis (Passive)
69
+
70
+ Best for **Data Science**, **Historical Video Processing**, or **Cloud ETL** pipelines.
71
+
72
+ ```python
73
+ from willow import WillowClient, WillowDetector
74
+ import numpy as np
75
+
76
+ # 1. Load Model
77
+ client = WillowClient(api_key="YOUR_API_KEY")
78
+ model = client.get_model("golf-swing-v4")
79
+ detector = WillowDetector(model)
80
+
81
+ # 2. Load Data (e.g., from an uploaded video file processed by MediaPipe)
82
+ # Shape: (Frames, 75, 3)
83
+ full_sequence = np.load("recording_data.npy")
84
+ timestamps = [t * 33 for t in range(len(full_sequence))]
85
+
86
+ # 3. Detect All Occurrences
87
+ matches = detector.detect(full_sequence, timestamps)
88
+
89
+ print(f"Found {len(matches)} events:")
90
+ for m in matches:
91
+ print(f" - {m['start_ms']}ms to {m['end_ms']}ms (Conf: {m['confidence']:.2f})")
92
+ ```
93
+
94
+ ---
95
+
96
+ ## 4. Usage: Sim-to-Real & Robotics
97
+
98
+ Best for **Reinforcement Learning (RL)**, **NVIDIA Isaac Sim**, and **Humanoid Teleoperation**. Bridges the gap between Computer Vision coordinates and Robotics standards.
99
+
100
+ ```python
101
+ from willow import CoordinateBridge, KinematicRetargeter
102
+
103
+ # 1. Convert Coordinate Space
104
+ # MediaPipe (Y-Down) -> ROS/Isaac Sim (Z-Up)
105
+ ros_ready_sequence = CoordinateBridge.to_ros_z_up(raw_skeleton_sequence)
106
+
107
+ # 2. Extract Joint Angles for RL Training
108
+ # Returns dictionary of 1D angle arrays (e.g., "right_elbow_flexion")
109
+ joint_angles = KinematicRetargeter.extract_joint_angles(ros_ready_sequence)
110
+
111
+ print(f"Extracted {len(joint_angles)} joint features for simulation training.")
112
+ ```
113
+
114
+ ---
115
+
116
+ ## License
117
+
118
+ MIT License. Copyright (c) 2026 Willow Dynamics.
@@ -0,0 +1,20 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "willow-runtime"
7
+ version = "5.1.0"
8
+ description = "Willow 5 Runtime: Action Recognition, Retargeting, & Edge Evaluator"
9
+ readme = "README.md"
10
+ authors = [{ name = "Willow Dynamics" }]
11
+ license = { text = "MIT" }
12
+ requires-python = ">=3.8"
13
+ dependencies = [
14
+ "numpy>=1.21.0",
15
+ "numba>=0.56.0",
16
+ "requests>=2.25.0"
17
+ ]
18
+
19
+ [project.urls]
20
+ Homepage = "https://willowdynamics.com"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,93 @@
1
+ import struct
2
+ import numpy as np
3
+ from willow import WillowDetector, load_local_model, parse_int8_model
4
+ from willow import CoordinateBridge, KinematicRetargeter, PhysicsEvaluator
5
+
6
def create_mock_int8_model():
    """Build a synthetic Willow V4.0 .int8 model payload entirely in memory.

    Header layout (little-endian, 24 bytes):
        uint32 version=40, uint32 zone_bitmask=2 (torso), float scale=1.2,
        float overlap_tolerance=0.25, float dtw_sensitivity=3.0,
        float tempo_variance=0.20
    """
    header = struct.pack('<IIffff', 40, 2, 1.2, 0.25, 3.0, 0.20)
    # 5 frames x 6 features, every cell at the int8 maximum (127),
    # which de-quantizes back to exactly `scale` (1.2).
    features = np.full((5, 6), 127, dtype=np.int8)
    return header + features.tobytes()
13
+
14
def test_willow_pipeline():
    """End-to-end smoke test of the Willow 5 SDK.

    Exercises, in order: the RAM-only .int8 parser, batch detection,
    streaming detection, coordinate bridging, kinematic retargeting,
    and the physics evaluator — all against synthetic skeletal data
    with a known match injected at frames 8-12.
    """
    print("--- RUNNING WILLOW 5 SDK TEST SUITE ---")

    # 1. PARSERS
    model = parse_int8_model(create_mock_int8_model())
    assert model.config.version == 40
    print("✓ RAM-Only Parsing Passed")

    # 2. GENERATE DATA
    # 20 frames of the 75-point topology; only torso joints (11, 12, 23, 24)
    # are populated because the mock model's bitmask selects the torso zone.
    test_seq = np.zeros((20, 75, 3), dtype=np.float32)
    timestamps = [t * 33 for t in range(20)]  # ~30fps in milliseconds

    # Base Pose (Rest)
    for f in range(20):
        test_seq[f, 11] = [-0.5, 1.5, 0.0]  # L Shoulder
        test_seq[f, 12] = [0.5, 1.5, 0.0]   # R Shoulder
        test_seq[f, 23] = [-0.3, 0.5, 0.0]  # L Hip (Torso Len = 1.0)
        test_seq[f, 24] = [0.3, 0.5, 0.0]   # R Hip

        # Inject Perfect Match at frames 8-12
        if 8 <= f <= 12:
            # We need Normalized Distances = 1.2
            # Torso Length = 1.0 (Shoulder Y 1.5 - Hip Y 0.5)
            # So we need Absolute Distances = 1.2

            # Widen Shoulders to 1.2 width (-0.6 to 0.6)
            test_seq[f, 11] = [-0.6, 1.5, 0.0]
            test_seq[f, 12] = [0.6, 1.5, 0.0]

            # Widen Hips to 1.2 width (-0.6 to 0.6)
            test_seq[f, 23] = [-0.6, 0.5, 0.0]
            test_seq[f, 24] = [0.6, 0.5, 0.0]

            # Vertical Dists (11-23) = 1.0.
            # This will result in features [1.2, 1.2, 1.0, 1.0, 1.56, 1.56]
            # This is closer to the model [1.2, 1.2, 1.2, 1.2, 1.2, 1.2]
            # than the previous data.

    # 3. DETECTOR (PASSIVE BATCH)
    detector = WillowDetector(model)
    batch_detections = detector.detect(test_seq, timestamps)

    if len(batch_detections) == 0:
        print("DEBUG: No detections found. Check NMS logic or Data Scaling.")

    assert len(batch_detections) > 0
    print("✓ Passive Batch Detection Passed")

    # 4. DETECTOR (ACTIVE STREAMING)
    # A fresh detector instance: step() keeps internal sliding-window state.
    detector_live = WillowDetector(model)
    live_detections = []
    for frame, ts in zip(test_seq, timestamps):
        match = detector_live.step(frame, ts)
        if match:
            live_detections.append(match)

    assert len(live_detections) > 0
    print("✓ Active Real-Time Streaming Passed")

    # 5. SPATIAL TRANSFORMS
    ros_seq = CoordinateBridge.to_ros_z_up(test_seq)
    # Check Y -> -Z translation (1.5 becomes -1.5)
    assert ros_seq[0, 11, 2] == -1.5
    print("✓ Coordinate Bridging Passed")

    # 6. KINEMATIC RETARGETING
    angles = KinematicRetargeter.extract_joint_angles(test_seq)
    assert "left_elbow_flexion" in angles
    print("✓ Sim-to-Real IK Passed")

    # 7. PHYSICS EVALUATOR
    # Joint 16 = right wrist in the Willow topology (all zeros in this data,
    # so only the presence of output keys is asserted).
    right_wrist_traj = test_seq[:, 16, :3]
    physics = PhysicsEvaluator.calculate_derivatives(right_wrist_traj, fps=30.0)
    assert "peak_jerk" in physics
    print("✓ Edge Physics Evaluator Passed")

    print("--- ALL TESTS PASSED ---")
91
+
92
+ if __name__ == "__main__":
93
+ test_willow_pipeline()
@@ -0,0 +1,14 @@
1
+ from .types import WillowConfig, WillowModel, ZONES
2
+ from .client import WillowClient
3
+ from .parsers import parse_int8_model, parse_json_model, load_local_model
4
+ from .detector import WillowDetector
5
+ from .transforms import CoordinateBridge
6
+ from .retargeting import KinematicRetargeter
7
+ from .evaluator import PhysicsEvaluator
8
+
9
# Public API surface of the willow package.
__all__ = [
    "WillowConfig",
    "WillowModel",
    "ZONES",
    "WillowClient",
    "WillowDetector",
    "CoordinateBridge",
    "KinematicRetargeter",
    "PhysicsEvaluator",
    "load_local_model",
    "parse_int8_model",
    "parse_json_model",
]
@@ -0,0 +1,82 @@
1
+ import base64
2
+ import json
3
+ import requests
4
+ from io import BytesIO
5
+ from typing import Union
6
+
7
+ from .types import WillowModel
8
+ from .parsers import parse_int8_model, parse_json_model
9
+
10
class WillowClient:
    """
    The official Willow API Client for Model Provisioning.

    Handles retrieval of action recognition models for local or edge
    execution, either directly into RAM (`get_model`) or onto disk
    (`download_model`).
    """

    # Defaults to the Master AWS Oracle Gateway
    DEFAULT_API_URL = "https://55zydxbe05.execute-api.us-east-2.amazonaws.com"

    def __init__(self, api_key: str, base_url: str = DEFAULT_API_URL,
                 timeout: float = 30.0):
        """
        Initializes the client.
        :param api_key: Your Willow API Key.
        :param base_url: The base URL of the Willow API.
        :param timeout: Per-request timeout in seconds. Prevents the client
                        from blocking forever on an unresponsive gateway.
        """
        self.api_key = api_key
        self.base_url = base_url.rstrip('/')
        self.timeout = timeout
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Accept": "application/json"
        }

    def _fetch_model_payload(self, model_id: str, format_type: str) -> Union[bytes, dict]:
        """
        Internal method to handle the HTTP request and Base64 decoding.

        :raises ConnectionError: on any non-200 response from the gateway.
        """
        url = f"{self.base_url}/export"

        # Legacy architecture routing abstraction:
        # 'shopifyCustomerId' acts as the API identifier credential for the Oracle backend.
        # NOTE(review): the API key is sent both as the Bearer header and as a
        # query-string parameter; query strings are commonly captured in
        # server/proxy logs — confirm the backend actually requires this.
        params = {
            "analysisId": model_id,
            "shopifyCustomerId": self.api_key,
            "format": format_type
        }

        # Fix: the original call had no timeout, so a stalled connection
        # would block the caller indefinitely.
        response = requests.get(url, params=params, headers=self.headers,
                                timeout=self.timeout)

        if response.status_code != 200:
            raise ConnectionError(f"Failed to fetch model {model_id}: {response.status_code} - {response.text}")

        if format_type == "int8":
            # The Willow Backend returns the binary payload as a Base64 string
            return base64.b64decode(response.text)
        else:
            # Standard JSON payload
            return response.json()

    def get_model(self, model_id: str) -> WillowModel:
        """
        ON-DEMAND EPHEMERAL RAM:
        Fetches the highly-optimized .int8 model directly into an ephemeral memory buffer.
        The model is parsed and ready for inference, never touching the physical disk.
        """
        raw_bytes = self._fetch_model_payload(model_id, format_type="int8")
        memory_buffer = BytesIO(raw_bytes)
        return parse_int8_model(memory_buffer)

    def download_model(self, model_id: str, dest_path: str, format_type: str = "int8") -> str:
        """
        STORE LOCALLY:
        Fetches the model via the API and writes it to the local disk for offline usage.
        :param dest_path: The file path to save the model to (e.g., './models/reload.int8').
        :param format_type: 'int8' (recommended) or 'json'.
        :return: The destination path that was written.
        """
        data = self._fetch_model_payload(model_id, format_type)

        # Binary formats are written raw; JSON is pretty-printed as text.
        mode = 'wb' if format_type == "int8" else 'w'
        with open(dest_path, mode) as f:
            if format_type == "int8":
                f.write(data)
            else:
                json.dump(data, f, indent=2)

        return dest_path
@@ -0,0 +1,124 @@
1
+ import numpy as np
2
+ from typing import List, Dict, Optional
3
+ from .types import WillowModel
4
+ from .math_kernels import fast_streaming_dtw_continuous, extract_rdm_signature
5
+
6
class WillowDetector:
    """
    Core Runtime Engine for executing Zero-Shot Action Recognition locally.

    Two modes:
      * detect() — passive batch analysis over a full recorded sequence.
      * step()   — active per-frame streaming over an internal sliding window.
    """
    def __init__(self, model: WillowModel):
        self.model = model
        self.cfg = model.config

        # State architecture for the Real-Time Streaming functionality.
        # Window holds 3x the template length — presumably to let a full
        # (possibly slower) performance fit inside it; TODO confirm.
        self._max_buffer_size = self.model.signature.shape[0] * 3
        self._frame_buffer = []
        self._timestamp_buffer = []
        # End timestamp of the last emitted event; guards against emitting
        # the same match repeatedly as the sliding window advances.
        self._last_emitted_end_ms = -1

    def detect(self, skeleton_sequence: np.ndarray, timestamps_ms: List[int]) -> List[Dict]:
        """
        PASSIVE MODE: Batch execution. Processes a full video array at once.
        skeleton_sequence: Expected shape (Frames, 75, 3) or (Frames, 75, 4).

        :return: chronologically sorted list of dicts with keys
                 'start_ms', 'end_ms', and 'confidence' (0.0-1.0).
        :raises ValueError: if sequence and timestamp lengths differ.
        """
        if skeleton_sequence.shape[0] != len(timestamps_ms):
            raise ValueError("Skeleton sequence length must match timestamps array length.")

        # 1. RDM Feature Extraction (Torso-Normalized)
        test_sig = extract_rdm_signature(skeleton_sequence, self.cfg.zone_bitmask)
        if len(test_sig) == 0:
            return []

        # 2. Continuous DTW Kernel
        costs, lengths = fast_streaming_dtw_continuous(test_sig, self.model.signature)

        # Normalize costs to similarities (0.0 to 1.0); a cost at or above
        # cfg.dtw_sensitivity clips to similarity 0.0.
        similarities = np.clip(1.0 - (costs / self.cfg.dtw_sensitivity), 0.0, 1.0)

        occurrences = []
        n_frames = len(similarities)

        # 3. NMS with Terminal Peak Logic (Robust to Plateaus)
        for i in range(1, n_frames):
            # Check if this frame is a local maximum (Allowing plateaus via >=)
            is_local_max = similarities[i] >= similarities[i-1]

            # Check falling edge (Strictly greater than next, or end of sequence)
            # This ensures we pick the *end* of a plateau as the event timestamp
            is_falling_edge = (i == n_frames - 1) or (similarities[i] > similarities[i+1])

            # 0.50 is a hard-coded minimum confidence floor for candidates.
            if similarities[i] >= 0.50 and is_local_max and is_falling_edge:
                match_len = lengths[i]
                start_idx = max(0, i - match_len)

                occurrences.append({
                    "start_ms": timestamps_ms[start_idx],
                    "end_ms": timestamps_ms[i],
                    "confidence": float(similarities[i]),
                    "start_idx": start_idx,
                    "end_idx": i
                })

        # 4. Temporal Overlap Suppression (NMS): greedily keep the most
        # confident candidates, dropping later ones overlapping them too much.
        occurrences.sort(key=lambda x: x['confidence'], reverse=True)
        final_occurrences = []

        for occ in occurrences:
            is_overlapping = False
            # max(1, ...) avoids a zero-division for degenerate zero-length events
            occ_dur = max(1, occ['end_ms'] - occ['start_ms'])

            for fin in final_occurrences:
                latest_start = max(occ['start_ms'], fin['start_ms'])
                earliest_end = min(occ['end_ms'], fin['end_ms'])
                overlap_dur = max(0, earliest_end - latest_start)

                if overlap_dur > 0:
                    # Overlap ratio is measured relative to the CANDIDATE's duration
                    overlap_ratio = overlap_dur / occ_dur
                    if overlap_ratio > self.cfg.overlap_tolerance:
                        is_overlapping = True
                        break

            if not is_overlapping:
                final_occurrences.append(occ)

        # Return chronologically sorted
        final_occurrences.sort(key=lambda x: x['start_ms'])

        # Clean internal indices for pristine API output
        for o in final_occurrences:
            del o['start_idx']
            del o['end_idx']

        return final_occurrences

    def step(self, current_frame_skeleton: np.ndarray, timestamp_ms: int) -> Optional[Dict]:
        """
        ACTIVE MODE: Real-Time Streaming loop for live webcam or robotics edge processing.
        Ingests a single frame (75, 3) and yields an event the millisecond an action completes.

        :return: the match dict for an event ending exactly at `timestamp_ms`,
                 or None when no new event has completed on this frame.
        """
        # 1. Manage Ring Buffer (a list-based sliding window, capped at
        # self._max_buffer_size frames)
        self._frame_buffer.append(current_frame_skeleton)
        self._timestamp_buffer.append(timestamp_ms)

        if len(self._frame_buffer) > self._max_buffer_size:
            self._frame_buffer.pop(0)
            self._timestamp_buffer.pop(0)

        # Needs minimum history to run DTW
        if len(self._frame_buffer) < self.model.signature.shape[0]:
            return None

        # 2. Execute Batch Detect on the sliding window
        window_arr = np.array(self._frame_buffer)
        window_matches = self.detect(window_arr, self._timestamp_buffer)

        # 3. Filter for Zero-Latency Events
        for match in window_matches:
            # If the match completed EXACTLY on this millisecond, emit it.
            # And ensure we haven't already emitted it due to sliding window overlap.
            if match['end_ms'] == timestamp_ms and match['end_ms'] > self._last_emitted_end_ms:
                self._last_emitted_end_ms = match['end_ms']
                return match

        return None
@@ -0,0 +1,35 @@
1
+ import numpy as np
2
+
3
class PhysicsEvaluator:
    """
    Ports the Da Vinci Backend's physics derivatives to the Edge.
    Allows real-time coaching apps to evaluate speed, acceleration, and jerk.
    """

    @staticmethod
    def calculate_derivatives(end_effector_trajectory: np.ndarray, fps: float) -> dict:
        """
        Calculates Speed (m/s), Acceleration (m/s^2), and Jerk (m/s^3)
        for a specific 3D trajectory sequence.

        :param end_effector_trajectory: (frames, 3) array of positions.
        :param fps: Capture frame rate; non-positive values fall back to
                    a ~30fps timestep (0.033s).
        :return: dict of per-frame magnitude arrays plus scalar peaks.
        """
        # Timestep between samples; guard against a non-positive frame rate.
        timestep = (1.0 / fps) if fps > 0 else 0.033

        # Successive central-difference derivatives of the trajectory.
        velocity = np.gradient(end_effector_trajectory, timestep, axis=0)
        acceleration = np.gradient(velocity, timestep, axis=0)
        jerk = np.gradient(acceleration, timestep, axis=0)

        # Collapse each 3D vector series into per-frame magnitudes.
        speed = np.linalg.norm(velocity, axis=1)
        accel_mag = np.linalg.norm(acceleration, axis=1)
        jerk_mag = np.linalg.norm(jerk, axis=1)

        return {
            "speed_mps": speed,
            "acceleration_mps2": accel_mag,
            "jerk_mps3": jerk_mag,
            "peak_speed": float(speed.max()),
            "peak_jerk": float(jerk_mag.max()),
        }
@@ -0,0 +1,106 @@
1
+ import numpy as np
2
+ from numba import jit
3
+ from .types import ZONES
4
+
5
@jit(nopython=True)
def fast_streaming_dtw_continuous(test_seq, seed_seq):
    """
    Space-Optimized O(M) Subsequence DTW.
    Returns the normalized cost vector and match length for every frame.

    test_seq : (N, F) query feature sequence.
    seed_seq : (M, F) template signature to align against.

    Returns (cost_array, length_array):
      cost_array[i]   — cost of the best subsequence alignment ENDING at
                        query frame i, normalized by template length M.
      length_array[i] — number of query frames that alignment consumed.
    """
    N = test_seq.shape[0]
    M = seed_seq.shape[0]

    cost_array = np.full(N, np.inf)
    length_array = np.zeros(N, dtype=np.int32)
    # Degenerate inputs: nothing to align.
    if M == 0 or N == 0:
        return cost_array, length_array

    # Two-row dynamic program: D_* hold accumulated costs, S_* track the
    # query start index of the path that reached each cell.
    D_prev = np.full(M + 1, np.inf)
    D_curr = np.full(M + 1, np.inf)
    D_prev[0] = 0.0

    S_prev = np.zeros(M + 1, dtype=np.int32)
    S_curr = np.zeros(M + 1, dtype=np.int32)

    for i in range(1, N + 1):
        # Subsequence DTW: a match may START at any query frame for free.
        D_curr[0] = 0.0
        S_curr[0] = i - 1

        for j in range(1, M + 1):
            # Euclidean distance between query frame i-1 and template frame j-1
            dist = 0.0
            for k in range(test_seq.shape[1]):
                diff = test_seq[i-1, k] - seed_seq[j-1, k]
                dist += diff * diff
            cost = np.sqrt(dist)

            # DTW transitions (diagonal / vertical / horizontal), carrying
            # forward the start index of whichever predecessor is cheapest.
            if D_prev[j-1] <= D_prev[j] and D_prev[j-1] <= D_curr[j-1]:
                prev_cost, S_curr[j] = D_prev[j-1], S_prev[j-1]
            elif D_prev[j] <= D_curr[j-1]:
                prev_cost, S_curr[j] = D_prev[j], S_prev[j]
            else:
                prev_cost, S_curr[j] = D_curr[j-1], S_curr[j-1]

            D_curr[j] = cost + prev_cost

        # Save the normalized alignment cost ending at THIS specific frame
        cost_array[i-1] = D_curr[M] / M
        length_array[i-1] = (i - 1) - S_curr[M]

        # Roll the DP rows with an explicit element-wise copy (keeps the
        # arrays in place rather than rebinding references).
        for j in range(M + 1):
            D_prev[j] = D_curr[j]
            S_prev[j] = S_curr[j]

    return cost_array, length_array
55
+
56
def extract_rdm_signature(skeleton_sequence: np.ndarray, bitmask: int) -> np.ndarray:
    """
    Extracts the Scale-Invariant Relative Distance Matrix (RDM).
    Strictly adheres to the Torso-Length Normalization mandate.

    :param skeleton_sequence: (frames, joints, 3) or (frames, joints, 4)
        array; a 4th channel is treated as a per-joint visibility score.
    :param bitmask: OR-combination of ZONES bit flags selecting which
        joint groups contribute features.
    :return: (frames, n_pairs) float32 matrix of torso-normalized
        pairwise distances over the active joints.
    """
    # Resolve active joint indices based on binary bitmask
    active_indices = []
    for name, (bit, indices) in ZONES.items():
        if bitmask & bit:
            active_indices.extend(indices)

    if not active_indices:
        active_indices = ZONES["torso"][1]  # Failsafe

    n_joints = len(active_indices)
    # Number of unordered joint pairs = feature dimension per frame.
    dim = int(n_joints * (n_joints - 1) / 2)
    frames = skeleton_sequence.shape[0]
    has_vis = skeleton_sequence.shape[2] == 4 if frames > 0 else False

    signature = np.zeros((frames, dim), dtype=np.float32)
    CONF_THRESH = 0.5

    for f in range(frames):
        lms = skeleton_sequence[f]
        # Untracked frames (all-zero landmarks) keep their zeroed features.
        if np.all(lms == 0):
            continue

        # --- WILLOW 5 DIRECTIVE: Strict Torso-Length Normalization ---
        # 3D distance between the midpoint of shoulders (11, 12) and hips (23, 24)
        mid_shoulder = (lms[11][:3] + lms[12][:3]) / 2.0
        mid_hip = (lms[23][:3] + lms[24][:3]) / 2.0
        torso_length = np.linalg.norm(mid_shoulder - mid_hip)

        scale = max(torso_length, 0.01)  # Avoid division by zero

        # Fill features in fixed pair order (i < j over active_indices) so
        # every frame's layout matches the model signature's.
        idx = 0
        for i in range(n_joints):
            for j in range(i + 1, n_joints):
                idx1, idx2 = active_indices[i], active_indices[j]

                v1 = lms[idx1][3] if has_vis else 1.0
                v2 = lms[idx2][3] if has_vis else 1.0

                # A pair is kept only when the product of its visibilities
                # clears the squared confidence threshold.
                if (v1 * v2) >= (CONF_THRESH * CONF_THRESH):
                    dist = np.linalg.norm(lms[idx1][:3] - lms[idx2][:3]) / scale
                    signature[f, idx] = dist
                else:
                    signature[f, idx] = 0.0  # Mask out low confidence
                idx += 1

    return signature
@@ -0,0 +1,87 @@
1
+ import os
2
+ import json
3
+ import struct
4
+ import numpy as np
5
+ from io import BytesIO
6
+ from typing import Union
7
+ from .types import WillowConfig, WillowModel, ZONES
8
+
9
def parse_int8_model(data: Union[bytes, BytesIO]) -> WillowModel:
    """
    Parses the secure Willow V4.0 .int8 binary format.
    Executes entirely in RAM (Zero physical disk footprint).

    :param data: Raw model bytes, or a readable BytesIO (read from its
        current position — callers should pass it positioned at 0).
    :return: WillowModel with a de-quantized float32 signature of shape
        (frames, features).
    :raises ValueError: if the header is truncated or the version is not 40.
    """
    if isinstance(data, BytesIO):
        buffer = data.read()
    else:
        buffer = data

    if len(buffer) < 24:
        raise ValueError("Invalid Willow Binary: Header too short.")

    # Strict Little-Endian 24-Byte Header parsing:
    # uint32 version, uint32 bitmask, then four float32 calibration values.
    header = struct.unpack('<IIffff', buffer[:24])
    version, bitmask, scale, overlap, dtw_sens, tempo = header

    if version != 40:
        raise ValueError(f"Unsupported model version: {version}. Expected 40 (V4.0).")

    config = WillowConfig(
        version=version,
        zone_bitmask=bitmask,
        overlap_tolerance=overlap,
        dtw_sensitivity=dtw_sens,
        tempo_variance=tempo
    )

    # Resolve dimension size from bitmask to reshape flat array
    # (pair count must mirror extract_rdm_signature's feature layout)
    n_joints = sum(len(indices) for bit, indices in ZONES.values() if bitmask & bit)
    dim = int(n_joints * (n_joints - 1) / 2)

    # Fast De-Quantization: int8 [-127, 127] -> float32 [-scale, scale]
    raw_int8 = np.frombuffer(buffer[24:], dtype=np.int8)
    signature = (raw_int8.astype(np.float32) / 127.0) * scale

    # Reshape to (Frames, Features)
    signature = signature.reshape(-1, dim)

    return WillowModel(config=config, signature=signature)
49
+
50
def parse_json_model(data: Union[str, dict]) -> WillowModel:
    """
    Fallback parser for standard web JSON signatures.

    Accepts either a raw JSON string or an already-decoded dict; any
    missing calibration fields fall back to library defaults.
    """
    payload = json.loads(data) if isinstance(data, str) else data

    calib = payload.get("calibration_config", {})

    # Versions are published as e.g. 4.0 and stored internally as 40.
    config = WillowConfig(
        version=int(float(calib.get("version", 4.0)) * 10),
        zone_bitmask=int(calib.get("zone_bitmask", 2)),
        overlap_tolerance=float(calib.get("overlap_tolerance", 0.25)),
        dtw_sensitivity=float(calib.get("dtw_sensitivity", 3.0)),
        tempo_variance=float(calib.get("tempo_variance", 0.20))
    )

    sig = np.array(payload.get("signature", []), dtype=np.float32)
    return WillowModel(config=config, signature=sig)
71
+
72
def load_local_model(filepath: str) -> WillowModel:
    """
    Loads a Willow model directly from a local file.
    Use this if you downloaded the model manually via the Willow Web Interface.

    Files ending in '.json' are routed to the JSON parser; anything else
    is treated as the optimized binary (.int8 / .bin) format.
    """
    if not os.path.exists(filepath):
        raise FileNotFoundError(f"Model file not found: {filepath}")

    is_json = filepath.endswith('.json')

    if is_json:
        with open(filepath, 'r') as fh:
            return parse_json_model(json.load(fh))

    with open(filepath, 'rb') as fh:
        return parse_int8_model(fh.read())
@@ -0,0 +1,52 @@
1
+ import numpy as np
2
+
3
class KinematicRetargeter:
    """
    Extracts joint angles (Euler approximations) from raw 3D point clouds.
    Required for Reinforcement Learning (RL) simulation environments.
    """

    @staticmethod
    def _angle_between_vectors(v1: np.ndarray, v2: np.ndarray) -> np.ndarray:
        """Per-row angle in degrees between two (N, 3) vector stacks.

        Rows where either vector has zero length yield 0.0.
        """
        norms_a = np.linalg.norm(v1, axis=1)
        norms_b = np.linalg.norm(v2, axis=1)
        valid = (norms_a > 0) & (norms_b > 0)

        result = np.zeros(v1.shape[0])
        dots = np.sum(v1 * v2, axis=1)
        # Clip guards against |cos| marginally exceeding 1 from float error.
        cosines = np.clip(dots[valid] / (norms_a[valid] * norms_b[valid]), -1.0, 1.0)
        result[valid] = np.degrees(np.arccos(cosines))
        return result

    @staticmethod
    def extract_joint_angles(skeleton_seq: np.ndarray) -> dict:
        """
        Returns a dictionary of critical 1D joint angle arrays over time.

        Each angle is measured at the middle joint of a (proximal, center,
        distal) triplet in the Willow topology.
        """
        # (proximal, center, distal) landmark indices per tracked joint.
        TRIPLETS = {
            "left_elbow_flexion": (11, 13, 15),   # shoulder-elbow-wrist
            "right_elbow_flexion": (12, 14, 16),
            "left_knee_flexion": (23, 25, 27),    # hip-knee-ankle
            "right_knee_flexion": (24, 26, 28),
        }

        angles = {}
        for key, (proximal, center, distal) in TRIPLETS.items():
            seg_a = skeleton_seq[:, proximal, :3] - skeleton_seq[:, center, :3]
            seg_b = skeleton_seq[:, distal, :3] - skeleton_seq[:, center, :3]
            angles[key] = KinematicRetargeter._angle_between_vectors(seg_a, seg_b)
        return angles
@@ -0,0 +1,35 @@
1
+ import numpy as np
2
+
3
class CoordinateBridge:
    """
    Bridges the standard Willow space (MediaPipe: X-Right, Y-Down, Z-Forward)
    to industry-standard spaces for Robotics and VR/AR.
    """

    @staticmethod
    def _remap_axes(skeleton_seq: np.ndarray, sources, signs) -> np.ndarray:
        """Copy channel sources[i] into axis i with sign flips applied.

        A fourth channel (presumably visibility/confidence -- it is copied
        verbatim whenever present) passes through untouched.
        """
        remapped = np.zeros_like(skeleton_seq)
        for axis in range(3):
            remapped[..., axis] = signs[axis] * skeleton_seq[..., sources[axis]]
        if skeleton_seq.shape[-1] == 4:
            remapped[..., 3] = skeleton_seq[..., 3]
        return remapped

    @staticmethod
    def to_ros_z_up(skeleton_seq: np.ndarray) -> np.ndarray:
        """
        Translates to ROS/Isaac Sim Standard (Z-Up, Right-Handed).
        Maps: MP_X -> ROS_X, MP_Z -> ROS_Y, -MP_Y -> ROS_Z
        """
        return CoordinateBridge._remap_axes(skeleton_seq, (0, 2, 1), (1, 1, -1))

    @staticmethod
    def to_unity_y_up(skeleton_seq: np.ndarray) -> np.ndarray:
        """
        Translates to Unity/Unreal Standard (Y-Up, Left-Handed).
        Maps: MP_X -> Unity_X, -MP_Y -> Unity_Y, MP_Z -> Unity_Z
        """
        return CoordinateBridge._remap_axes(skeleton_seq, (0, 1, 2), (1, -1, 1))
@@ -0,0 +1,28 @@
1
+ import dataclasses
2
+ import numpy as np
3
+
4
# Willow Standard 75-Point Topology Map.
# Each zone maps to (bit flag, 3D joint indices); the flags are distinct
# powers of two (1, 2, 4, ...) so zones can be combined into a bitmask.
ZONES = {
    name: (flag, joints)
    for name, flag, joints in (
        ("head", 1, list(range(0, 11))),
        ("torso", 2, [11, 12, 23, 24]),
        ("arms", 4, [13, 14, 15, 16]),
        ("hands", 8, list(range(33, 75))),
        ("legs", 16, [25, 26, 27, 28]),
        ("feet", 32, [29, 30, 31, 32]),
    )
}
14
+
15
@dataclasses.dataclass
class WillowConfig:
    """Configuration derived from the 24-Byte .int8 Model Header."""
    # Header/model format version.
    version: int
    # Bitmask of active body zones -- presumably the per-zone flag values
    # (1, 2, 4, ...) from ZONES OR-ed together; confirm against the parser.
    zone_bitmask: int
    # Tolerance for overlapping detections -- NOTE(review): exact semantics
    # (ratio vs. milliseconds) are defined by the consumer; confirm.
    overlap_tolerance: float
    # Sensitivity parameter for DTW-based matching -- TODO confirm range.
    dtw_sensitivity: float
    # Allowed variance in action tempo -- TODO confirm units.
    tempo_variance: float
23
+
24
@dataclasses.dataclass
class WillowModel:
    """The runtime representation of an Action Model."""
    # Parsed configuration from the model header.
    config: WillowConfig
    signature: np.ndarray # The Float32 De-quantized RDM Matrix
@@ -0,0 +1,133 @@
1
+ Metadata-Version: 2.4
2
+ Name: willow-runtime
3
+ Version: 5.1.0
4
+ Summary: Willow 5 Runtime: Action Recognition, Retargeting, & Edge Evaluator
5
+ Author: Willow Dynamics
6
+ License: MIT
7
+ Project-URL: Homepage, https://willowdynamics.com
8
+ Requires-Python: >=3.8
9
+ Description-Content-Type: text/markdown
10
+ License-File: LICENSE.txt
11
+ Requires-Dist: numpy>=1.21.0
12
+ Requires-Dist: numba>=0.56.0
13
+ Requires-Dist: requests>=2.25.0
14
+ Dynamic: license-file
15
+
16
+ # Willow 5 Runtime SDK
17
+
18
+ The official Python SDK for **Willow Dynamics**.
19
+
20
+ This SDK acts as the bridge between the Willow Cloud Oracle and your local environment. It enables **Zero-Shot Action Recognition**, **Kinematic Retargeting**, and **Physics Evaluation** on edge devices, cloud pipelines, and simulation clusters.
21
+
22
+ It is designed to be dependency-light (No AWS/Boto3 required) and privacy-first (Models run in ephemeral RAM).
23
+
24
+ ---
25
+
26
+ ## Installation
27
+
28
+ ```bash
29
+ pip install willow-runtime
30
+ ```
31
+
32
+ ---
33
+
34
+ ## 1. Quick Start: Model Provisioning
35
+
36
+ Securely fetch your proprietary action models from the Willow Cloud.
37
+
38
+ ```python
39
+ from willow import WillowClient
40
+
41
+ # Initialize with your API Key
42
+ client = WillowClient(api_key="YOUR_WILLOW_API_KEY")
43
+
44
+ # Option A: Stream directly to RAM (Secure / Cloud / Ephemeral)
45
+ # The model never touches the physical disk.
46
+ model = client.get_model("tactical-reload-v1")
47
+
48
+ # Option B: Download to Disk (Offline / Edge / Air-Gapped)
49
+ client.download_model("tactical-reload-v1", "./models/reload.int8")
50
+ ```
51
+
52
+ ---
53
+
54
+ ## 2. Usage: Active Streaming (Real-Time)
55
+
56
+ Best for **Robotics**, **Webcams**, or **Smart Gyms** where frames arrive one by one. The `step()` method manages an internal sliding window buffer for zero-latency triggering.
57
+
58
+ ```python
59
+ from willow import WillowClient, WillowDetector
60
+
61
+ # 1. Load Model
62
+ client = WillowClient(api_key="YOUR_API_KEY")
63
+ model = client.get_model("tactical-reload-v1")
64
+ detector = WillowDetector(model)
65
+
66
+ # 2. The Real-Time Loop
67
+ # Assume 'get_next_frame()' returns a (75, 3) numpy array of skeletal data
68
+ while True:
69
+ current_skeleton, timestamp_ms = get_next_frame()
70
+
71
+ # .step() executes in <2ms on modern CPUs
72
+ event = detector.step(current_skeleton, timestamp_ms)
73
+
74
+ if event:
75
+ print(f"!!! ACTION DETECTED !!!")
76
+ print(f"Timestamp: {event['end_ms']}ms | Confidence: {event['confidence']:.2f}")
77
+
78
+ # Trigger Actuator / Robot / UI here
79
+ ```
80
+
81
+ ---
82
+
83
+ ## 3. Usage: Batch Analysis (Passive)
84
+
85
+ Best for **Data Science**, **Historical Video Processing**, or **Cloud ETL** pipelines.
86
+
87
+ ```python
88
+ from willow import WillowClient, WillowDetector
89
+ import numpy as np
90
+
91
+ # 1. Load Model
92
+ client = WillowClient(api_key="YOUR_API_KEY")
93
+ model = client.get_model("golf-swing-v4")
94
+ detector = WillowDetector(model)
95
+
96
+ # 2. Load Data (e.g., from an uploaded video file processed by MediaPipe)
97
+ # Shape: (Frames, 75, 3)
98
+ full_sequence = np.load("recording_data.npy")
99
+ timestamps = [t * 33 for t in range(len(full_sequence))]
100
+
101
+ # 3. Detect All Occurrences
102
+ matches = detector.detect(full_sequence, timestamps)
103
+
104
+ print(f"Found {len(matches)} events:")
105
+ for m in matches:
106
+ print(f" - {m['start_ms']}ms to {m['end_ms']}ms (Conf: {m['confidence']:.2f})")
107
+ ```
108
+
109
+ ---
110
+
111
+ ## 4. Usage: Sim-to-Real & Robotics
112
+
113
+ Best for **Reinforcement Learning (RL)**, **NVIDIA Isaac Sim**, and **Humanoid Teleoperation**. Bridges the gap between Computer Vision coordinates and Robotics standards.
114
+
115
+ ```python
116
+ from willow import CoordinateBridge, KinematicRetargeter
117
+
118
+ # 1. Convert Coordinate Space
119
+ # MediaPipe (Y-Down) -> ROS/Isaac Sim (Z-Up)
120
+ ros_ready_sequence = CoordinateBridge.to_ros_z_up(raw_skeleton_sequence)
121
+
122
+ # 2. Extract Joint Angles for RL Training
123
+ # Returns dictionary of 1D angle arrays (e.g., "right_elbow_flexion")
124
+ joint_angles = KinematicRetargeter.extract_joint_angles(ros_ready_sequence)
125
+
126
+ print(f"Extracted {len(joint_angles)} joint features for simulation training.")
127
+ ```
128
+
129
+ ---
130
+
131
+ ## License
132
+
133
+ MIT License. Copyright (c) 2026 Willow Dynamics.
@@ -0,0 +1,18 @@
1
+ LICENSE.txt
2
+ README.md
3
+ pyproject.toml
4
+ tests/test_willow_sdk.py
5
+ willow/__init__.py
6
+ willow/client.py
7
+ willow/detector.py
8
+ willow/evaluator.py
9
+ willow/math_kernels.py
10
+ willow/parsers.py
11
+ willow/retargeting.py
12
+ willow/transforms.py
13
+ willow/types.py
14
+ willow_runtime.egg-info/PKG-INFO
15
+ willow_runtime.egg-info/SOURCES.txt
16
+ willow_runtime.egg-info/dependency_links.txt
17
+ willow_runtime.egg-info/requires.txt
18
+ willow_runtime.egg-info/top_level.txt
@@ -0,0 +1,3 @@
1
+ numpy>=1.21.0
2
+ numba>=0.56.0
3
+ requests>=2.25.0