kinemotion-0.10.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic.

@@ -0,0 +1,220 @@
+"""Pose tracking using MediaPipe Pose."""
+
+import cv2
+import mediapipe as mp
+import numpy as np
+
+
+class PoseTracker:
+    """Tracks human pose landmarks in video frames using MediaPipe."""
+
+    def __init__(
+        self,
+        min_detection_confidence: float = 0.5,
+        min_tracking_confidence: float = 0.5,
+    ):
+        """
+        Initialize the pose tracker.
+
+        Args:
+            min_detection_confidence: Minimum confidence for pose detection
+            min_tracking_confidence: Minimum confidence for pose tracking
+        """
+        self.mp_pose = mp.solutions.pose
+        self.pose = self.mp_pose.Pose(
+            min_detection_confidence=min_detection_confidence,
+            min_tracking_confidence=min_tracking_confidence,
+            model_complexity=1,
+        )
+
+    def process_frame(
+        self, frame: np.ndarray
+    ) -> dict[str, tuple[float, float, float]] | None:
+        """
+        Process a single frame and extract pose landmarks.
+
+        Args:
+            frame: BGR image frame
+
+        Returns:
+            Dictionary mapping landmark names to (x, y, visibility) tuples,
+            or None if no pose detected. Coordinates are normalized (0-1).
+        """
+        # Convert BGR to RGB
+        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+        # Process the frame
+        results = self.pose.process(rgb_frame)
+
+        if not results.pose_landmarks:
+            return None
+
+        # Extract key landmarks for feet tracking and CoM estimation
+        landmarks = {}
+        landmark_names = {
+            # Feet landmarks
+            self.mp_pose.PoseLandmark.LEFT_ANKLE: "left_ankle",
+            self.mp_pose.PoseLandmark.RIGHT_ANKLE: "right_ankle",
+            self.mp_pose.PoseLandmark.LEFT_HEEL: "left_heel",
+            self.mp_pose.PoseLandmark.RIGHT_HEEL: "right_heel",
+            self.mp_pose.PoseLandmark.LEFT_FOOT_INDEX: "left_foot_index",
+            self.mp_pose.PoseLandmark.RIGHT_FOOT_INDEX: "right_foot_index",
+            # Torso landmarks for CoM estimation
+            self.mp_pose.PoseLandmark.LEFT_HIP: "left_hip",
+            self.mp_pose.PoseLandmark.RIGHT_HIP: "right_hip",
+            self.mp_pose.PoseLandmark.LEFT_SHOULDER: "left_shoulder",
+            self.mp_pose.PoseLandmark.RIGHT_SHOULDER: "right_shoulder",
+            # Additional landmarks for better CoM estimation
+            self.mp_pose.PoseLandmark.NOSE: "nose",
+            self.mp_pose.PoseLandmark.LEFT_KNEE: "left_knee",
+            self.mp_pose.PoseLandmark.RIGHT_KNEE: "right_knee",
+        }
+
+        for landmark_id, name in landmark_names.items():
+            lm = results.pose_landmarks.landmark[landmark_id]
+            landmarks[name] = (lm.x, lm.y, lm.visibility)
+
+        return landmarks
+
+    def close(self) -> None:
+        """Release resources."""
+        self.pose.close()
+
+
+def compute_center_of_mass(
+    landmarks: dict[str, tuple[float, float, float]],
+    visibility_threshold: float = 0.5,
+) -> tuple[float, float, float]:
+    """
+    Compute approximate center of mass (CoM) from body landmarks.
+
+    Uses biomechanical segment weights based on Dempster's body segment parameters:
+    - Head: 8% of body mass (represented by nose)
+    - Trunk (shoulders to hips): 50% of body mass
+    - Thighs: 2 × 10% = 20% of body mass
+    - Legs (knees to ankles): 2 × 5% = 10% of body mass
+    - Feet: 2 × 1.5% = 3% of body mass
+
+    The CoM is estimated as a weighted average of these segments, with
+    weights corresponding to their proportion of total body mass.
+
+    Args:
+        landmarks: Dictionary of landmark positions (x, y, visibility)
+        visibility_threshold: Minimum visibility to include landmark in calculation
+
+    Returns:
+        (x, y, visibility) tuple for estimated CoM position
+        visibility = average visibility of all segments used
+    """
+    # Define segment representatives and their weights (as fraction of body mass)
+    # Each segment uses midpoint or average of its bounding landmarks
+    segments = []
+    segment_weights = []
+    visibilities = []
+
+    # Head segment: 8% (use nose as proxy)
+    if "nose" in landmarks:
+        x, y, vis = landmarks["nose"]
+        if vis > visibility_threshold:
+            segments.append((x, y))
+            segment_weights.append(0.08)
+            visibilities.append(vis)
+
+    # Trunk segment: 50% (midpoint between shoulders and hips)
+    trunk_landmarks = ["left_shoulder", "right_shoulder", "left_hip", "right_hip"]
+    trunk_positions = [
+        (x, y, vis)
+        for key in trunk_landmarks
+        if key in landmarks
+        for x, y, vis in [landmarks[key]]
+        if vis > visibility_threshold
+    ]
+    if len(trunk_positions) >= 2:
+        trunk_x = float(np.mean([pos[0] for pos in trunk_positions]))
+        trunk_y = float(np.mean([pos[1] for pos in trunk_positions]))
+        trunk_vis = float(np.mean([pos[2] for pos in trunk_positions]))
+        segments.append((trunk_x, trunk_y))
+        segment_weights.append(0.50)
+        visibilities.append(trunk_vis)
+
+    # Thigh segment: 20% total (midpoint hip to knee for each leg)
+    for side in ["left", "right"]:
+        hip_key = f"{side}_hip"
+        knee_key = f"{side}_knee"
+        if hip_key in landmarks and knee_key in landmarks:
+            hip_x, hip_y, hip_vis = landmarks[hip_key]
+            knee_x, knee_y, knee_vis = landmarks[knee_key]
+            if hip_vis > visibility_threshold and knee_vis > visibility_threshold:
+                thigh_x = (hip_x + knee_x) / 2
+                thigh_y = (hip_y + knee_y) / 2
+                thigh_vis = (hip_vis + knee_vis) / 2
+                segments.append((thigh_x, thigh_y))
+                segment_weights.append(0.10)  # 10% per leg
+                visibilities.append(thigh_vis)
+
+    # Lower leg segment: 10% total (midpoint knee to ankle for each leg)
+    for side in ["left", "right"]:
+        knee_key = f"{side}_knee"
+        ankle_key = f"{side}_ankle"
+        if knee_key in landmarks and ankle_key in landmarks:
+            knee_x, knee_y, knee_vis = landmarks[knee_key]
+            ankle_x, ankle_y, ankle_vis = landmarks[ankle_key]
+            if knee_vis > visibility_threshold and ankle_vis > visibility_threshold:
+                leg_x = (knee_x + ankle_x) / 2
+                leg_y = (knee_y + ankle_y) / 2
+                leg_vis = (knee_vis + ankle_vis) / 2
+                segments.append((leg_x, leg_y))
+                segment_weights.append(0.05)  # 5% per leg
+                visibilities.append(leg_vis)
+
+    # Foot segment: 3% total (average of ankle, heel, foot_index)
+    for side in ["left", "right"]:
+        foot_keys = [f"{side}_ankle", f"{side}_heel", f"{side}_foot_index"]
+        foot_positions = [
+            (x, y, vis)
+            for key in foot_keys
+            if key in landmarks
+            for x, y, vis in [landmarks[key]]
+            if vis > visibility_threshold
+        ]
+        if foot_positions:
+            foot_x = float(np.mean([pos[0] for pos in foot_positions]))
+            foot_y = float(np.mean([pos[1] for pos in foot_positions]))
+            foot_vis = float(np.mean([pos[2] for pos in foot_positions]))
+            segments.append((foot_x, foot_y))
+            segment_weights.append(0.015)  # 1.5% per foot
+            visibilities.append(foot_vis)
+
+    # If no segments found, fall back to hip average
+    if not segments:
+        if "left_hip" in landmarks and "right_hip" in landmarks:
+            lh_x, lh_y, lh_vis = landmarks["left_hip"]
+            rh_x, rh_y, rh_vis = landmarks["right_hip"]
+            return (
+                (lh_x + rh_x) / 2,
+                (lh_y + rh_y) / 2,
+                (lh_vis + rh_vis) / 2,
+            )
+        # Ultimate fallback: center of frame
+        return (0.5, 0.5, 0.0)
+
+    # Normalize weights to sum to 1.0
+    total_weight = sum(segment_weights)
+    normalized_weights = [w / total_weight for w in segment_weights]
+
+    # Compute weighted average of segment positions
+    com_x = float(
+        sum(
+            pos[0] * weight
+            for pos, weight in zip(segments, normalized_weights, strict=True)
+        )
+    )
+    com_y = float(
+        sum(
+            pos[1] * weight
+            for pos, weight in zip(segments, normalized_weights, strict=True)
+        )
+    )
+    com_visibility = float(np.mean(visibilities)) if visibilities else 0.0
+
+    return (com_x, com_y, com_visibility)
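
Taken together, the module above exposes a per-frame tracker (PoseTracker.process_frame) and a free function for CoM estimation (compute_center_of_mass) that compose directly: track each frame, then feed the landmark dictionary into the CoM estimator. A minimal usage sketch follows; the import path kinemotion.pose and the video filename are placeholders, since neither the module path nor any example data appears in this diff.

# Minimal usage sketch; "kinemotion.pose" and "jump.mp4" are placeholders --
# the actual module path inside the wheel is not visible in this diff.
import cv2

from kinemotion.pose import PoseTracker, compute_center_of_mass  # hypothetical path

tracker = PoseTracker(min_detection_confidence=0.5, min_tracking_confidence=0.5)
cap = cv2.VideoCapture("jump.mp4")  # example input, not part of the package

com_trajectory = []  # one (x, y, visibility) tuple per frame, normalized coordinates
try:
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        landmarks = tracker.process_frame(frame)  # BGR frame in, dict or None out
        if landmarks is None:
            com_trajectory.append(None)  # no pose detected in this frame
            continue
        com_trajectory.append(compute_center_of_mass(landmarks))
finally:
    cap.release()
    tracker.close()  # release MediaPipe resources
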
@@ -0,0 +1,366 @@
+"""Landmark smoothing utilities to reduce jitter in pose tracking."""
+
+import numpy as np
+from scipy.signal import savgol_filter
+
+from .filtering import (
+    bilateral_temporal_filter,
+    reject_outliers,
+)
+
+
+def smooth_landmarks(
+    landmark_sequence: list[dict[str, tuple[float, float, float]] | None],
+    window_length: int = 5,
+    polyorder: int = 2,
+) -> list[dict[str, tuple[float, float, float]] | None]:
+    """
+    Smooth landmark trajectories using Savitzky-Golay filter.
+
+    Args:
+        landmark_sequence: List of landmark dictionaries from each frame
+        window_length: Length of filter window (must be odd, >= polyorder + 2)
+        polyorder: Order of polynomial used to fit samples
+
+    Returns:
+        Smoothed landmark sequence with same structure as input
+    """
+    if len(landmark_sequence) < window_length:
+        # Not enough frames to smooth effectively
+        return landmark_sequence
+
+    # Ensure window_length is odd
+    if window_length % 2 == 0:
+        window_length += 1
+
+    # Extract landmark names from first valid frame
+    landmark_names = None
+    for frame_landmarks in landmark_sequence:
+        if frame_landmarks is not None:
+            landmark_names = list(frame_landmarks.keys())
+            break
+
+    if landmark_names is None:
+        return landmark_sequence
+
+    # Build arrays for each landmark coordinate
+    smoothed_sequence: list[dict[str, tuple[float, float, float]] | None] = []
+
+    for landmark_name in landmark_names:
+        # Extract x, y coordinates for this landmark across all frames
+        x_coords = []
+        y_coords = []
+        valid_frames = []
+
+        for i, frame_landmarks in enumerate(landmark_sequence):
+            if frame_landmarks is not None and landmark_name in frame_landmarks:
+                x, y, _ = frame_landmarks[landmark_name]  # vis not used
+                x_coords.append(x)
+                y_coords.append(y)
+                valid_frames.append(i)
+
+        if len(x_coords) < window_length:
+            continue
+
+        # Apply Savitzky-Golay filter
+        x_smooth = savgol_filter(x_coords, window_length, polyorder)
+        y_smooth = savgol_filter(y_coords, window_length, polyorder)
+
+        # Store smoothed values back
+        for idx, frame_idx in enumerate(valid_frames):
+            if frame_idx >= len(smoothed_sequence):
+                smoothed_sequence.extend(
+                    [{}] * (frame_idx - len(smoothed_sequence) + 1)
+                )
+
+            # Ensure smoothed_sequence[frame_idx] is a dict, not None
+            if smoothed_sequence[frame_idx] is None:
+                smoothed_sequence[frame_idx] = {}
+
+            if (
+                landmark_name not in smoothed_sequence[frame_idx]
+                and landmark_sequence[frame_idx] is not None
+            ):
+                # Keep original visibility
+                orig_vis = landmark_sequence[frame_idx][landmark_name][2]
+                smoothed_sequence[frame_idx][landmark_name] = (
+                    float(x_smooth[idx]),
+                    float(y_smooth[idx]),
+                    orig_vis,
+                )
+
+    # Fill in any missing frames with original data
+    for i in range(len(landmark_sequence)):
+        if i >= len(smoothed_sequence) or not smoothed_sequence[i]:
+            if i < len(smoothed_sequence):
+                smoothed_sequence[i] = landmark_sequence[i]
+            else:
+                smoothed_sequence.append(landmark_sequence[i])
+
+    return smoothed_sequence
+
+
+def compute_velocity(
+    positions: np.ndarray, fps: float, smooth_window: int = 3
+) -> np.ndarray:
+    """
+    Compute velocity from position data.
+
+    Args:
+        positions: Array of positions over time (n_frames, n_dims)
+        fps: Frames per second of the video
+        smooth_window: Window size for velocity smoothing
+
+    Returns:
+        Velocity array (n_frames, n_dims)
+    """
+    dt = 1.0 / fps
+    velocity = np.gradient(positions, dt, axis=0)
+
+    # Smooth velocity if we have enough data
+    if len(velocity) >= smooth_window and smooth_window > 1:
+        if smooth_window % 2 == 0:
+            smooth_window += 1
+        for dim in range(velocity.shape[1]):
+            velocity[:, dim] = savgol_filter(velocity[:, dim], smooth_window, 1)
+
+    return velocity
+
+
+def compute_velocity_from_derivative(
+    positions: np.ndarray,
+    window_length: int = 5,
+    polyorder: int = 2,
+) -> np.ndarray:
+    """
+    Compute velocity as derivative of smoothed position trajectory.
+
+    Uses Savitzky-Golay filter to compute the derivative directly, which provides
+    a much smoother and more accurate velocity estimate than frame-to-frame differences.
+
+    This method:
+    1. Fits a polynomial to the position data in a sliding window
+    2. Analytically computes the derivative of that polynomial
+    3. Returns smooth velocity values
+
+    Args:
+        positions: 1D array of position values (e.g., foot y-positions)
+        window_length: Window size for smoothing (must be odd, >= polyorder + 2)
+        polyorder: Polynomial order for Savitzky-Golay filter (typically 2 or 3)
+
+    Returns:
+        Array of absolute velocity values (magnitude of derivative)
+    """
+    if len(positions) < window_length:
+        # Fallback to simple differences for short sequences
+        return np.abs(np.diff(positions, prepend=positions[0]))
+
+    # Ensure window_length is odd
+    if window_length % 2 == 0:
+        window_length += 1
+
+    # Compute derivative using Savitzky-Golay filter
+    # deriv=1: compute first derivative
+    # delta=1.0: frame spacing (velocity per frame)
+    # mode='interp': interpolate at boundaries
+    velocity = savgol_filter(
+        positions,
+        window_length,
+        polyorder,
+        deriv=1,  # First derivative
+        delta=1.0,  # Frame spacing
+        mode="interp",
+    )
+
+    # Return absolute velocity (magnitude only)
+    return np.abs(velocity)
+
+
+def compute_acceleration_from_derivative(
+    positions: np.ndarray,
+    window_length: int = 5,
+    polyorder: int = 2,
+) -> np.ndarray:
+    """
+    Compute acceleration as second derivative of smoothed position trajectory.
+
+    Uses Savitzky-Golay filter to compute the second derivative directly,
+    providing smooth acceleration (curvature) estimates for detecting
+    characteristic patterns at landing and takeoff.
+
+    Landing and takeoff events show distinctive acceleration patterns:
+    - Landing: Large acceleration spike as feet decelerate on impact
+    - Takeoff: Acceleration change as body accelerates upward
+    - In flight: Constant acceleration due to gravity
+    - On ground: Near-zero acceleration (stationary position)
+
+    Args:
+        positions: 1D array of position values (e.g., foot y-positions)
+        window_length: Window size for smoothing (must be odd, >= polyorder + 2)
+        polyorder: Polynomial order for Savitzky-Golay filter (typically 2 or 3)
+
+    Returns:
+        Array of acceleration values (second derivative of position)
+    """
+    if len(positions) < window_length:
+        # Fallback to simple second differences for short sequences
+        velocity = np.diff(positions, prepend=positions[0])
+        return np.diff(velocity, prepend=velocity[0])

+    # Ensure window_length is odd
+    if window_length % 2 == 0:
+        window_length += 1
+
+    # Compute second derivative using Savitzky-Golay filter
+    # deriv=2: compute second derivative (acceleration/curvature)
+    # delta=1.0: frame spacing
+    # mode='interp': interpolate at boundaries
+    acceleration = savgol_filter(
+        positions,
+        window_length,
+        polyorder,
+        deriv=2,  # Second derivative
+        delta=1.0,  # Frame spacing
+        mode="interp",
+    )
+
+    return acceleration
+
+
+def smooth_landmarks_advanced(
+    landmark_sequence: list[dict[str, tuple[float, float, float]] | None],
+    window_length: int = 5,
+    polyorder: int = 2,
+    use_outlier_rejection: bool = True,
+    use_bilateral: bool = False,
+    ransac_threshold: float = 0.02,
+    bilateral_sigma_spatial: float = 3.0,
+    bilateral_sigma_intensity: float = 0.02,
+) -> list[dict[str, tuple[float, float, float]] | None]:
+    """
+    Advanced landmark smoothing with outlier rejection and bilateral filtering.
+
+    Combines multiple techniques for robust smoothing:
+    1. Outlier rejection (RANSAC + median filtering)
+    2. Optional bilateral filtering (edge-preserving)
+    3. Savitzky-Golay smoothing
+
+    Args:
+        landmark_sequence: List of landmark dictionaries from each frame
+        window_length: Length of filter window (must be odd, >= polyorder + 2)
+        polyorder: Order of polynomial used to fit samples
+        use_outlier_rejection: Apply outlier detection and removal
+        use_bilateral: Use bilateral filter instead of Savitzky-Golay
+        ransac_threshold: Threshold for RANSAC outlier detection
+        bilateral_sigma_spatial: Spatial sigma for bilateral filter
+        bilateral_sigma_intensity: Intensity sigma for bilateral filter
+
+    Returns:
+        Smoothed landmark sequence with same structure as input
+    """
+    if len(landmark_sequence) < window_length:
+        # Not enough frames to smooth effectively
+        return landmark_sequence
+
+    # Ensure window_length is odd
+    if window_length % 2 == 0:
+        window_length += 1
+
+    # Extract landmark names from first valid frame
+    landmark_names = None
+    for frame_landmarks in landmark_sequence:
+        if frame_landmarks is not None:
+            landmark_names = list(frame_landmarks.keys())
+            break
+
+    if landmark_names is None:
+        return landmark_sequence
+
+    # Build arrays for each landmark coordinate
+    smoothed_sequence: list[dict[str, tuple[float, float, float]] | None] = []
+
+    for landmark_name in landmark_names:
+        # Extract x, y coordinates for this landmark across all frames
+        x_coords = []
+        y_coords = []
+        valid_frames = []
+
+        for i, frame_landmarks in enumerate(landmark_sequence):
+            if frame_landmarks is not None and landmark_name in frame_landmarks:
+                x, y, _ = frame_landmarks[landmark_name]  # vis not used
+                x_coords.append(x)
+                y_coords.append(y)
+                valid_frames.append(i)
+
+        if len(x_coords) < window_length:
+            continue
+
+        x_array = np.array(x_coords)
+        y_array = np.array(y_coords)
+
+        # Step 1: Outlier rejection
+        if use_outlier_rejection:
+            x_array, _ = reject_outliers(
+                x_array,
+                use_ransac=True,
+                use_median=True,
+                ransac_threshold=ransac_threshold,
+            )
+            y_array, _ = reject_outliers(
+                y_array,
+                use_ransac=True,
+                use_median=True,
+                ransac_threshold=ransac_threshold,
+            )
+
+        # Step 2: Smoothing (bilateral or Savitzky-Golay)
+        if use_bilateral:
+            x_smooth = bilateral_temporal_filter(
+                x_array,
+                window_size=window_length,
+                sigma_spatial=bilateral_sigma_spatial,
+                sigma_intensity=bilateral_sigma_intensity,
+            )
+            y_smooth = bilateral_temporal_filter(
+                y_array,
+                window_size=window_length,
+                sigma_spatial=bilateral_sigma_spatial,
+                sigma_intensity=bilateral_sigma_intensity,
+            )
+        else:
+            # Standard Savitzky-Golay
+            x_smooth = savgol_filter(x_array, window_length, polyorder)
+            y_smooth = savgol_filter(y_array, window_length, polyorder)
+
+        # Store smoothed values back
+        for idx, frame_idx in enumerate(valid_frames):
+            if frame_idx >= len(smoothed_sequence):
+                smoothed_sequence.extend(
+                    [{}] * (frame_idx - len(smoothed_sequence) + 1)
+                )
+
+            # Ensure smoothed_sequence[frame_idx] is a dict, not None
+            if smoothed_sequence[frame_idx] is None:
+                smoothed_sequence[frame_idx] = {}
+
+            if (
+                landmark_name not in smoothed_sequence[frame_idx]
+                and landmark_sequence[frame_idx] is not None
+            ):
+                # Keep original visibility
+                orig_vis = landmark_sequence[frame_idx][landmark_name][2]
+                smoothed_sequence[frame_idx][landmark_name] = (
+                    float(x_smooth[idx]),
+                    float(y_smooth[idx]),
+                    orig_vis,
+                )
+
+    # Fill in any missing frames with original data
+    for i in range(len(landmark_sequence)):
+        if i >= len(smoothed_sequence) or not smoothed_sequence[i]:
+            if i < len(smoothed_sequence):
+                smoothed_sequence[i] = landmark_sequence[i]
+            else:
+                smoothed_sequence.append(landmark_sequence[i])
+
+    return smoothed_sequence
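
For reference, a short sketch of how the smoothing and derivative helpers above might be combined: smooth a tracked landmark sequence, extract one coordinate as an array, then differentiate it with the Savitzky-Golay-based helpers. The import path kinemotion.smoothing and the synthetic input data are placeholders, not taken from this diff; in practice the landmark sequence would come from PoseTracker.process_frame.

# Usage sketch; "kinemotion.smoothing" is a placeholder import path --
# the real module path inside the wheel is not visible in this diff.
import numpy as np

from kinemotion.smoothing import (  # hypothetical path
    compute_acceleration_from_derivative,
    compute_velocity_from_derivative,
    smooth_landmarks,
)

# Tiny synthetic trajectory standing in for real tracking output:
# one landmark, 30 frames, y decreasing by 0.01 per frame.
landmark_sequence = [
    {"left_ankle": (0.5, 0.80 - 0.01 * i, 0.9)} for i in range(30)
]

smoothed = smooth_landmarks(landmark_sequence, window_length=5, polyorder=2)

# Pull the smoothed vertical (y) trajectory of one landmark...
y_positions = np.array([frame["left_ankle"][1] for frame in smoothed])

# ...then differentiate it in units of normalized coordinates per frame.
velocity = compute_velocity_from_derivative(y_positions, window_length=5, polyorder=2)
acceleration = compute_acceleration_from_derivative(y_positions, window_length=5, polyorder=2)

print(velocity.shape, acceleration.shape)  # both match y_positions.shape

The fps-based compute_velocity variant works the same way on an (n_frames, n_dims) position array when velocity per second rather than per frame is needed.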