c4dynamics 1.0.80__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of c4dynamics might be problematic.

@@ -0,0 +1,369 @@
+ import os
+ import cv2
+ import numpy as np
+ from c4dynamics import fdatapoint
+ import pkg_resources
+
+ MODEL_SIZE = (416, 416, 3)
+
+
+ class yolov3:
+
+     _nms_th = 0.5
+     _confidence_th = 0.5
+
+     def __init__(self):  # , **kwargs):
+
+         v3path = os.path.join('resources', 'detectors', 'yolo', 'v3')
+
+         weights_path = pkg_resources.resource_filename('c4dynamics'
+                            , os.path.join(v3path, 'yolov3.weights'))
+         cfg_path = pkg_resources.resource_filename('c4dynamics'
+                            , os.path.join(v3path, 'yolov3.cfg'))
+         coconames = pkg_resources.resource_filename('c4dynamics'
+                            , os.path.join(v3path, 'coco.names'))
+
+         self.net = cv2.dnn.readNetFromDarknet(cfg_path, weights_path)
+         self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
+         ln = self.net.getLayerNames()
+         self.ln = [ln[i - 1] for i in self.net.getUnconnectedOutLayers()]
+
+         with open(coconames, 'r') as f:
+             self.class_names = f.read().strip().split('\n')
+
+         # self.__dict__.update(kwargs)
+
+
+
+     @property
+     def nms_th(self):
+         '''
+         Non-Maximum Suppression (NMS) threshold.
+
+         Gets or sets the Non-Maximum Suppression (NMS) threshold. Default: `nms_th = 0.5`.
+
+
+         Parameters (Setter)
+         -------------------
+         val : float
+             The new threshold value for NMS during object detection.
+
+         Returns (Getter)
+         ----------------
+         out : float
+             The threshold value used for NMS during object detection.
+             Boxes that overlap a higher-scoring box by more than this
+             threshold (intersection over union) are suppressed.
+
+
+         Example
+         -------
+
+         .. code::
+
+             >>> imagename = 'planes.jpg'
+             >>> imgpath = os.path.join(os.getcwd(), 'examples', 'resources', imagename)
+             >>> yolo3 = c4d.detectors.yolov3()
+             >>> nms_thresholds = [0.1, 0.5, 0.9]
+             >>> for i, nms_threshold in enumerate(nms_thresholds, 1):
+             ...     yolo3.nms_th = nms_threshold
+             ...     img = cv2.imread(imgpath)
+             ...     pts = yolo3.detect(img)
+             ...     for p in pts:
+             ...         cv2.rectangle(img, p.box[0], p.box[1], [0, 0, 0], 2)
+             ...     plt.subplot(1, 3, i)
+             ...     plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+             ...     plt.title(f"NMS Threshold: {nms_threshold}")
+             ...     plt.axis('off')
+
+         .. figure:: /_static/images/yolo3_nms_th.png
+
+
+         A high value (0.9) for the NMS threshold here leads to an increased
+         number of bounding boxes around a single object: when the threshold
+         is high, a significant overlap is required before two bounding boxes
+         are considered redundant and one of them is suppressed.
+         To address this, choose an NMS threshold that matches the
+         characteristics of your dataset and the degree of overlap between
+         objects. A lower NMS threshold (e.g., 0.4 or 0.5) is commonly used
+         to suppress redundant boxes effectively while retaining accurate
+         detections. Experimenting with different threshold values and
+         observing their impact on the results is crucial for optimizing
+         the performance of object detection models.
+
+
+         '''
+         return self._nms_th
+
+     @nms_th.setter
+     def nms_th(self, val):
+         self._nms_th = val
+
+     @property
+     def confidence_th(self):
+         '''
+         Confidence threshold used in the object detection.
+
+         Gets or sets the confidence threshold. Default: `confidence_th = 0.5`.
+
+
+         Parameters (Setter)
+         -------------------
+         val : float
+             The new confidence threshold for object detection.
+
+         Returns (Getter)
+         ----------------
+         out : float
+             The confidence threshold for object detection.
+             Detected objects with confidence scores below this threshold are filtered out.
+
+
+         Example
+         -------
+
+         .. code::
+
+             >>> imagename = 'planes.jpg'
+             >>> imgpath = os.path.join(os.getcwd(), 'examples', 'resources', imagename)
+             >>> yolo3 = c4d.detectors.yolov3()
+             >>> confidence_thresholds = [0.9, 0.95, 0.99]
+             >>> for i, confidence_threshold in enumerate(confidence_thresholds, 1):
+             ...     yolo3.confidence_th = confidence_threshold
+             ...     img = cv2.imread(imgpath)
+             ...     pts = yolo3.detect(img)
+             ...     for p in pts:
+             ...         cv2.rectangle(img, p.box[0], p.box[1], [0, 0, 0], 2)
+             ...     plt.subplot(1, 3, i)
+             ...     plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+             ...     plt.title(f"Confidence Threshold: {confidence_threshold}")
+             ...     plt.axis('off')
+
+
+         .. figure:: /_static/images/yolo3_confidence_th.png
+
+
+         Here only a single object is missed, even with the confidence
+         threshold set to 0.99, which suggests that the model is highly
+         confident in its predictions. This level of performance is
+         typically achievable when the model has been trained on a diverse
+         and representative dataset, encompassing a wide variety of object
+         instances, backgrounds, and conditions.
+
+
+         '''
+
+         return self._confidence_th
+
+     @confidence_th.setter
+     def confidence_th(self, val):
+         self._confidence_th = val
+
+
+
+     def detect(self, frame):
+         '''
+         Detects objects in a frame using the YOLOv3 model.
+
+         At each sample, the detector performs the following steps:
+
+         1. Preprocesses the frame by creating a blob, normalizing pixel values, and swapping the Red and Blue channels.
+
+         2. Sets the input to the YOLOv3 model and performs a forward pass to obtain detections.
+
+         3. Extracts detected objects based on a confidence threshold, calculates bounding box coordinates, and filters results using Non-Maximum Suppression (NMS).
+
+         Parameters
+         ----------
+         frame : numpy.array or list
+             An input frame for object detection.
+
+         Returns
+         -------
+         out : list[fdatapoint]
+             A list of :class:`fdatapoint` objects representing the detected objects,
+             each containing bounding box coordinates and a class label.
+
+         Examples
+         --------
+
+         The datasets used in the examples are available in the
+         `Source Repository <https://github.com/C4dynamics/C4dynamics>`_
+         under examples/resources.
+
+
+         Import required packages
+         ^^^^^^^^^^^^^^^^^^^^^^^^
+
+         .. code::
+
+             >>> import os
+             >>> import cv2  # opencv-python
+             >>> import numpy as np
+             >>> import c4dynamics as c4d
+             >>> from matplotlib import pyplot as plt
+
+
+         Object detection in a single frame
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+         .. code::
+
+             >>> imagename = 'planes.jpg'
+             >>> img = cv2.imread(os.path.join(os.getcwd(), 'examples', 'resources', imagename))
+             >>> yolo3 = c4d.detectors.yolov3()
+             >>> pts = yolo3.detect(img)
+             >>> for p in pts:
+             ...     cv2.rectangle(img, p.box[0], p.box[1], np.random.randint(0, 255, 3).tolist(), 3)
+             >>> fig, ax = plt.subplots()
+             >>> ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+
+         .. figure:: /_static/images/yolo3_image.png
+
+
+         Object detection in a video
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+         .. code::
+
+             >>> videoname = 'aerobatics.mp4'
+             >>> videoin = os.path.join('examples', 'resources', videoname)
+             >>> videoout = os.path.join(os.getcwd(), videoname)
+             >>> cvideo = cv2.VideoCapture(videoin)
+             >>> cvideo_out = cv2.VideoWriter(videoout, cv2.VideoWriter_fourcc(*'mp4v')
+             ...     , int(cvideo.get(cv2.CAP_PROP_FPS))
+             ...     , [int(cvideo.get(cv2.CAP_PROP_FRAME_WIDTH))
+             ...     , int(cvideo.get(cv2.CAP_PROP_FRAME_HEIGHT))])
+             >>> yolo3 = c4d.detectors.yolov3()
+             >>> while cvideo.isOpened():
+             ...     ret, frame = cvideo.read()
+             ...     if not ret: break
+             ...     pts = yolo3.detect(frame)
+             ...     for p in pts:
+             ...         cv2.rectangle(frame, p.box[0], p.box[1], [0, 0, 0], 2)  # np.random.randint(0, 255, 3).tolist(), 2)
+             ...     cvideo_out.write(frame)
+             >>> cvideo_out.release()
+
+         .. figure:: /_static/images/aerobatics.gif
+
+
+         The output structure
+         ^^^^^^^^^^^^^^^^^^^^
+
+         The output of the detect() function is a list of :class:`fdatapoint` objects.
+         The :class:`fdatapoint` has unique attributes to manipulate the class and
+         bounding box of a detected object.
+
+         .. code::
+
+             >>> print('{:^10} | {:^10} | {:^10} | {:^16} | {:^16} | {:^10} | {:^14}'.format(
+             ...     '# object', 'center x', 'center y', 'box top-left'
+             ...     , 'box bottom-right', 'class', 'frame size'))
+             >>> for i, p in enumerate(pts):
+             ...     tlb = '(' + str(p.box[0][0]) + ', ' + str(p.box[0][1]) + ')'
+             ...     brb = '(' + str(p.box[1][0]) + ', ' + str(p.box[1][1]) + ')'
+             ...     fsize = '(' + str(p.fsize[0]) + ', ' + str(p.fsize[1]) + ')'
+             ...     print('{:^10d} | {:^10.3f} | {:^10.3f} | {:^16} | {:^16} | {:^10} | {:^14}'.format(
+             ...         i, p.x, p.y, tlb, brb, p.iclass, fsize))
+             ...     c = np.random.randint(0, 255, 3).tolist()
+             ...     cv2.rectangle(img, p.box[0], p.box[1], c, 2)
+             ...     point = (int((p.box[0][0] + p.box[1][0]) / 2 - 75), p.box[1][1] + 22)
+             ...     cv2.putText(img, p.iclass, point, cv2.FONT_HERSHEY_SIMPLEX, 1, c, 2)
+             >>> fig, ax = plt.subplots()
+             >>> ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+             >>> ax.set_axis_off()
+             # object  |  center x  |  center y  |   box top-left   | box bottom-right |   class    |   frame size
+                 0     |   0.584    |   0.376    |    (691, 234)    |    (802, 306)    | aeroplane  |  (1280, 720)
+                 1     |   0.457    |   0.473    |    (528, 305)    |    (642, 376)    | aeroplane  |  (1280, 720)
+                 2     |   0.471    |   0.322    |    (542, 196)    |    (661, 267)    | aeroplane  |  (1280, 720)
+                 3     |   0.546    |   0.873    |    (645, 588)    |    (752, 668)    | aeroplane  |  (1280, 720)
+
+         .. figure:: /_static/images/yolo3_outformat.png
+
+
+         '''
+         #
+         # Step 1: Preprocess the Frame
+         # - Create a blob (binary large object) from the input frame with the
+         #     specified dimensions
+         # - Normalize pixel values to a range of 0 to 1
+         # - Specify the dimensions of the input layer of the YOLOv3 model
+         # - Swap Red and Blue channels (BGR to RGB)
+         # - Set crop to False to preserve the original aspect ratio
+         ##
+         blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (MODEL_SIZE[0], MODEL_SIZE[1]), swapRB = True, crop = False)
+
+         #
+         # Step 2: Set Input to the YOLOv3 Model and Perform Forward Pass
+         # - Set the blob as the input to the YOLOv3 model
+         # - Get the names of the output layers of the model
+         # - Perform a forward pass through the model to obtain detections
+         ##
+         self.net.setInput(blob)
+         detections = self.net.forward(self.ln)
+
+         #
+         # Step 3: Extract Detected Objects
+         # - Iterate through the detected objects in the forward pass results
+         # - Filter objects based on the confidence threshold
+         # - Calculate bounding box coordinates and convert to integers
+         # - Append bounding box coordinates and class labels to respective lists
+         ##
+         raw = []
+         boxes = []
+         classIDs = []
+         confidences = []
+         h, w = frame.shape[:2]
+
+         for detection in detections:
+             for d in detection:
+
+                 scores = d[5:]
+                 classID = np.argmax(scores)
+                 confidence = scores[classID]
+
+                 if scores[classID] > self._confidence_th:  # Adjust the confidence threshold as needed
+
+                     box = d[:4] * [w, h, w, h]  # relative (xc, yc, w, h) to pixels
+                     # (center_x, center_y, width, height) = box.astype('int')
+
+                     x = box[0] - box[2] / 2  # top left x
+                     y = box[1] - box[3] / 2  # top left y
+
+                     boxes.append([x, y, box[2], box[3]])  # top left x, top left y, width, height
+                     confidences.append(float(confidence))
+                     classIDs.append(classID)
+                     raw.append(d[:4])
+
+
+         indices = cv2.dnn.NMSBoxes(boxes, confidences, self._confidence_th, self._nms_th)
+
+         box_out = []
+         class_out = []
+         points_out = []
+
+
+         if len(indices) > 0:
+             for i in indices.flatten():
+                 # (x, y) = (boxes[i][0], boxes[i][1])
+                 # (w, h) = (boxes[i][2], boxes[i][3])
+                 # x top left, y top left, x bottom right, y bottom right
+                 box_out.append([boxes[i][0], boxes[i][1], boxes[i][0] + boxes[i][2], boxes[i][1] + boxes[i][3]])
+
+                 class_out.append(self.class_names[classIDs[i]])
+
+                 points_out.append(fdatapoint(raw[i], self.class_names[classIDs[i]], (w, h)))
+
+         box_out = np.array(box_out)
+
+         return points_out  # box_out, class_out,
+
+
+
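The hunk above adds a new detector module (c4dynamics/detectors/yolo3_opencv.py, going by the RECORD listing at the bottom of this diff), exposed as c4d.detectors.yolov3 with nms_th and confidence_th as the two tuning knobs. A minimal sketch of driving it end to end, assuming the 1.1.0 wheel is installed; the image file name is illustrative:

.. code::

    import cv2
    import c4dynamics as c4d

    yolo3 = c4d.detectors.yolov3()   # loads yolov3.weights / yolov3.cfg / coco.names from the packaged resources
    yolo3.confidence_th = 0.5        # drop detections scoring below 0.5
    yolo3.nms_th = 0.5               # suppress boxes overlapping a stronger box by more than 0.5 IoU

    img = cv2.imread('planes.jpg')   # illustrative input image
    for p in yolo3.detect(img):
        # each fdatapoint carries pixel box corners (p.box), a class label
        # (p.iclass), and the frame size (p.fsize)
        cv2.rectangle(img, p.box[0], p.box[1], (0, 255, 0), 2)
    cv2.imwrite('planes_out.png', img)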
@@ -19,7 +19,7 @@ CONFIDENCE_THRESHOLD = 0.5
  class yolo():
 
      '''
-     The yolo_detector class is a wrapper for object detection using the YOLO
+     The yolo3_detector class is a wrapper for object detection using the YOLO
      (You Only Look Once) model.
 
      1. __init__(self, height=0, width=0):
@@ -42,7 +42,7 @@ class yolo():
 
 
 
-     The yolo_detector class encapsulates the functionality of object detection using the YOLO model,
+     The yolo3_detector class encapsulates the functionality of object detection using the YOLO model,
      providing methods to perform detection, extract measurements,
      and visualize the results on the frame. The class works in conjunction
      with the YOLO model, which is loaded externally and used for the actual detection process.
@@ -54,7 +54,7 @@ class yolo():
      def __init__(self, model, height = 0, width = 0):
          self.model = model  # load_model(modelpath, compile = False)
 
-         with open(os.path.join(os.getcwd(), 'c4dynamics', 'src', 'main', 'resources', 'detectors', 'yolo', 'v3', 'coco.names'), 'r') as f:
+         with open(os.path.join(os.getcwd(), 'c4dynamics', 'resources', 'detectors', 'yolo', 'v3', 'coco.names'), 'r') as f:
              self.class_names = f.read().splitlines()
 
          self.width = width
@@ -15,9 +15,9 @@ class e_kalman:
      # H = 0 # measurement matrix
      # R = 0 # measurement noise matrix
 
-     tau = 0
+     # tau = 0
 
-     def __init__(obj, x0, p0noise, tau): # vp,
+     def __init__(obj, x0, p0noise, dt): # vp,
          ''' '''
 
          obj.x = np.reshape(x0, ((3, 1)))
@@ -37,7 +37,7 @@ class e_kalman:
          obj.H = np.zeros((n))
          obj.H[0] = 1
          obj.R = p0noise[0]**2
-         obj.tau = tau
+         obj.dt = dt
 
 
 
@@ -51,7 +51,7 @@ class e_kalman:
          b input matrix
          u control input
          '''
-         obj.x = obj.x + f(obj.x) * obj.tau
+         obj.x = obj.x + f(obj.x) * obj.dt
          obj.P = np.linalg.multi_dot([Phi, obj.P, Phi.T]) + Q
 
          return obj.x
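The e_kalman change is a rename: the propagation interval formerly called tau is now dt, consistent with the new kalman class below. The predict step it feeds is plain Euler integration of the nonlinear model. A self-contained sketch of that step, with f, Phi, and Q standing in for the model derivative, its discrete transition matrix, and the process noise; the two update lines mirror the hunk above:

.. code::

    import numpy as np

    def ekf_predict(x, P, f, Phi, Q, dt):
        # x_{k+1} = x_k + f(x_k) * dt   (Euler propagation, as in e_kalman.predict)
        x = x + f(x) * dt
        # P_{k+1} = Phi P Phi' + Q      (covariance propagation)
        P = np.linalg.multi_dot([Phi, P, Phi.T]) + Q
        return x, P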
@@ -1,42 +1,58 @@
  import numpy as np
 
  class kalman:
-     # x = 0 # state vector.
-     # P = 0 # covariance matrix
-     # Q = 0 # process noise matrix
-     # H = 0 # measurement matrix
-     # R = 0 # measurement noise matrix
-
-     def __init__(obj, x0, P0, A, H, Q, R, b = None):
+     # x = 0 # state vector.
+     # P = 0 # covariance matrix, nxn
+     # Q = 0 # process noise covariance matrix, nxn
+     # H = 0 # measurement matrix, pxn
+     # R = 0 # measurement noise covariance matrix, pxp
 
-         obj.x = x0
-         obj.P = P0 # Initial error covariance matrix
-         obj.A = A # State transition matrix
-         obj.H = H # Measurement matrix
-         obj.Q = Q # Process noise covariance matrix
-         obj.R = R # Measurement noise covariance matrix
-
-         obj.b = b
+     Kinf = None
+
+     def __init__(self, P0, A, H, Q, R, dt, b = None, steadystate = False):
+         self.P = P0 # Initial error covariance matrix
+         self.A = A # State transition matrix
+         self.H = H # Measurement matrix
+         self.Q = Q # Process noise covariance matrix
+         self.R = R # Measurement noise covariance matrix
+         self.dt = dt
+         self.b = b
+         self.F = np.eye(len(A)) + dt * A
+
+         if steadystate:
+             R_inv = np.linalg.inv(R)
+             # Compute the solution to the Riccati equation
+             P = np.linalg.solve(-(A.T @ R_inv @ H.T @ H @ A) + A.T @ R_inv @ A, -Q)
+             self.Kinf = P @ H.T @ R_inv
+
 
-     def predict(obj, u = None):
+     def predict(self, x, u = None):
          #
-         # Predict step
+         # Predict
          ##
-         obj.x = obj.A @ obj.x
+         x = self.F @ x
 
-         if u is not None:
-             obj.x += obj.B @ u
+         if u is not None and self.b is not None:
+             x += self.b @ u
 
-         obj.P = obj.A @ obj.P @ obj.A.T + obj.Q
+         if self.Kinf is not None:
+             self.P = self.F @ self.P @ self.F.T + self.Q
+
+         return x
 
 
-     def correct(obj, z):
+     def correct(self, x, z):
          #
-         # Correct step
+         # Correct
+         # // assumes called immediately after a predict.
          ##
-         K = obj.P @ obj.H.T @ np.linalg.inv(obj.H @ obj.P @ obj.H.T + obj.R)
-         obj.x += K @ (z - obj.H @ obj.x)
-         obj.P = obj.P - K @ obj.H @ obj.P
+         if self.Kinf is None:
+             K = self.P @ self.H.T @ np.linalg.inv(self.H @ self.P @ self.H.T + self.R)
+             self.P = self.P - K @ self.H @ self.P
+
+         x += K @ (z - self.H @ x)
+
 
+         return x
 
 
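The reworked kalman class no longer owns the state: x0 drops out of the constructor, and predict/correct take and return x, so the caller threads the state through the loop. The constructor discretizes the dynamics as F = I + dt*A, and steadystate=True precomputes a fixed gain Kinf (note that, as released, predict propagates the covariance only in the steady-state branch). A sketch of the resulting loop on a toy constant-velocity model; the matrices and measurements are illustrative, and the import path is inferred from the RECORD listing below:

.. code::

    import numpy as np
    from c4dynamics.filters.kalman import kalman

    dt = 0.01
    A = np.array([[0., 1.], [0., 0.]])    # continuous-time constant-velocity dynamics
    H = np.array([[1., 0.]])              # position-only measurement
    Q = 1e-4 * np.eye(2)                  # process noise covariance
    R = np.array([[0.25]])                # measurement noise covariance

    kf = kalman(np.eye(2), A, H, Q, R, dt)   # P0 = I; F = I + dt*A is formed internally
    x = np.zeros(2)
    for z in [0.10, 0.19, 0.32]:             # toy position measurements
        x = kf.predict(x)                    # propagate the state one dt ahead
        x = kf.correct(x, np.array([z]))     # fold in the measurement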
@@ -83,7 +83,7 @@ class seeker(c4d.rigidbody):
          bias = bias.std \\cdot randn
 
      The `bias.std` has a default value of `0.1deg` and it can
-     be set by the **kwargs at the stage of constructing the seeker instance:
+     be set by the \**kwargs at the stage of constructing the seeker instance:
 
      .. code::
 
@@ -153,7 +153,7 @@ class seeker(c4d.rigidbody):
          scalefactor = scalefactor.std \\cdot randn
 
      The `scalefactor.std` has a default value of 0.05 and it can
-     be set by the **kwargs at the stage of constructing the seeker instance:
+     be set by the \**kwargs at the stage of constructing the seeker instance:
 
      .. code::
 
@@ -8,7 +8,7 @@ TXTCOLORS = { 'k': '30', 'black': '30'
              , 'w': '37', 'white': '37'
              }
 
- def print(txt, color = 'white', bold = False, italic = False):
+ def cprint(txt, color = 'white', bold = False, italic = False):
 
      settxt = '\033['
 
@@ -19,5 +19,5 @@ def print(txt, color = 'white', bold = False, italic = False):
 
      settxt += TXTCOLORS[color]
 
-     print(settxt + 'm' + txt + '\033[0m')
+     print(settxt + 'm' + str(txt) + '\033[0m')
 
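Renaming print to cprint also un-shadows the builtin: inside the old def print(...), the final print(settxt + ...) call resolved back to the function itself, whereas it now reaches Python's own print. Wrapping txt in str() additionally makes non-string arguments safe. A usage sketch; the module path is inferred from the RECORD listing below, and 'white'/'black' are the color keys visible in the hunk:

.. code::

    from c4dynamics.utils.cprint import cprint

    cprint('detection complete', color = 'white', bold = True)
    cprint(42, color = 'black')   # non-string input, handled by str(txt)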
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: c4dynamics
- Version: 1.0.80
+ Version: 1.1.0
  Summary: The framework for algorithms engineering with Python.
  Author: C4dynamics
  Author-email: zivmeri@gmail.com
@@ -0,0 +1,38 @@
+ c4dynamics/__init__.py,sha256=jRGCKDenpYKlbCL60cstxocenk5AXjavhKowj4KR9vo,1526
+ c4dynamics/body/__init__.py,sha256=JLUVlqcSCqjMAe9fzcdTNCwtay6FJnJfTNpEtJTiaM8,16
+ c4dynamics/body/datapoint.py,sha256=Nvqezfi1m1BbPHvqJkh15pJ1Yc4s80j1uz1IQYsA22o,39764
+ c4dynamics/body/rigidbody.py,sha256=QDWE2W8HAs4TBKXa5oOgwiHQiD8I1Uid0XoQEmEtggY,7683
+ c4dynamics/detectors/__init__.py,sha256=AKLw6rH3wsPC7zLD3qAnytDy9W4XvcsfllJyDobkFa4,82
+ c4dynamics/detectors/yolo3_opencv.py,sha256=AMJ89ZlxN3frjInteohkJkOOkMh2s7M2pZHr75kBUYw,14400
+ c4dynamics/detectors/yolo3_tf.py,sha256=sSm0yBsL7qrodlnK0N6mL1EuyMgiPJt1kaDsrxXvTSk,5040
+ c4dynamics/eqm/__init__.py,sha256=Lq5AYYI9wTF5aPVV2CePIDdb-aS4SmqC8_5bKsKDxNk,9193
+ c4dynamics/eqm/derivs.py,sha256=rnC7Y_IHakA-S1m9WjhvSOlC0ISbGeQCzU7qogos6FI,4150
+ c4dynamics/eqm/integrate.py,sha256=ppVu1y0OYa5GI_KZESPQCM4Oky1fSJNOHwW7glOuA9E,9193
+ c4dynamics/filters/__init__.py,sha256=KGgAM_W4SLfv7yw6TfzytruHuwbMLXAlMxLzalgJlb4,643
+ c4dynamics/filters/e_kalman.py,sha256=3ELC3iXdo6ItI041IZqJO2EdNn8lmPmYNav8jaUemlU,2421
+ c4dynamics/filters/filtertype.py,sha256=9Tbyd_IpRrqXfzs_mLTvDSw3NPxb_Tue_6DyLBZs-ZI,188
+ c4dynamics/filters/kalman.py,sha256=E-z5TMVIdgqXKhYCEzFWlMNR2HEieAPBR_f5OdAYXMs,1681
+ c4dynamics/filters/lowpass.py,sha256=Z33-eWaIyFdmWUj2KjEaaD6jznt0bZjC5PsF9H6RvlM,473
+ c4dynamics/filters/luenberger.py,sha256=0S5v9tQBLlDb1IWT0nhFx3xG8f03SB_9BhoD_hgjxq8,2295
+ c4dynamics/resources/detectors/yolo/v3/coco.names,sha256=DzbNM53Ej4TWW02YPcDfglnbOo7Kuq6TpSDQGtMP9RA,703
+ c4dynamics/resources/detectors/yolo/v3/yolov3.cfg,sha256=4mlbYgIt7E1oZJzoH3Wi-cKOx1-yPIlDL5RQHNqJ6N8,9129
+ c4dynamics/resources/detectors/yolo/v3/yolov3.weights,sha256=Uj5OaeHQFTk6GwpEHO8dnHZZ4-stfhX3k_Bgohsy8pc,248007048
+ c4dynamics/rotmat/__init__.py,sha256=x0BVKfGVlV9uYENxRU3xWyyvKCFbN0-aUjZ4XJi10PE,3376
+ c4dynamics/rotmat/rotmat.py,sha256=aRbfBkMlLKFiiZ2cPxYxWQsDJ9lnJlyCcwHHAxBloDU,6052
+ c4dynamics/sensors/__init__.py,sha256=EFgElESGRoP0qO_iyh0LeEFYbYnUt0dwDeVtPRRa4e4,654
+ c4dynamics/sensors/lineofsight.py,sha256=4pkjXij60BXUkExAR1u3bQ6Dh8ZPsNoW7dynSNldF3E,2081
+ c4dynamics/sensors/radar.py,sha256=xfBSryrzGi9PL8hjdFSS-t9wB2sWEFjffYUwSPHlGH8,4052
+ c4dynamics/sensors/seeker.py,sha256=fDS_toUij_NNne8Lbx7taMenb9bGwYl1cAqu5dpWpz0,8091
+ c4dynamics/utils/__init__.py,sha256=iRj7jQfEn3Bir_B8VTaeUPfm-X3JbyYYknqgmYYIu1o,155
+ c4dynamics/utils/const.py,sha256=mgQu-GCWxLJYwEOrDJzQK01m95t05U69ge8q3RGgZIc,420
+ c4dynamics/utils/cprint.py,sha256=4XVU8t74Ti6ykYI9NVkZ6Lz3cRiDWZ4A2F9xOxVu8dw,665
+ c4dynamics/utils/gen_gif.py,sha256=eGrAyFLH7jx54SrX3zFAyOa74tlzDC3n6VSw-y8COVg,574
+ c4dynamics/utils/gif_tools.py,sha256=CSGDqm2_zIf1n1U8DCUnxg2D-TpvHF-t0r8X827MLWs,3336
+ c4dynamics/utils/images_loader.py,sha256=ihMtzy1__BV1gvH1Tz6EDJfahpgGV8hn-61Lhx1x0ws,1568
+ c4dynamics/utils/import_subdirs.py,sha256=e7M039yyRAoDEu0rEaE02MJpi_oQIaNIIMXi808gWhU,1796
+ c4dynamics/utils/math.py,sha256=esVQQGZST53raCPJNS5m1dBS6yqtbbDKzM0qZ32XQac,798
+ c4dynamics/utils/tictoc.py,sha256=3Bnp8jSvizfwe-djhjNok7eZKw_ACYiRPGnC3GklFn0,189
+ c4dynamics-1.1.0.dist-info/METADATA,sha256=crarHBNs7Y8h0kDo-9e4k6Qu31r5GDTNh4R4eDzMYig,8963
+ c4dynamics-1.1.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+ c4dynamics-1.1.0.dist-info/top_level.txt,sha256=Pp3jXXljS2HbUDGItCnMueDc-7dI8pYDObXNtBhZVG0,11
+ c4dynamics-1.1.0.dist-info/RECORD,,