valor-lite 0.33.18__py3-none-any.whl → 0.34.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -212,7 +212,7 @@ def _count_with_examples(
  data: NDArray[np.float64],
  unique_idx: int | list[int],
  label_idx: int | list[int],
- ) -> tuple[NDArray[np.float64], NDArray[np.int32], NDArray[np.int32]]:
+ ) -> tuple[NDArray[np.float64], NDArray[np.int32], NDArray[np.intp]]:
  """
  Helper function for counting occurences of unique detailed pairs.

@@ -231,7 +231,7 @@ def _count_with_examples(
  Examples drawn from the data input.
  NDArray[np.int32]
  Unique label indices.
- NDArray[np.int32]
+ NDArray[np.intp]
  Counts for each unique label index.
  """
  unique_rows, indices = np.unique(
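The annotation change above (np.int32 → np.intp for the counts output) matches what NumPy actually returns. A minimal sketch with hypothetical data, assuming only standard NumPy behavior:

    import numpy as np

    # np.unique's index and count outputs use the platform index type np.intp,
    # not np.int32, which is what the updated return annotation reflects.
    data = np.array([[0.0, 1.0], [0.0, 1.0], [2.0, 3.0]])
    unique_rows, indices, counts = np.unique(
        data, axis=0, return_index=True, return_counts=True
    )
    assert indices.dtype == np.intp and counts.dtype == np.intp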
@@ -282,18 +282,20 @@ def compute_confusion_matrix(
  NDArray[np.float64]
  Confusion matrix.
  NDArray[np.int32]
- Ground truths with missing predictions.
+ Unmatched Ground Truths.
  """

  n_labels = label_metadata.shape[0]
  n_scores = score_thresholds.shape[0]

- confusion_matrix = -1 * np.ones(
+ confusion_matrix = np.full(
  (n_scores, n_labels, n_labels, 2 * n_examples + 1),
+ fill_value=-1.0,
  dtype=np.float32,
  )
- missing_predictions = -1 * np.ones(
+ unmatched_ground_truths = np.full(
  (n_scores, n_labels, n_examples + 1),
+ fill_value=-1,
  dtype=np.int32,
  )

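The switch to np.full replaces the earlier `-1 * np.ones(...)` pattern for building the sentinel-filled buffers. A small sketch, assuming nothing beyond standard NumPy, showing the two forms produce identical arrays:

    import numpy as np

    # Hypothetical shape standing in for (n_scores, n_labels, n_labels, 2 * n_examples + 1).
    shape = (2, 3, 3, 5)
    old = -1 * np.ones(shape, dtype=np.float32)               # allocate ones, then multiply
    new = np.full(shape, fill_value=-1.0, dtype=np.float32)   # write the fill value directly
    assert old.dtype == new.dtype and np.array_equal(old, new)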
@@ -339,7 +341,7 @@ def compute_confusion_matrix(
  score_idx, misclf_labels[:, 0], misclf_labels[:, 1], 0
  ] = misclf_counts

- missing_predictions[score_idx, misprd_labels, 0] = misprd_counts
+ unmatched_ground_truths[score_idx, misprd_labels, 0] = misprd_counts

  if n_examples > 0:
  for label_idx in range(n_labels):
@@ -375,16 +377,16 @@ def compute_confusion_matrix(
  1 : 2 * misclf_label_examples.shape[0] + 1,
  ] = misclf_label_examples[:, [0, 3]].flatten()

- # missing prediction examples
+ # unmatched ground truth examples
  mask_misprd_label = misprd_examples[:, 1] == label_idx
  if misprd_examples.size > 0:
  misprd_label_examples = misprd_examples[mask_misprd_label][
  :n_examples
  ]
- missing_predictions[
+ unmatched_ground_truths[
  score_idx,
  label_idx,
  1 : misprd_label_examples.shape[0] + 1,
  ] = misprd_label_examples[:, 0].flatten()

- return confusion_matrix, missing_predictions
+ return confusion_matrix, unmatched_ground_truths # type: ignore[reportReturnType]
@@ -4,6 +4,7 @@ from dataclasses import dataclass
  import numpy as np
  from numpy.typing import NDArray
  from tqdm import tqdm
+
  from valor_lite.classification.annotation import Classification
  from valor_lite.classification.computation import (
  compute_confusion_matrix,
@@ -38,7 +39,7 @@ filtered_metrics = evaluator.evaluate(filter_mask=filter_mask)

  @dataclass
  class Filter:
- indices: NDArray[np.int32]
+ indices: NDArray[np.intp]
  label_metadata: NDArray[np.int32]
  n_datums: int

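Filter.indices is now annotated as NDArray[np.intp] rather than NDArray[np.int32]. That is the dtype of index arrays NumPy itself produces; a minimal sketch under that assumption:

    import numpy as np

    # Index arrays returned by NumPy (np.where / np.nonzero, np.argsort, np.unique's
    # index output) use the platform pointer-sized integer type, np.intp.
    mask = np.array([True, False, True, False])
    indices = np.where(mask)[0]
    assert indices.dtype == np.intp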
@@ -169,8 +170,7 @@ class Evaluator:
  label_metadata_per_datum = self._label_metadata_per_datum.copy()
  label_metadata_per_datum[:, ~mask] = 0

- label_metadata = np.zeros_like(self._label_metadata, dtype=np.int32)
- label_metadata = np.transpose(
+ label_metadata: NDArray[np.int32] = np.transpose(
  np.sum(
  label_metadata_per_datum,
  axis=1,
@@ -321,7 +321,7 @@ class Metric(BaseMetric):
  ],
  ],
  ],
- missing_predictions: dict[
+ unmatched_ground_truths: dict[
  str, # ground truth label value
  dict[
  str, # either `count` or `examples`
@@ -335,8 +335,8 @@ class Metric(BaseMetric):
  The confusion matrix and related metrics for the classification task.

  This class encapsulates detailed information about the model's performance, including correct
- predictions, misclassifications, hallucinations (false positives), and missing predictions
- (false negatives). It provides counts and examples for each category to facilitate in-depth analysis.
+ predictions, misclassifications, unmatched predictions (subset of false positives), and unmatched ground truths
+ (subset of false negatives). It provides counts and examples for each category to facilitate in-depth analysis.

  Confusion Matrix Structure:
  {
@@ -358,7 +358,7 @@ class Metric(BaseMetric):
  ...
  }

- Missing Prediction Structure:
+ Unmatched Ground Truths Structure:
  {
  ground_truth_label: {
  'count': int,
@@ -379,7 +379,7 @@ class Metric(BaseMetric):
  A nested dictionary where the first key is the ground truth label value, the second key
  is the prediction label value, and the innermost dictionary contains either a `count`
  or a list of `examples`. Each example includes the datum UID and prediction score.
- missing_predictions : dict
+ unmatched_ground_truths : dict
  A dictionary where each key is a ground truth label value for which the model failed to predict
  (false negatives). The value is a dictionary containing either a `count` or a list of `examples`.
  Each example includes the datum UID.
@@ -396,7 +396,7 @@ class Metric(BaseMetric):
  type=MetricType.ConfusionMatrix.value,
  value={
  "confusion_matrix": confusion_matrix,
- "missing_predictions": missing_predictions,
+ "unmatched_ground_truths": unmatched_ground_truths,
  },
  parameters={
  "score_threshold": score_threshold,
@@ -2,6 +2,7 @@ from collections import defaultdict

  import numpy as np
  from numpy.typing import NDArray
+
  from valor_lite.classification.metric import Metric, MetricType


@@ -153,20 +154,20 @@ def _unpack_confusion_matrix_value(
  }


- def _unpack_missing_predictions_value(
- missing_predictions: NDArray[np.int32],
+ def _unpack_unmatched_ground_truths_value(
+ unmatched_ground_truths: NDArray[np.int32],
  number_of_labels: int,
  number_of_examples: int,
  index_to_uid: dict[int, str],
  index_to_label: dict[int, str],
  ) -> dict[str, dict[str, int | list[dict[str, str]]]]:
  """
- Unpacks a numpy array of missing prediction counts and examples.
+ Unpacks a numpy array of unmatched ground truth counts and examples.
  """

  datum_idx = (
  lambda gt_label_idx, example_idx: int( # noqa: E731 - lambda fn
- missing_predictions[
+ unmatched_ground_truths[
  gt_label_idx,
  example_idx + 1,
  ]
@@ -176,7 +177,7 @@ def _unpack_missing_predictions_value(
  return {
  index_to_label[gt_label_idx]: {
  "count": max(
- int(missing_predictions[gt_label_idx, 0]),
+ int(unmatched_ground_truths[gt_label_idx, 0]),
  0,
  ),
  "examples": [
@@ -197,7 +198,7 @@ def unpack_confusion_matrix_into_metric_list(
  index_to_label: dict[int, str],
  ) -> list[Metric]:

- (confusion_matrix, missing_predictions) = results
+ (confusion_matrix, unmatched_ground_truths) = results
  n_scores, n_labels, _, _ = confusion_matrix.shape
  return [
  Metric.confusion_matrix(
@@ -210,8 +211,10 @@ def unpack_confusion_matrix_into_metric_list(
  index_to_label=index_to_label,
  index_to_uid=index_to_uid,
  ),
- missing_predictions=_unpack_missing_predictions_value(
- missing_predictions=missing_predictions[score_idx, :, :],
+ unmatched_ground_truths=_unpack_unmatched_ground_truths_value(
+ unmatched_ground_truths=unmatched_ground_truths[
+ score_idx, :, :
+ ],
  number_of_labels=n_labels,
  number_of_examples=number_of_examples,
  index_to_label=index_to_label,
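The renamed key also shows up in the metric's value payload. A hedged sketch of its shape, with hypothetical labels and counts, based only on the structures documented in the docstrings above:

    # Hypothetical payload for one score threshold; in 0.33.x the second key was "missing_predictions".
    value = {
        "confusion_matrix": {"dog": {"dog": {"count": 9}, "cat": {"count": 1}}},
        "unmatched_ground_truths": {"cat": {"count": 2}},
    }
    assert "unmatched_ground_truths" in value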
@@ -381,9 +381,9 @@ def compute_precion_recall(
  _, indices_gt_unique = np.unique(
  tp_candidates[:, [0, 1, 4]], axis=0, return_index=True
  )
- mask_gt_unique = np.zeros(tp_candidates.shape[0], dtype=bool)
+ mask_gt_unique = np.zeros(tp_candidates.shape[0], dtype=np.bool_)
  mask_gt_unique[indices_gt_unique] = True
- true_positives_mask = np.zeros(n_rows, dtype=bool)
+ true_positives_mask = np.zeros(n_rows, dtype=np.bool_)
  true_positives_mask[mask_tp_inner] = mask_gt_unique

  # calculate intermediates
@@ -452,9 +452,9 @@ def compute_precion_recall(
  _, indices_gt_unique = np.unique(
  tp_candidates[:, [0, 1, 4]], axis=0, return_index=True
  )
- mask_gt_unique = np.zeros(tp_candidates.shape[0], dtype=bool)
+ mask_gt_unique = np.zeros(tp_candidates.shape[0], dtype=np.bool_)
  mask_gt_unique[indices_gt_unique] = True
- true_positives_mask = np.zeros(n_rows, dtype=bool)
+ true_positives_mask = np.zeros(n_rows, dtype=np.bool_)
  true_positives_mask[mask_tp_outer] = mask_gt_unique

  # count running tp and total for AP
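The move from the builtin bool to np.bool_ is purely about explicit typing; the resulting arrays are identical. A small sketch assuming only standard NumPy:

    import numpy as np

    a = np.zeros(5, dtype=bool)      # 0.33.x spelling
    b = np.zeros(5, dtype=np.bool_)  # 0.34.0 spelling
    assert a.dtype == b.dtype == np.bool_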
@@ -501,8 +501,8 @@ def compute_precion_recall(
  )

  # calculate average precision
- running_max_precision = np.zeros((n_ious, n_labels))
- running_max_score = np.zeros((n_labels))
+ running_max_precision = np.zeros((n_ious, n_labels), dtype=np.float64)
+ running_max_score = np.zeros((n_labels), dtype=np.float64)
  for recall in range(100, -1, -1):

  # running max precision
@@ -528,8 +528,12 @@ def compute_precion_recall(

  # calculate mAP and mAR
  if unique_pd_labels.size > 0:
- mAP = average_precision[:, unique_pd_labels].mean(axis=1)
- mAR = average_recall[:, unique_pd_labels].mean(axis=1)
+ mAP: NDArray[np.float64] = average_precision[:, unique_pd_labels].mean(
+ axis=1
+ )
+ mAR: NDArray[np.float64] = average_recall[:, unique_pd_labels].mean(
+ axis=1
+ )
  else:
  mAP = np.zeros(n_ious, dtype=np.float64)
  mAR = np.zeros(n_scores, dtype=np.float64)
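The reflowed mAP/mAR lines keep the same computation: an average of the per-label values restricted to labels that actually appear in the predictions. A minimal sketch with hypothetical numbers:

    import numpy as np

    # average_precision has shape (n_ious, n_labels); label 2 is never predicted, so it is excluded.
    average_precision = np.array([[0.9, 0.5, 0.0], [0.8, 0.4, 0.0]])
    unique_pd_labels = np.array([0, 1])

    mAP = average_precision[:, unique_pd_labels].mean(axis=1)
    print(mAP)  # [0.7 0.6]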
@@ -561,14 +565,14 @@ def compute_precion_recall(
  accuracy,
  counts,
  pr_curve,
- )
+ ) # type: ignore[reportReturnType]


  def _count_with_examples(
  data: NDArray[np.float64],
  unique_idx: int | list[int],
  label_idx: int | list[int],
- ) -> tuple[NDArray[np.float64], NDArray[np.int32], NDArray[np.int32]]:
+ ) -> tuple[NDArray[np.float64], NDArray[np.int32], NDArray[np.intp]]:
  """
  Helper function for counting occurences of unique detailed pairs.

@@ -587,7 +591,7 @@ def _count_with_examples(
  Examples drawn from the data input.
  NDArray[np.int32]
  Unique label indices.
- NDArray[np.int32]
+ NDArray[np.intp]
  Counts for each unique label index.
  """
  unique_rows, indices = np.unique(
@@ -669,9 +673,9 @@ def compute_confusion_matrix(
  NDArray[np.float64]
  Confusion matrix.
  NDArray[np.float64]
- Hallucinations.
+ Unmatched Predictions.
  NDArray[np.int32]
- Missing Predictions.
+ Unmatched Ground Truths.
  """

  n_labels = label_metadata.shape[0]
@@ -683,12 +687,12 @@ def compute_confusion_matrix(
  (n_ious, n_scores, n_labels, n_labels, 4 * n_examples + 1),
  dtype=np.float32,
  )
- hallucinations = -1 * np.ones(
+ unmatched_predictions = -1 * np.ones(
  # (datum idx, pd idx, pd score) * n_examples + count
  (n_ious, n_scores, n_labels, 3 * n_examples + 1),
  dtype=np.float32,
  )
- missing_predictions = -1 * np.ones(
+ unmatched_ground_truths = -1 * np.ones(
  # (datum idx, gt idx) * n_examples + count
  (n_ious, n_scores, n_labels, 2 * n_examples + 1),
  dtype=np.int32,
@@ -793,7 +797,7 @@ def compute_confusion_matrix(
  data[mask_misclf], unique_idx=[0, 1, 2, 4, 5], label_idx=[3, 4]
  )

- # count hallucinations
+ # count unmatched predictions
  (
  halluc_examples,
  halluc_labels,
@@ -802,7 +806,7 @@ def compute_confusion_matrix(
  data[mask_halluc], unique_idx=[0, 2, 5], label_idx=2
  )

- # count missing predictions
+ # count unmatched ground truths
  (
  misprd_examples,
  misprd_labels,
@@ -822,13 +826,13 @@ def compute_confusion_matrix(
  misclf_labels[:, 1],
  0,
  ] = misclf_counts
- hallucinations[
+ unmatched_predictions[
  iou_idx,
  score_idx,
  halluc_labels,
  0,
  ] = halluc_counts
- missing_predictions[
+ unmatched_ground_truths[
  iou_idx,
  score_idx,
  misprd_labels,
@@ -877,26 +881,26 @@ def compute_confusion_matrix(
  :, [0, 1, 2, 6]
  ].flatten()

- # hallucination examples
+ # unmatched prediction examples
  mask_halluc_label = halluc_examples[:, 5] == label_idx
  if mask_halluc_label.sum() > 0:
  halluc_label_examples = halluc_examples[
  mask_halluc_label
  ][:n_examples]
- hallucinations[
+ unmatched_predictions[
  iou_idx,
  score_idx,
  label_idx,
  1 : 3 * halluc_label_examples.shape[0] + 1,
  ] = halluc_label_examples[:, [0, 2, 6]].flatten()

- # missing prediction examples
+ # unmatched ground truth examples
  mask_misprd_label = misprd_examples[:, 4] == label_idx
  if misprd_examples.size > 0:
  misprd_label_examples = misprd_examples[
  mask_misprd_label
  ][:n_examples]
- missing_predictions[
+ unmatched_ground_truths[
  iou_idx,
  score_idx,
  label_idx,
@@ -905,6 +909,6 @@ def compute_confusion_matrix(

  return (
  confusion_matrix,
- hallucinations,
- missing_predictions,
- )
+ unmatched_predictions,
+ unmatched_ground_truths,
+ ) # type: ignore[reportReturnType]
@@ -4,6 +4,7 @@ from dataclasses import dataclass
  import numpy as np
  from numpy.typing import NDArray
  from tqdm import tqdm
+
  from valor_lite.object_detection.annotation import Detection
  from valor_lite.object_detection.computation import (
  compute_bbox_iou,
@@ -42,8 +43,8 @@ filtered_metrics = evaluator.evaluate(iou_thresholds=[0.5], filter_mask=filter_m

  @dataclass
  class Filter:
- ranked_indices: NDArray[np.int32]
- detailed_indices: NDArray[np.int32]
+ ranked_indices: NDArray[np.intp]
+ detailed_indices: NDArray[np.intp]
  label_metadata: NDArray[np.int32]


@@ -569,7 +570,8 @@ class DataLoader:
  [gt.extrema, pd.extrema]
  for pd in detection.predictions
  for gt in detection.groundtruths
- ]
+ ],
+ dtype=np.float64,
  )
  ).reshape(len(detection.predictions), len(detection.groundtruths))
  for detection in detections
@@ -619,7 +619,7 @@ class Metric(BaseMetric):
  ],
  ],
  ],
- hallucinations: dict[
+ unmatched_predictions: dict[
  str, # prediction label value
  dict[
  str, # either `count` or `examples`
@@ -636,7 +636,7 @@ class Metric(BaseMetric):
  ],
  ],
  ],
- missing_predictions: dict[
+ unmatched_ground_truths: dict[
  str, # ground truth label value
  dict[
  str, # either `count` or `examples`
@@ -660,8 +660,8 @@ class Metric(BaseMetric):
  Confusion matrix for object detection tasks.

  This class encapsulates detailed information about the model's performance, including correct
- predictions, misclassifications, hallucinations (false positives), and missing predictions
- (false negatives). It provides counts and examples for each category to facilitate in-depth analysis.
+ predictions, misclassifications, unmatched_predictions (subset of false positives), and unmatched ground truths
+ (subset of false negatives). It provides counts and examples for each category to facilitate in-depth analysis.

  Confusion Matrix Format:
  {
@@ -683,7 +683,7 @@ class Metric(BaseMetric):
  ...
  }

- Hallucinations Format:
+ Unmatched Predictions Format:
  {
  <prediction label>: {
  'count': int,
@@ -699,7 +699,7 @@ class Metric(BaseMetric):
  ...
  }

- Missing Prediction Format:
+ Unmatched Ground Truths Format:
  {
  <ground truth label>: {
  'count': int,
@@ -721,13 +721,13 @@ class Metric(BaseMetric):
  is the prediction label value, and the innermost dictionary contains either a `count`
  or a list of `examples`. Each example includes the datum UID, ground truth bounding box,
  predicted bounding box, and prediction scores.
- hallucinations : dict
+ unmatched_predictions : dict
  A dictionary where each key is a prediction label value with no corresponding ground truth
- (false positives). The value is a dictionary containing either a `count` or a list of
+ (subset of false positives). The value is a dictionary containing either a `count` or a list of
  `examples`. Each example includes the datum UID, predicted bounding box, and prediction score.
- missing_predictions : dict
+ unmatched_ground_truths : dict
  A dictionary where each key is a ground truth label value for which the model failed to predict
- (false negatives). The value is a dictionary containing either a `count` or a list of `examples`.
+ (subset of false negatives). The value is a dictionary containing either a `count` or a list of `examples`.
  Each example includes the datum UID and ground truth bounding box.
  score_threshold : float
  The confidence score threshold used to filter predictions.
@@ -744,8 +744,8 @@ class Metric(BaseMetric):
  type=MetricType.ConfusionMatrix.value,
  value={
  "confusion_matrix": confusion_matrix,
- "hallucinations": hallucinations,
- "missing_predictions": missing_predictions,
+ "unmatched_predictions": unmatched_predictions,
+ "unmatched_ground_truths": unmatched_ground_truths,
  },
  parameters={
  "score_threshold": score_threshold,
@@ -2,6 +2,7 @@ from collections import defaultdict

  import numpy as np
  from numpy.typing import NDArray
+
  from valor_lite.object_detection.metric import Metric, MetricType


@@ -136,10 +137,8 @@ def unpack_precision_recall_into_metric_lists(

  metrics[MetricType.PrecisionRecallCurve] = [
  Metric.precision_recall_curve(
- precisions=pr_curves[iou_idx, label_idx, :, 0]
- .astype(float)
- .tolist(),
- scores=pr_curves[iou_idx, label_idx, :, 1].astype(float).tolist(),
+ precisions=pr_curves[iou_idx, label_idx, :, 0].tolist(), # type: ignore[reportArgumentType]
+ scores=pr_curves[iou_idx, label_idx, :, 1].tolist(), # type: ignore[reportArgumentType]
  iou_threshold=iou_threshold,
  label=label,
  )
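Dropping the intermediate .astype(float) does not change the output, since ndarray.tolist() already converts the elements to built-in Python floats. A small sketch assuming only standard NumPy:

    import numpy as np

    arr = np.array([0.25, 0.5], dtype=np.float64)
    assert arr.tolist() == arr.astype(float).tolist() == [0.25, 0.5]
    assert all(type(x) is float for x in arr.tolist())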
@@ -321,8 +320,8 @@ def _unpack_confusion_matrix_value(
  }


- def _unpack_hallucinations_value(
- hallucinations: NDArray[np.float64],
+ def _unpack_unmatched_predictions_value(
+ unmatched_predictions: NDArray[np.float64],
  number_of_labels: int,
  number_of_examples: int,
  index_to_uid: dict[int, str],
@@ -336,12 +335,12 @@ def _unpack_hallucinations_value(
  ],
  ]:
  """
- Unpacks a numpy array of hallucination counts and examples.
+ Unpacks a numpy array of unmatched_prediction counts and examples.
  """

  datum_idx = (
  lambda pd_label_idx, example_idx: int( # noqa: E731 - lambda fn
- hallucinations[
+ unmatched_predictions[
  pd_label_idx,
  example_idx * 3 + 1,
  ]
@@ -350,7 +349,7 @@ def _unpack_hallucinations_value(

  prediction_idx = (
  lambda pd_label_idx, example_idx: int( # noqa: E731 - lambda fn
- hallucinations[
+ unmatched_predictions[
  pd_label_idx,
  example_idx * 3 + 2,
  ]
@@ -359,7 +358,7 @@ def _unpack_hallucinations_value(

  score_idx = (
  lambda pd_label_idx, example_idx: float( # noqa: E731 - lambda fn
- hallucinations[
+ unmatched_predictions[
  pd_label_idx,
  example_idx * 3 + 3,
  ]
@@ -369,7 +368,7 @@ def _unpack_hallucinations_value(
  return {
  index_to_label[pd_label_idx]: {
  "count": max(
- int(hallucinations[pd_label_idx, 0]),
+ int(unmatched_predictions[pd_label_idx, 0]),
  0,
  ),
  "examples": [
@@ -392,8 +391,8 @@ def _unpack_hallucinations_value(
  }


- def _unpack_missing_predictions_value(
- missing_predictions: NDArray[np.int32],
+ def _unpack_unmatched_ground_truths_value(
+ unmatched_ground_truths: NDArray[np.int32],
  number_of_labels: int,
  number_of_examples: int,
  index_to_uid: dict[int, str],
@@ -401,12 +400,12 @@ def _unpack_missing_predictions_value(
  groundtruth_examples: dict[int, NDArray[np.float16]],
  ) -> dict[str, dict[str, int | list[dict[str, str | dict[str, float]]]]]:
  """
- Unpacks a numpy array of missing prediction counts and examples.
+ Unpacks a numpy array of unmatched ground truth counts and examples.
  """

  datum_idx = (
  lambda gt_label_idx, example_idx: int( # noqa: E731 - lambda fn
- missing_predictions[
+ unmatched_ground_truths[
  gt_label_idx,
  example_idx * 2 + 1,
  ]
@@ -415,7 +414,7 @@ def _unpack_missing_predictions_value(

  groundtruth_idx = (
  lambda gt_label_idx, example_idx: int( # noqa: E731 - lambda fn
- missing_predictions[
+ unmatched_ground_truths[
  gt_label_idx,
  example_idx * 2 + 2,
  ]
@@ -425,7 +424,7 @@ def _unpack_missing_predictions_value(
  return {
  index_to_label[gt_label_idx]: {
  "count": max(
- int(missing_predictions[gt_label_idx, 0]),
+ int(unmatched_ground_truths[gt_label_idx, 0]),
  0,
  ),
  "examples": [
@@ -463,8 +462,8 @@ def unpack_confusion_matrix_into_metric_list(
  ) -> list[Metric]:
  (
  confusion_matrix,
- hallucinations,
- missing_predictions,
+ unmatched_predictions,
+ unmatched_ground_truths,
  ) = results
  n_labels = len(index_to_label)
  return [
@@ -481,16 +480,18 @@ def unpack_confusion_matrix_into_metric_list(
  groundtruth_examples=groundtruth_examples,
  prediction_examples=prediction_examples,
  ),
- hallucinations=_unpack_hallucinations_value(
- hallucinations=hallucinations[iou_idx, score_idx, :, :],
+ unmatched_predictions=_unpack_unmatched_predictions_value(
+ unmatched_predictions=unmatched_predictions[
+ iou_idx, score_idx, :, :
+ ],
  number_of_labels=n_labels,
  number_of_examples=number_of_examples,
  index_to_label=index_to_label,
  index_to_uid=index_to_uid,
  prediction_examples=prediction_examples,
  ),
- missing_predictions=_unpack_missing_predictions_value(
- missing_predictions=missing_predictions[
+ unmatched_ground_truths=_unpack_unmatched_ground_truths_value(
+ unmatched_ground_truths=unmatched_ground_truths[
  iou_idx, score_idx, :, :
  ],
  number_of_labels=n_labels,
@@ -98,9 +98,9 @@ def compute_metrics(
  NDArray[np.float64]
  Confusion matrix containing IOU values.
  NDArray[np.float64]
- Hallucination ratios.
+ Unmatched prediction ratios.
  NDArray[np.float64]
- Missing prediction ratios.
+ Unmatched ground truth ratios.
  """
  n_labels = label_metadata.shape[0]
  gt_counts = label_metadata[:, 0]
@@ -108,7 +108,7 @@ def compute_metrics(

  counts = data.sum(axis=0)

- # compute iou, missing_predictions and hallucinations
+ # compute iou, unmatched_ground_truth and unmatched predictions
  intersection_ = counts[1:, 1:]
  union_ = (
  gt_counts[:, np.newaxis] + pd_counts[np.newaxis, :] - intersection_
@@ -122,20 +122,20 @@ def compute_metrics(
  out=ious,
  )

- hallucination_ratio = np.zeros((n_labels), dtype=np.float64)
+ unmatched_prediction_ratio = np.zeros((n_labels), dtype=np.float64)
  np.divide(
  counts[0, 1:],
  pd_counts,
  where=pd_counts > 1e-9,
- out=hallucination_ratio,
+ out=unmatched_prediction_ratio,
  )

- missing_prediction_ratio = np.zeros((n_labels), dtype=np.float64)
+ unmatched_ground_truth_ratio = np.zeros((n_labels), dtype=np.float64)
  np.divide(
  counts[1:, 0],
  gt_counts,
  where=gt_counts > 1e-9,
- out=missing_prediction_ratio,
+ out=unmatched_ground_truth_ratio,
  )

  # compute precision, recall, f1
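Both ratios are computed with np.divide writing into a preallocated output under a where= guard, so labels with a zero denominator keep a ratio of 0.0 instead of raising a divide-by-zero warning. A minimal sketch of the same pattern with hypothetical counts:

    import numpy as np

    numerator = np.array([2.0, 0.0, 5.0])
    denominator = np.array([4.0, 0.0, 10.0])

    ratio = np.zeros_like(numerator)
    np.divide(numerator, denominator, where=denominator > 1e-9, out=ratio)
    print(ratio)  # [0.5 0.  0.5]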
@@ -168,6 +168,6 @@ def compute_metrics(
  f1_score,
  accuracy,
  ious,
- hallucination_ratio,
- missing_prediction_ratio,
+ unmatched_prediction_ratio,
+ unmatched_ground_truth_ratio,
  )
@@ -4,6 +4,7 @@ from dataclasses import dataclass
  import numpy as np
  from numpy.typing import NDArray
  from tqdm import tqdm
+
  from valor_lite.semantic_segmentation.annotation import Segmentation
  from valor_lite.semantic_segmentation.computation import (
  compute_intermediate_confusion_matrices,
@@ -37,7 +38,7 @@ filtered_metrics = evaluator.evaluate(filter_mask=filter_mask)

  @dataclass
  class Filter:
- indices: NDArray[np.int32]
+ indices: NDArray[np.intp]
  label_metadata: NDArray[np.int32]
  n_pixels: int

@@ -209,11 +209,11 @@ class Metric(BaseMetric):
  dict[str, float], # iou
  ],
  ],
- hallucinations: dict[
+ unmatched_predictions: dict[
  str, # prediction label value
  dict[str, float], # pixel ratio
  ],
- missing_predictions: dict[
+ unmatched_ground_truths: dict[
  str, # ground truth label value
  dict[str, float], # pixel ratio
  ],
@@ -222,8 +222,8 @@ class Metric(BaseMetric):
  The confusion matrix and related metrics for semantic segmentation tasks.

  This class encapsulates detailed information about the model's performance, including correct
- predictions, misclassifications, hallucinations (false positives), and missing predictions
- (false negatives). It provides counts for each category to facilitate in-depth analysis.
+ predictions, misclassifications, unmatched_predictions (subset of false positives), and unmatched ground truths
+ (subset of false negatives). It provides counts for each category to facilitate in-depth analysis.

  Confusion Matrix Format:
  {
@@ -234,14 +234,14 @@ class Metric(BaseMetric):
  },
  }

- Hallucinations Format:
+ Unmatched Predictions Format:
  {
  <prediction label>: {
  'iou': <float>,
  },
  }

- Missing Predictions Format:
+ Unmatched Ground Truths Format:
  {
  <ground truth label>: {
  'iou': <float>,
@@ -253,10 +253,10 @@ class Metric(BaseMetric):
  confusion_matrix : dict
  Nested dictionaries representing the Intersection over Union (IOU) scores for each
  ground truth label and prediction label pair.
- hallucinations : dict
+ unmatched_predictions : dict
  Dictionary representing the pixel ratios for predicted labels that do not correspond
  to any ground truth labels (false positives).
- missing_predictions : dict
+ unmatched_ground_truths : dict
  Dictionary representing the pixel ratios for ground truth labels that were not predicted
  (false negatives).

@@ -268,8 +268,8 @@ class Metric(BaseMetric):
  type=MetricType.ConfusionMatrix.value,
  value={
  "confusion_matrix": confusion_matrix,
- "hallucinations": hallucinations,
- "missing_predictions": missing_predictions,
+ "unmatched_predictions": unmatched_predictions,
+ "unmatched_ground_truths": unmatched_ground_truths,
  },
  parameters={},
  )
@@ -2,6 +2,7 @@ from collections import defaultdict

  import numpy as np
  from numpy.typing import NDArray
+
  from valor_lite.semantic_segmentation.metric import Metric, MetricType


@@ -18,8 +19,8 @@ def unpack_precision_recall_iou_into_metric_lists(
  f1_score,
  accuracy,
  ious,
- hallucination_ratios,
- missing_prediction_ratios,
+ unmatched_prediction_ratios,
+ unmatched_ground_truth_ratios,
  ) = results

  metrics = defaultdict(list)
@@ -43,16 +44,16 @@ def unpack_precision_recall_iou_into_metric_lists(
  for gt_label_idx in range(n_labels)
  if label_metadata[gt_label_idx, 0] > 0
  },
- hallucinations={
+ unmatched_predictions={
  index_to_label[pd_label_idx]: {
- "ratio": float(hallucination_ratios[pd_label_idx])
+ "ratio": float(unmatched_prediction_ratios[pd_label_idx])
  }
  for pd_label_idx in range(n_labels)
  if label_metadata[pd_label_idx, 0] > 0
  },
- missing_predictions={
+ unmatched_ground_truths={
  index_to_label[gt_label_idx]: {
- "ratio": float(missing_prediction_ratios[gt_label_idx])
+ "ratio": float(unmatched_ground_truth_ratios[gt_label_idx])
  }
  for gt_label_idx in range(n_labels)
  if label_metadata[gt_label_idx, 0] > 0
@@ -1,6 +1,7 @@
  import evaluate
  from nltk.tokenize import RegexpTokenizer
  from nltk.translate import bleu_score
+
  from valor_lite.text_generation.llm.generation import (
  generate_answer_correctness_verdicts,
  generate_answer_relevance_verdicts,
@@ -1,50 +1,33 @@
  Metadata-Version: 2.1
  Name: valor-lite
- Version: 0.33.18
- Summary: Compute valor metrics locally.
- License: MIT License
-
- Copyright (c) 2023 Striveworks
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
+ Version: 0.34.0
+ Summary: Evaluate machine learning models.
  Project-URL: homepage, https://www.striveworks.com
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: evaluate
- Requires-Dist: nltk
  Requires-Dist: numpy
- Requires-Dist: Pillow >=9.1.0
- Requires-Dist: requests
- Requires-Dist: rouge-score
- Requires-Dist: shapely
  Requires-Dist: tqdm
- Requires-Dist: importlib-metadata ; python_version < "3.8"
+ Requires-Dist: shapely
+ Requires-Dist: evaluate
+ Requires-Dist: nltk
+ Requires-Dist: rouge_score
  Provides-Extra: mistral
- Requires-Dist: mistralai >=1.0 ; extra == 'mistral'
+ Requires-Dist: mistralai>=1.0; extra == "mistral"
  Provides-Extra: openai
- Requires-Dist: openai ; extra == 'openai'
+ Requires-Dist: openai; extra == "openai"
  Provides-Extra: test
- Requires-Dist: pytest ; extra == 'test'
- Requires-Dist: coverage ; extra == 'test'
- Requires-Dist: pre-commit ; extra == 'test'
+ Requires-Dist: pytest; extra == "test"
+ Requires-Dist: coverage; extra == "test"
+ Requires-Dist: pre-commit; extra == "test"
+ Provides-Extra: docs
+ Requires-Dist: mkdocs; extra == "docs"
+ Requires-Dist: mkdocs-material; extra == "docs"
+ Requires-Dist: mkdocstrings; extra == "docs"
+ Requires-Dist: mkdocstrings-python; extra == "docs"
+ Requires-Dist: mkdocs-include-dir-to-nav; extra == "docs"
+ Requires-Dist: mkdocs-swagger-ui-tag; extra == "docs"
+ Provides-Extra: dev
+ Requires-Dist: valor-lite[docs,mistral,openai,test]; extra == "dev"

  # valor-lite: Fast, local machine learning evaluation.

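Judging by the Provides-Extra entries above, the optional dependency groups can presumably be installed via extras syntax, e.g. pip install "valor-lite[docs]", with the dev extra aggregating docs, mistral, openai, and test.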
@@ -4,26 +4,26 @@ valor_lite/profiling.py,sha256=TLIROA1qccFw9NoEkMeQcrvvGGO75c4K5yTIWoCUix8,11746
  valor_lite/schemas.py,sha256=pB0MrPx5qFLbwBWDiOUUm-vmXdWvbJLFCBmKgbcbI5g,198
  valor_lite/classification/__init__.py,sha256=8MI8bGwCxYGqRP7KxG7ezhYv4qQ5947XGvvlF8WPM5g,392
  valor_lite/classification/annotation.py,sha256=0aUOvcwBAZgiNOJuyh-pXyNTG7vP7r8CUfnU3OmpUwQ,1113
- valor_lite/classification/computation.py,sha256=qfBhhuDYCiY8h2RdBG3shzgJbHLXDVNujkYFg9xZa6U,12116
- valor_lite/classification/manager.py,sha256=8GXZECSx4CBbG5NfPrA19BPENqmrjo-wZBmaulWHY20,16676
- valor_lite/classification/metric.py,sha256=fkAo-_3s4EIRSkyn3owBSf4_Gp6lBK9xdToDYMWmT8A,12236
- valor_lite/classification/utilities.py,sha256=PmQar06Vt-ew4Jvnn0IM63mq730QVTsdRtFdVu1HMFU,6885
+ valor_lite/classification/computation.py,sha256=dOcq1xxarwwwtHya1Wl8KZXE-PoGafOAYK0LQqxGphY,12191
+ valor_lite/classification/manager.py,sha256=cZ6-DKao59QqF0JF_U26tBoydpCElAAH8rRyX_Kc6bc,16618
+ valor_lite/classification/metric.py,sha256=_mW3zynmpW8jUIhK2OeX4usdftHgHM9_l7EAbEe2N3w,12288
+ valor_lite/classification/utilities.py,sha256=eG-Qhd213uf2GXuuqsPxCgBRBFV-z_ADbzneF1kE368,6964
  valor_lite/object_detection/__init__.py,sha256=Ql8rju2q7y0Zd9zFvtBJDRhgQFDm1RSYkTsyH3ZE6pA,648
  valor_lite/object_detection/annotation.py,sha256=x9bsl8b75yvkMByXXiIYI9d9T03olDqtykSvKJc3aFw,7729
- valor_lite/object_detection/computation.py,sha256=P5ijxEBuZ3mxYjBQy24TiQpGxRmPuS40Gwn44uv0J7M,28064
- valor_lite/object_detection/manager.py,sha256=utdILUUCx04EWC0_bHGpEPaxcCOhmsOx5lxT9qU1a9s,23033
- valor_lite/object_detection/metric.py,sha256=8QhdauuaRrzE39idetkFYTPxA12wrBalQDIR4IUzEbg,24794
- valor_lite/object_detection/utilities.py,sha256=98VSW-g8EYI8Cdd9KHLHdm6F4fI89jaX5I4z99zny4s,16271
+ valor_lite/object_detection/computation.py,sha256=Q9sFMQhRupseZmGZzA7uEpqpeo9XpmSIkeKGMNEZfP4,28345
+ valor_lite/object_detection/manager.py,sha256=wjjMvzCoWs5oavtSj1NT-o4QFT5Sqf79WIb_OO64y1o,23071
+ valor_lite/object_detection/metric.py,sha256=npK2sxiwCUTKlRlFym1AlZTvP9herf9lakbsBDwljGM,24901
+ valor_lite/object_detection/utilities.py,sha256=42RRyP6L3eWtDY_f7qs7f0WTjhcibmUBu2I4yAwupF0,16456
  valor_lite/semantic_segmentation/__init__.py,sha256=BhTUbwbdJa1FdS4ZA3QSIZ8TuJmdGGLGCd5hX6SzKa4,297
  valor_lite/semantic_segmentation/annotation.py,sha256=xd2qJyIeTW8CT_Goyu3Kvl_51b9b6D3WvUfqwShR0Sk,4990
  valor_lite/semantic_segmentation/benchmark.py,sha256=iVdxUo9LgDbbXUa6eRhZ49LOYw-yyr2W4p9FP3KHg0k,3848
- valor_lite/semantic_segmentation/computation.py,sha256=myHjJZ70f2Xc-PGHx3DcLWvXXRu_H8w9z20n7qV-Abo,4687
- valor_lite/semantic_segmentation/manager.py,sha256=TtwJI7Bsn3zHL2ECOqCmymG-JqREo7I6qxYtycbz54Y,14322
- valor_lite/semantic_segmentation/metric.py,sha256=aJv3wPEl6USLhZ3c4yz6prnBU-EaG4Kz16f0BXcodd4,7046
- valor_lite/semantic_segmentation/utilities.py,sha256=vZM66YNMz9VJclhuKvcWp74nF65s6bscnnD5U9iDW7Q,2925
+ valor_lite/semantic_segmentation/computation.py,sha256=l98h8s9RTWQOB_eg2rconqGL1ZbTS4GMtz69vbyEdQ0,4741
+ valor_lite/semantic_segmentation/manager.py,sha256=p0RuV27S1NTBeYZD6G9dSdOcl3yuRLrjL_SMUjEgRXE,14322
+ valor_lite/semantic_segmentation/metric.py,sha256=T9RfPJf4WgqGQTXYvSy08vJG5bjXXJnyYZeW0mlxMa8,7132
+ valor_lite/semantic_segmentation/utilities.py,sha256=UD0X-iCWMR8Rmw2YaP4HM3lxwhYwo-yNGzF-taAJ8RA,2959
  valor_lite/text_generation/__init__.py,sha256=pGhpWCSZjLM0pPHCtPykAfos55B8ie3mi9EzbNxfj-U,356
  valor_lite/text_generation/annotation.py,sha256=O5aXiwCS4WjA-fqn4ly-O0MsTHoIOmqxqCaAp9IeI3M,1270
- valor_lite/text_generation/computation.py,sha256=cG35qMpxNPEYHXN2fz8wcanESriSHoWMl1idpm9-ous,18638
+ valor_lite/text_generation/computation.py,sha256=vdZTajB0OuGEwoKWxKkq3lWxlPaUsrp_X5-ZgjkYLhg,18639
  valor_lite/text_generation/manager.py,sha256=C4QwvronGHXmYSkaRmUGy7TN0C0aeyDx9Hb-ClNYXK4,24810
  valor_lite/text_generation/metric.py,sha256=C9gbWejjOJ23JVLecuUhYW5rkx30NUCfRtgsM46uMds,10409
  valor_lite/text_generation/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -33,8 +33,7 @@ valor_lite/text_generation/llm/instructions.py,sha256=fz2onBZZWcl5W8iy7zEWkPGU9N
  valor_lite/text_generation/llm/integrations.py,sha256=-rTfdAjq1zH-4ixwYuMQEOQ80pIFzMTe0BYfroVx3Pg,6974
  valor_lite/text_generation/llm/utilities.py,sha256=bjqatGgtVTcl1PrMwiDKTYPGJXKrBrx7PDtzIblGSys,1178
  valor_lite/text_generation/llm/validators.py,sha256=Wzr5RlfF58_2wOU-uTw7C8skan_fYdhy4Gfn0jSJ8HM,2700
- valor_lite-0.33.18.dist-info/LICENSE,sha256=M0L53VuwfEEqezhHb7NPeYcO_glw7-k4DMLZQ3eRN64,1068
- valor_lite-0.33.18.dist-info/METADATA,sha256=oo3sEQQvJJvAIelgFRB1Me2Jmkk-nb_dkphL2k4wo7Y,5888
- valor_lite-0.33.18.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
- valor_lite-0.33.18.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
- valor_lite-0.33.18.dist-info/RECORD,,
+ valor_lite-0.34.0.dist-info/METADATA,sha256=2l5zKt0RkVHbdYmC1kLF71rG_dKZnZQ-XyuAQGf7dTg,4908
+ valor_lite-0.34.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ valor_lite-0.34.0.dist-info/top_level.txt,sha256=9ujykxSwpl2Hu0_R95UQTR_l07k9UUTSdrpiqmq6zc4,11
+ valor_lite-0.34.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.5.0)
+ Generator: setuptools (75.6.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,21 +0,0 @@
- MIT License
-
- Copyright (c) 2023 Striveworks
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.