vtool-ibeis 2.3.0__py3-none-any.whl → 2.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vtool_ibeis/__init__.py CHANGED
@@ -5,7 +5,7 @@ Autogenerate Command:
  mkinit vtool_ibeis -i
  """
  # flake8: noqa
- __version__ = '2.3.0'
+ __version__ = '2.3.1'
  __author__ = 'Jon Crall, Avi Weinstock, Chuck Stewart, Hendrik Weideman, Jason Parham, Zackary Rutfield'
  __author_email__ = 'erotemic@gmail.com'
  __url__ = 'https://github.com/Erotemic/vtool_ibeis'
@@ -282,7 +282,7 @@ def find_duplicate_items(item_arr):
  >>> np.random.seed(0)
  >>> item_arr = np.random.randint(100, size=30)
  >>> duplicate_items = find_duplicate_items(item_arr)
- >>> assert duplicate_items == list(six.iterkeys(ut.find_duplicate_items(item_arr)))
+ >>> assert duplicate_items == list(ut.find_duplicate_items(item_arr).keys())
  >>> result = str(duplicate_items)
  >>> print(result)
  [9, 67, 87, 88]
vtool_ibeis/confusion.py CHANGED
@@ -382,12 +382,7 @@ class ConfusionMetrics(ub.NiceRepr):
  # sklearn has much faster implementation
  # n_fp - count the number of false positives with score >= threshold[i]
  # n_tp - count the number of true positives with score >= threshold[i]
- try:
- from sklearn.metrics._ranking import _binary_clf_curve
- except ImportError:
- from sklearn.metrics.ranking import _binary_clf_curve
-
- n_fp, n_tp, thresholds = _binary_clf_curve(
+ n_fp, n_tp, thresholds = _binary_counts_at_thresholds(
  labels, scores, pos_label=1)

  n_samples = len(labels)
@@ -1160,6 +1155,58 @@ def draw_precision_recall_curve(recall_domain, p_interp, title_pref=None,
  #fig.show()


+ def _binary_clf_curve_local(y_true, y_score, pos_label=1):
+ """
+ Minimal replacement for sklearn.metrics._ranking._binary_clf_curve (unweighted).
+ Returns: fps, tps, thresholds
+ """
+ import numpy as np
+
+ y_true = np.asarray(y_true)
+ y_score = np.asarray(y_score)
+
+ y_true = (y_true == pos_label)
+
+ # sort scores descending
+ desc = np.argsort(y_score, kind="mergesort")[::-1]
+ y_score = y_score[desc]
+ y_true = y_true[desc]
+
+ # indices where score changes
+ distinct = np.where(np.diff(y_score))[0]
+ threshold_idxs = np.r_[distinct, y_true.size - 1]
+
+ tps = np.cumsum(y_true)[threshold_idxs]
+ fps = (threshold_idxs + 1) - tps
+ thresholds = y_score[threshold_idxs]
+ return fps, tps, thresholds
+
+
+ def _binary_counts_at_thresholds(labels, scores, pos_label=1):
+ """
+ Returns fps, tps, thresholds (like old _binary_clf_curve) using public API when available.
+ """
+ try:
+ from sklearn.metrics._ranking import _binary_clf_curve
+ except ImportError:
+ try:
+ from sklearn.metrics.ranking import _binary_clf_curve
+ except ImportError:
+ _binary_clf_curve = None
+
+ if _binary_clf_curve is not None:
+ n_fp, n_tp, thresholds = _binary_clf_curve(
+ labels, scores, pos_label=1)
+ else:
+ # scikit-learn >= 1.8
+ n_fp, n_tp, thresholds = _binary_clf_curve_local(labels, scores, pos_label)
+ # from sklearn.metrics import confusion_matrix_at_thresholds
+ # n_tp, n_fp, fns, tps, thresholds = confusion_matrix_at_thresholds(
+ # labels, scores, pos_label=pos_label
+ # )
+ return n_fp, n_tp, thresholds
+
+
  if __name__ == '__main__':
  """
  CommandLine:
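
For reference, the counting logic in the new _binary_clf_curve_local fallback can be exercised on its own. A minimal sketch with made-up labels and scores (not part of the package), showing how the stable descending sort plus a cumulative sum yields per-threshold true/false positive counts:

import numpy as np

y_true = np.array([1, 1, 0, 1, 0])
y_score = np.array([0.9, 0.8, 0.8, 0.4, 0.2])

# Sort by descending score (mergesort keeps ties stable).
desc = np.argsort(y_score, kind="mergesort")[::-1]
y_true = (y_true[desc] == 1)
y_score = y_score[desc]

# Keep only the last index of each run of equal scores.
distinct = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct, y_true.size - 1]

tps = np.cumsum(y_true)[threshold_idxs]   # true positives with score >= threshold
fps = (threshold_idxs + 1) - tps          # everything else kept so far is a false positive
thresholds = y_score[threshold_idxs]

print(thresholds)  # [0.9 0.8 0.4 0.2]
print(tps)         # [1 2 3 3]
print(fps)         # [0 1 1 2]
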
vtool_ibeis/distance.py CHANGED
@@ -732,6 +732,25 @@ def safe_pdist(arr, *args, **kwargs):

  SeeAlso:
  scipy.spatial.distance.pdist
+
+ Example:
+ >>> # xdoctest: +REQUIRES(module:vtool)
+ >>> import vtool_ibeis as vt
+ >>> import numpy as np
+ >>> arr = np.array([1, 2, 3, 4, 5])
+ >>> result = vt.safe_pdist(arr)
+ >>> assert result.shape == (10,)
+ >>> # metric needs to return a scalar, even if the inputs are vectors
+ >>> # Test the use-cases in ibeis
+ >>> result = vt.safe_pdist(arr, metric=lambda x, y: abs(x - y).item())
+ >>> assert result.shape == (10,)
+ >>> result = vt.safe_pdist(arr, metric=lambda *a: ut.unixtime_hourdiff(*a)[0])
+ >>> assert result.shape == (10,)
+ >>> result = vt.safe_pdist(arr, metric=lambda *a: vt.ori_distance(*a)[0])
+ >>> assert result.shape == (10,)
+ >>> latlon_arr = np.array([(40, 20), (20, 30), (-40, 20)])
+ >>> km_dists = ut.safe_pdist(latlon_arr, metric=vt.haversine)
+ >>> assert km_dists.shape == (3,)
  """
  if arr is None or len(arr) < 2:
  return None
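
The new doctest relies on safe_pdist returning None for degenerate input and accepting a flat array of observations. A standalone sketch of that guard pattern, assuming 1-D input is treated as a column of single-value observations (an inference from the doctest above, not the package's exact code):

import numpy as np
from scipy.spatial import distance

def safe_pdist_sketch(arr, *args, **kwargs):
    # Return None instead of raising when there is nothing to compare.
    if arr is None or len(arr) < 2:
        return None
    arr = np.asarray(arr)
    if arr.ndim == 1:
        # treat a flat array as a column of 1-D observations
        arr = arr[:, None]
    return distance.pdist(arr, *args, **kwargs)

# 5 observations give a condensed distance vector of length 5 * 4 / 2 == 10
assert safe_pdist_sketch(np.array([1, 2, 3, 4, 5])).shape == (10,)
assert safe_pdist_sketch(None) is None
assert safe_pdist_sketch(np.array([[7.0, 7.0]])) is None  # fewer than 2 rows
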
vtool_ibeis/exif.py CHANGED
@@ -244,7 +244,7 @@ def get_lat_lon(exif_dict, default=(-1, -1)):
  python -m vtool_ibeis.exif --test-get_lat_lon

  Example:
- >>> # DISABLE_DOCTEST
+ >>> # xdoctest: +SKIP("bad url")
  >>> from vtool_ibeis.exif import * # NOQA
  >>> import numpy as np
  >>> image_fpath = ut.grab_file_url('http://images.summitpost.org/original/769474.JPG')
@@ -294,7 +294,7 @@ def get_orientation(exif_dict, default=0, on_error='warn'):
  python -m vtool_ibeis.exif --test-get_orientation

  Example:
- >>> # ENABLE_DOCTEST
+ >>> # xdoctest: +SKIP("bad url")
  >>> from vtool_ibeis.exif import * # NOQA
  >>> from os.path import join
  >>> import numpy as np
@@ -334,7 +334,7 @@ def get_orientation_str(exif_dict, **kwargs):
  python -m vtool_ibeis.exif --test-get_orientation_str

  Example:
- >>> # ENABLE_DOCTEST
+ >>> # xdoctest: +SKIP("bad url")
  >>> from vtool_ibeis.exif import * # NOQA
  >>> from os.path import join
  >>> import numpy as np
@@ -360,7 +360,7 @@ def get_unixtime(exif_dict, default=-1):
  TODO: Exif.Image.TimeZoneOffset

  Example:
- >>> # ENABLE_DOCTEST
+ >>> # xdoctest: +SKIP("bad url")
  >>> from vtool_ibeis.exif import * # NOQA
  >>> image_fpath = ut.grab_file_url('http://images.summitpost.org/original/769474.JPG')
  >>> pil_img = Image.open(image_fpath)
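
These exif doctests now use xdoctest's +SKIP directive with a reason string rather than the old DISABLE_DOCTEST/ENABLE_DOCTEST markers. A minimal illustration on a hypothetical function, reusing the same dead URL the diff skips:

def get_exif_demo():
    """
    Example:
        >>> # xdoctest: +SKIP("bad url")
        >>> # the runner records the reason and skips the whole example
        >>> import utool as ut
        >>> image_fpath = ut.grab_file_url('http://images.summitpost.org/original/769474.JPG')
    """
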
vtool_ibeis/features.py CHANGED
@@ -1,6 +1,5 @@
  import utool as ut
  import ubelt as ub
- import six


  def extract_feature_from_patch(patch):
@@ -69,7 +68,7 @@ def extract_features(img_or_fpath, feat_type='hesaff+sift', **kwargs):
  # hacky
  from ibeis_cnn import _plugin
  (kpts, sift) = pyhesaff.detect_feats2(img_or_fpath, **kwargs)
- if isinstance(img_or_fpath, six.string_types):
+ if isinstance(img_or_fpath, str):
  import vtool_ibeis as vt
  img_or_fpath = vt.imread(img_or_fpath)
  vecs_list = _plugin.extract_siam128_vecs([img_or_fpath], [kpts])
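
This release drops the six compatibility layer throughout; on Python 3, six.string_types and six.text_type both collapse to plain str. A small sketch of the path-or-array dispatch that extract_features performs (the helper name here is illustrative):

import numpy as np

def _rectify_image_input(img_or_fpath):
    """Accept either a file path or an already-loaded image array."""
    if isinstance(img_or_fpath, str):   # was: isinstance(..., six.string_types)
        import cv2
        return cv2.imread(img_or_fpath)
    return np.asarray(img_or_fpath)

# An ndarray passes straight through; a str would be read from disk.
assert _rectify_image_input(np.zeros((2, 2, 3))).shape == (2, 2, 3)
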
vtool_ibeis/histogram.py CHANGED
@@ -20,18 +20,21 @@ def argsubmax(ydata, xdata=None):
  >>> xdata = [00, 10, 20, 30, 40]
  >>> result1 = argsubmax(ydata, xdata=None)
  >>> result2 = argsubmax(ydata, xdata=xdata)
- >>> result = ub.repr2([result1, result2], precision=4, nl=1, nobr=True)
- >>> print(result)
- 2.1667, 2.0208,
- 21.6667, 2.0208,
+ >>> import ubelt as ub
+ >>> print(f'result1 = {ub.urepr(result1, nl=0, precision=3)}')
+ >>> print(f'result2 = {ub.urepr(result2, nl=0, precision=3)}')
+ result1 = (2.167, 2.021)
+ result2 = (21.667, 2.021)

  Example:
  >>> from vtool_ibeis.histogram import * # NOQA
  >>> hist_ = np.array([0, 1, 2, 3, 4])
  >>> centers = None
  >>> maxima_thresh=None
- >>> argsubmax(hist_)
- (4.0, 4.0)
+ >>> result = argsubmax(hist_)
+ >>> import ubelt as ub
+ >>> print(f'result = {ub.urepr(result, nl=0)}')
+ result = (4.0, 4.0)
  """
  if len(ydata) == 0:
  raise IndexError('zero length array')
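
The rewritten doctests print through ub.urepr (the successor to ub.repr2), so the expected text no longer depends on how a particular NumPy version reprs its scalars. A quick illustration of the call used above:

import ubelt as ub

result1 = (2.1666666, 2.0208333)
# nl=0 keeps the output on one line; precision controls float formatting
print(f'result1 = {ub.urepr(result1, nl=0, precision=3)}')
# result1 = (2.167, 2.021)
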
vtool_ibeis/image.py CHANGED
@@ -1,4 +1,3 @@
- import six
  import os
  from os.path import exists, join # NOQA
  from os.path import splitext
@@ -82,7 +81,7 @@ def _rectify_border_mode(border_mode, default=cv2.BORDER_CONSTANT):
  """ Converts argument to cv2 style """
  if border_mode is None:
  return default
- elif isinstance(border_mode, six.text_type):
+ elif isinstance(border_mode, str):
  return CV2_BORDER_TYPES[border_mode]
  else:
  return border_mode
@@ -102,7 +101,7 @@ def _rectify_interpolation(interp, default=cv2.INTER_LANCZOS4):
  """
  if interp is None:
  return default
- elif isinstance(interp, six.text_type):
+ elif isinstance(interp, str):
  try:
  return CV2_INTERPOLATION_TYPES[interp]
  except KeyError:
@@ -441,7 +440,7 @@ def imread_remote_s3(img_fpath, **kwargs):


  def imread_remote_url(img_url, **kwargs):
- from six.moves import urllib
+ import urllib
  import io
  print("USE PIL REMOTE")
  addinfourl = urllib.request.urlopen(img_url)
@@ -470,7 +469,10 @@ def _imread_bytesio(image_stream, use_pil=False, flags=None, **kwargs):
  if flags is None:
  grayscale = kwargs.get('grayscale', False)
  flags = cv2.IMREAD_GRAYSCALE if grayscale else IMREAD_COLOR
- nparr = np.fromstring(image_stream.getvalue(), np.uint8)
+ try:
+ nparr = np.fromstring(image_stream.getvalue(), np.uint8)
+ except Exception:
+ nparr = np.frombuffer(image_stream.getvalue(), np.uint8)
  imgBGR = cv2.imdecode(nparr, flags=flags) # cv2.IMREAD_COLOR in OpenCV 3.1
  return imgBGR

@@ -2199,7 +2201,7 @@ def combine_offset_lists(offsets_list, sfs_list, offset_tups, sf_tups):
  """ Helper for stacking """
  # combine the offsets
  import operator
- from six.moves import reduce
+ from functools import reduce

  assert len(offsets_list) == len(offset_tups)
  assert len(sfs_list) == len(sf_tups)
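
Recent NumPy releases deprecate and ultimately reject binary-mode np.fromstring, hence the np.frombuffer fallback in _imread_bytesio. A self-contained sketch of decoding an in-memory image the new way (the PNG bytes are generated on the fly purely for illustration):

import io
import cv2
import numpy as np

# Build some image bytes in memory so the example has no file dependency.
encoded_ok, encoded = cv2.imencode('.png', np.full((4, 4, 3), 255, dtype=np.uint8))
assert encoded_ok
image_stream = io.BytesIO(encoded.tobytes())

# np.frombuffer is the non-deprecated way to view bytes as a uint8 array.
nparr = np.frombuffer(image_stream.getvalue(), np.uint8)
imgBGR = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
assert imgBGR.shape == (4, 4, 3)
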
vtool_ibeis/inspect_matches.py CHANGED
@@ -1,5 +1,7 @@
- import utool as ut
+ #!/usr/bin/env python3
+ import scriptconfig as scfg
  import ubelt as ub
+ import utool as ut
  try:
  import guitool_ibeis as gt
  from guitool_ibeis import mpl_widget
@@ -345,10 +347,45 @@ def show_matching_dict(matches, metadata, *args, **kwargs):
  return interact


+ class InspectMatchesCLI(scfg.DataConfig):
+ """
+ Run a 1vs1 matching
+ """
+ img1 = scfg.Value('tsukuba_r', type=str, position=1, help='key or path of test image 1')
+ img2 = scfg.Value('tsukuba_l', type=str, position=2, help='key or path of test image 2')
+
+ @classmethod
+ def main(cls, argv=1, **kwargs):
+ """
+ Example:
+ >>> # xdoctest: +SKIP
+ >>> from vtool_ibeis.inspect_matches import * # NOQA
+ >>> argv = 0
+ >>> kwargs = dict()
+ >>> cls = InspectMatchesCLI
+ >>> config = cls(**kwargs)
+ >>> cls.main(argv=argv, **config)
+ """
+ import vtool_ibeis as vt
+ config = cls.cli(argv=argv, data=kwargs, strict=True, verbose='auto')
+ gt.ensure_qapp()
+ ut.qtensure()
+ annot1 = lazy_test_annot(config.img1)
+ annot2 = lazy_test_annot(config.img2)
+ match = vt.PairwiseMatch(annot1, annot2)
+ self = MatchInspector(match=match)
+ self.show()
+ # xdoctest: +REQUIRES(--show)
+ #self.update()
+ gt.qtapp_loop(qwin=self, freq=10)
+
+ __cli__ = InspectMatchesCLI
+
  if __name__ == '__main__':
  """
+
  CommandLine:
- xdoctest -m vtool_ibeis.inspect_matches
+ python ~/code/vtool_ibeis/vtool_ibeis/inspect_matches.py
+ python -m vtool_ibeis.inspect_matches /home/joncrall/Downloads/tmp/ibeis/IMG_0070_A.JPG /home/joncrall/Downloads/tmp/ibeis/IMG_0315_A.JPG
  """
- import xdoctest
- xdoctest.doctest_module(__file__)
+ __cli__.main()
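
The new InspectMatchesCLI follows the scriptconfig pattern: a DataConfig subclass declares positional values and cls.cli(...) parses the command line into a config object. A minimal sketch of the same pattern without the Qt and vtool_ibeis dependencies (class and field names here are illustrative):

import scriptconfig as scfg

class DemoCLI(scfg.DataConfig):
    """
    Compare two images (demo of the DataConfig pattern).
    """
    img1 = scfg.Value('tsukuba_r', type=str, position=1, help='first image key or path')
    img2 = scfg.Value('tsukuba_l', type=str, position=2, help='second image key or path')

    @classmethod
    def main(cls, argv=1, **kwargs):
        # argv=1 reads sys.argv; the doctest above passes argv=0 to skip CLI parsing
        config = cls.cli(argv=argv, data=kwargs, strict=True)
        print('img1 =', config.img1)
        print('img2 =', config.img2)

if __name__ == '__main__':
    DemoCLI.main()
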
vtool_ibeis/keypoint.py CHANGED
@@ -659,7 +659,7 @@ def get_transforms_from_patch_image_kpts(kpts, patch_shape, scale_factor=1.0):
  %timeit np.array([S2.dot(A) for A in invVR_aff2Ds])
  %timeit op.matmul(S2, invVR_aff2Ds)

- from six.moves import reduce
+ from functools import reduce
  perspective_list2 = np.array([S2.dot(A).dot(S1).dot(T1) for A in invVR_aff2Ds])
  perspective_list = reduce(op.matmul, (S2, invVR_aff2Ds, S1, T1))
  assert np.all(perspective_list == perspective_list2)
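
reduce now comes from functools rather than six.moves; the surrounding snippet uses it to left-fold a chain of matrices with operator.matmul. A small self-contained check of that idea:

from functools import reduce
import operator as op
import numpy as np

S2 = np.diag([2.0, 2.0, 1.0])           # scale
A = np.array([[1.0, 0.5, 0.0],          # shear
              [0.0, 1.0, 0.0],
              [0.0, 0.0, 1.0]])
T1 = np.array([[1.0, 0.0, 3.0],         # translate
               [0.0, 1.0, 5.0],
               [0.0, 0.0, 1.0]])

# reduce(op.matmul, ...) left-folds the chain: (S2 @ A) @ T1
chained = reduce(op.matmul, (S2, A, T1))
assert np.allclose(chained, S2 @ A @ T1)
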
vtool_ibeis/other.py CHANGED
@@ -1266,9 +1266,14 @@ def intersect2d_numpy(A, B, assume_unique=False, return_indices=False):
  >>> C, Ax, Bx = intersect2d_numpy(A, B, return_indices=True)
  >>> # verify results
  >>> result = str((C.T, Ax, Bx))
- >>> print(result)
- (array([[ 85, 403, 412],
- [ 32, 22, 103]]), array([2, 6, 7]), array([0, 1, 2]))
+ >>> import ubelt as ub
+ >>> print(f'C.T = {ub.urepr(C.T, nl=1, with_dtype=False)}')
+ >>> print(f'Ax = {ub.urepr(Ax, nl=1, with_dtype=False)}')
+ >>> print(f'Bx = {ub.urepr(Bx, nl=1, with_dtype=False)}')
+ C.T = np.array([[ 85, 403, 412],
+ [ 32, 22, 103]])
+ Ax = np.array([2, 6, 7])
+ Bx = np.array([0, 1, 2])

  Example2:
  >>> # ENABLE_DOCTEST
@@ -1276,9 +1281,15 @@ def intersect2d_numpy(A, B, assume_unique=False, return_indices=False):
  >>> A = np.array([[1, 2, 3], [1, 1, 1]])
  >>> B = np.array([[1, 2, 3], [1, 2, 14]])
  >>> C, Ax, Bx = intersect2d_numpy(A, B, return_indices=True)
- >>> result = str((C, Ax, Bx))
- >>> print(result)
- (array([[1, 2, 3]]), array([0]), array([0]))
+ >>> import ubelt as ub
+ >>> print(f'C.T = {ub.urepr(C.T, nl=1, with_dtype=False)}')
+ >>> print(f'Ax = {ub.urepr(Ax, nl=1, with_dtype=False)}')
+ >>> print(f'Bx = {ub.urepr(Bx, nl=1, with_dtype=False)}')
+ C.T = np.array([[1],
+ [2],
+ [3]])
+ Ax = np.array([0])
+ Bx = np.array([0])
  """
  nrows, ncols = A.shape
  A_, B_, C_ = intersect2d_structured_numpy(A, B, assume_unique)
@@ -1672,12 +1683,13 @@ def find_first_true_indices(flags_list):
  >>> index_list = find_first_true_indices(flags_list)
  >>> # verify results
  >>> result = str(index_list)
- >>> print(result)
- [0, None, 1, 2]
+ >>> import ubelt as ub
+ >>> print(f'result = {ub.urepr(result, nl=1)}')
+ result = '[0, None, 1, 2]'
  """
  def tryget_fisrt_true(flags):
  index_list = np.where(flags)[0]
- index = None if len(index_list) == 0 else index_list[0]
+ index = None if len(index_list) == 0 else int(index_list[0])
  return index
  index_list = [tryget_fisrt_true(flags) for flags in flags_list]
  return index_list
@@ -1691,9 +1703,6 @@ def find_k_true_indicies(flags_list, k):
  Args:
  flags_list (list): list of lists of booleans

- CommandLine:
- python -m utool.util_list --test-find_next_true_indices
-
  Example:
  >>> # ENABLE_DOCTEST
  >>> from vtool_ibeis.other import * # NOQA
@@ -1703,9 +1712,14 @@ def find_k_true_indicies(flags_list, k):
  ... [True, True, True]]
  >>> k = 2
  >>> indices = find_k_true_indicies(flags_list, k)
- >>> result = str(indices)
- >>> print(result)
- [array([2]), None, array([1, 2]), array([0, 1])]
+ >>> import ubelt as np
+ >>> print(f'result = {ub.urepr(indices, nl=1, with_dtype=False)}')
+ result = [
+ np.array([2]),
+ None,
+ np.array([1, 2]),
+ np.array([0, 1]),
+ ]
  """

  if False:
@@ -1733,7 +1747,7 @@ def find_next_true_indices(flags_list, offset_list):
  flags_list (list): list of lists of booleans

  CommandLine:
- python -m utool.util_list --test-find_next_true_indices
+ xdoctest vtool_ibeis.other find_next_true_indices

  Example:
  >>> # ENABLE_DOCTEST
@@ -1747,15 +1761,15 @@ def find_next_true_indices(flags_list, offset_list):
  >>> # execute function
  >>> index_list = find_next_true_indices(flags_list, offset_list)
  >>> # verify results
- >>> result = str(index_list)
- >>> print(result)
- [2, None, 2, None]
+ >>> import ubelt as ub
+ >>> print(f'index_list = {ub.urepr(index_list, nl=0)}')
+ index_list = [2, None, 2, None]
  """
  def tryget_next_true(flags, offset_):
  offset = offset_ + 1
  relative_flags = flags[offset:]
  rel_index_list = np.where(relative_flags)[0]
- index = None if len(rel_index_list) == 0 else rel_index_list[0] + offset
+ index = None if len(rel_index_list) == 0 else int(rel_index_list[0] + offset)
  return index
  index_list = [None if offset is None else tryget_next_true(flags, offset)
  for flags, offset in zip(flags_list, offset_list)]
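
The added int(...) casts make these helpers return builtin ints instead of NumPy scalars, presumably so the urepr-based doctests print the same text on NumPy 1.x and 2.x (2.x reprs scalars as np.int64(2)). A quick illustration of the difference:

import numpy as np

flags = [False, False, True, True]
idx = np.where(flags)[0][0]

# On NumPy 2.x this prints something like [np.int64(2)]; on 1.x it prints [2].
print([idx])

# Casting to a builtin int gives the same text everywhere.
print([int(idx)])   # [2]
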
vtool_ibeis/score_normalization.py CHANGED
@@ -25,17 +25,22 @@ def testdata_score_normalier(tp_bumps=[(6.5, 256)], tn_bumps=[(3.5, 256)], tp_sc
  encoder.fit(data, labels)
  return encoder, data, labels

+ try:
+ _trapz = np.trapz
+ except Exception:
+ _trapz = np.trapezoid
+

  def get_left_area(ydata, xdata, index_list):
  """ area to the left of each index point """
- left_area = np.array([np.trapz(ydata[:ix + 1], xdata[:ix + 1])
+ left_area = np.array([_trapz(ydata[:ix + 1], xdata[:ix + 1])
  for ix in index_list])
  return left_area


  def get_right_area(ydata, xdata, index_list):
  """ area to the right of each index point """
- right_area = np.array([np.trapz(ydata[ix:], xdata[ix:])
+ right_area = np.array([_trapz(ydata[ix:], xdata[ix:])
  for ix in index_list])
  return right_area

@@ -207,7 +212,7 @@ class ScoreNormalizer(ut.Cachable, ScoreNormVisualizeClass):
  >>> # ENABLE_DOCTEST
  >>> from vtool_ibeis.score_normalization import * # NOQA
  >>> encoder = ScoreNormalizer()
- >>> from six.moves import cPickle as pickle
+ >>> import pickle
  >>> dump = pickle.dumps(encoder)
  >>> encoder2 = pickle.loads(dump)
  """
@@ -1017,8 +1022,8 @@ def learn_score_normalization(tp_support, tn_support, gridsize=1024, adjust=8,

  if True:
  # Make sure we still have probability functions
- area_tp = np.trapz(p_score_given_tp, score_domain)
- area_tn = np.trapz(p_score_given_tn, score_domain)
+ area_tp = _trapz(p_score_given_tp, score_domain)
+ area_tn = _trapz(p_score_given_tn, score_domain)
  if verbose:
  print('pre.area_tp = %r' % (area_tp,))
  print('pre.area_tn = %r' % (area_tn,))
@@ -1027,8 +1032,8 @@ def learn_score_normalization(tp_support, tn_support, gridsize=1024, adjust=8,
  p_score_given_tp = p_score_given_tp / area_tp
  p_score_given_tn = p_score_given_tn / area_tn

- area_tp = np.trapz(p_score_given_tp, score_domain)
- area_tn = np.trapz(p_score_given_tn, score_domain)
+ area_tp = _trapz(p_score_given_tp, score_domain)
+ area_tn = _trapz(p_score_given_tn, score_domain)
  #if ut.DEBUG2:
  if verbose:
  print('norm.area_tp = %r' % (area_tp,))
@@ -1055,12 +1060,12 @@ def learn_score_normalization(tp_support, tn_support, gridsize=1024, adjust=8,
  # Apply bayes
  p_tp_given_score = ut.bayes_rule(p_score_given_tp, p_tp, p_score)
  if ut.DEBUG2:
- assert np.isclose(np.trapz(p_score, score_domain), 1.0)
- assert np.isclose(np.trapz(p_score, p_tp_given_score), 1.0)
+ assert np.isclose(_trapz(p_score, score_domain), 1.0)
+ assert np.isclose(_trapz(p_score, p_tp_given_score), 1.0)
  if np.any(np.isnan(p_tp_given_score)):
  p_tp_given_score = vt.interpolate_nans(p_tp_given_score)
  if verbose:
- # np.trapz(p_tp_given_score / np.trapz(p_tp_given_score, score_domain), score_domain)
+ # _trapz(p_tp_given_score / _trapz(p_tp_given_score, score_domain), score_domain)
  print('stats:p_score_given_tn = ' + ut.get_stats_str(p_score_given_tn, newlines=0, use_nan=True, precision=5))
  print('stats:p_score_given_tp = ' + ut.get_stats_str(p_score_given_tp, newlines=0, use_nan=True, precision=5))
  print('stats:p_score = ' + ut.get_stats_str(p_score, newlines=0, use_nan=True, precision=5))
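
NumPy 2.0 removed np.trapz in favor of np.trapezoid, which is why the module now resolves _trapz once at import time and calls it everywhere. A standalone version of the same shim with a sanity check:

import numpy as np

try:
    _trapz = np.trapz          # NumPy < 2.0
except AttributeError:
    _trapz = np.trapezoid      # NumPy >= 2.0

# Integrate y = x over [0, 1]; the trapezoid rule is exact for a line.
x = np.linspace(0, 1, 11)
area = _trapz(x, x)
assert np.isclose(area, 0.5)
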
vtool_ibeis/symbolic.py CHANGED
@@ -2,7 +2,6 @@
  Sympy helpers
  """
  import numpy as np
- import six
  import utool as ut
  import ubelt as ub

@@ -38,7 +37,7 @@ def evalprint(str_, globals_=None, locals_=None, simplify=False):
  globals_ = ut.get_parent_frame().f_globals
  if locals_ is None:
  locals_ = ut.get_parent_frame().f_locals
- if isinstance(str_, six.string_types):
+ if isinstance(str_, str):
  var = eval(str_, globals_, locals_)
  else:
  var = str_
@@ -72,9 +71,9 @@ def check_expr_eq(expr1, expr2, verbose=True):
  >>> print(result)
  """
  import sympy
- if isinstance(expr1, six.string_types):
+ if isinstance(expr1, str):
  expr1 = sympy.simplify(expr1)
- if isinstance(expr2, six.string_types):
+ if isinstance(expr2, str):
  expr2 = sympy.simplify(expr2)
  print(ub.hzcat('Checking if ', repr(expr1), ' == ', repr(expr2)))
  random_point_check = expr1.equals(expr2)
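
check_expr_eq normalizes string inputs with sympy.simplify (which sympifies the string first) and then compares expressions with Expr.equals, which falls back to numeric checks at random points. A tiny standalone sketch of that flow (the function name here is mine):

import sympy

def check_expr_eq_sketch(expr1, expr2):
    # Strings are parsed and simplified; sympy expressions pass through.
    if isinstance(expr1, str):
        expr1 = sympy.simplify(expr1)
    if isinstance(expr2, str):
        expr2 = sympy.simplify(expr2)
    # equals() tries simplification and evaluation at random points
    return expr1.equals(expr2)

assert check_expr_eq_sketch('cos(x)**2 + sin(x)**2', '1')
assert not check_expr_eq_sketch('x + 1', 'x + 2')
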