python-wml 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of python-wml has been flagged as potentially problematic; see the package registry's advisory page for more details.

Files changed (164):
  1. python_wml-3.0.0.dist-info/LICENSE +23 -0
  2. python_wml-3.0.0.dist-info/METADATA +51 -0
  3. python_wml-3.0.0.dist-info/RECORD +164 -0
  4. python_wml-3.0.0.dist-info/WHEEL +5 -0
  5. python_wml-3.0.0.dist-info/top_level.txt +1 -0
  6. wml/__init__.py +0 -0
  7. wml/basic_data_def/__init__.py +2 -0
  8. wml/basic_data_def/detection_data_def.py +279 -0
  9. wml/basic_data_def/io_data_def.py +2 -0
  10. wml/basic_img_utils.py +816 -0
  11. wml/img_patch.py +92 -0
  12. wml/img_utils.py +571 -0
  13. wml/iotoolkit/__init__.py +17 -0
  14. wml/iotoolkit/aic_keypoint.py +115 -0
  15. wml/iotoolkit/baidu_mask_toolkit.py +244 -0
  16. wml/iotoolkit/base_dataset.py +210 -0
  17. wml/iotoolkit/bboxes_statistics.py +515 -0
  18. wml/iotoolkit/build.py +0 -0
  19. wml/iotoolkit/cityscapes_toolkit.py +183 -0
  20. wml/iotoolkit/classification_data_statistics.py +25 -0
  21. wml/iotoolkit/coco_data_fwd.py +225 -0
  22. wml/iotoolkit/coco_keypoints.py +118 -0
  23. wml/iotoolkit/coco_keypoints_fmt2.py +103 -0
  24. wml/iotoolkit/coco_toolkit.py +397 -0
  25. wml/iotoolkit/coco_wholebody.py +269 -0
  26. wml/iotoolkit/common.py +108 -0
  27. wml/iotoolkit/crowd_pose.py +146 -0
  28. wml/iotoolkit/fast_labelme.py +110 -0
  29. wml/iotoolkit/image_folder.py +95 -0
  30. wml/iotoolkit/imgs_cache.py +58 -0
  31. wml/iotoolkit/imgs_reader_mt.py +73 -0
  32. wml/iotoolkit/labelme_base.py +102 -0
  33. wml/iotoolkit/labelme_json_to_img.py +49 -0
  34. wml/iotoolkit/labelme_toolkit.py +117 -0
  35. wml/iotoolkit/labelme_toolkit_fwd.py +733 -0
  36. wml/iotoolkit/labelmemckeypoints_dataset.py +169 -0
  37. wml/iotoolkit/lspet.py +48 -0
  38. wml/iotoolkit/mapillary_vistas_toolkit.py +269 -0
  39. wml/iotoolkit/mat_data.py +90 -0
  40. wml/iotoolkit/mckeypoints_statistics.py +28 -0
  41. wml/iotoolkit/mot_datasets.py +62 -0
  42. wml/iotoolkit/mpii.py +108 -0
  43. wml/iotoolkit/npmckeypoints_dataset.py +164 -0
  44. wml/iotoolkit/o365_to_coco.py +136 -0
  45. wml/iotoolkit/object365_toolkit.py +156 -0
  46. wml/iotoolkit/object365v2_toolkit.py +71 -0
  47. wml/iotoolkit/pascal_voc_data.py +51 -0
  48. wml/iotoolkit/pascal_voc_toolkit.py +194 -0
  49. wml/iotoolkit/pascal_voc_toolkit_fwd.py +473 -0
  50. wml/iotoolkit/penn_action.py +57 -0
  51. wml/iotoolkit/rawframe_dataset.py +129 -0
  52. wml/iotoolkit/rewrite_pascal_voc.py +28 -0
  53. wml/iotoolkit/semantic_data.py +49 -0
  54. wml/iotoolkit/split_file_by_type.py +29 -0
  55. wml/iotoolkit/sports_mot_datasets.py +78 -0
  56. wml/iotoolkit/vis_objectdetection_dataset.py +70 -0
  57. wml/iotoolkit/vis_torch_data.py +39 -0
  58. wml/iotoolkit/yolo_toolkit.py +38 -0
  59. wml/object_detection2/__init__.py +4 -0
  60. wml/object_detection2/basic_visualization.py +37 -0
  61. wml/object_detection2/bboxes.py +812 -0
  62. wml/object_detection2/data_process_toolkit.py +146 -0
  63. wml/object_detection2/keypoints.py +292 -0
  64. wml/object_detection2/mask.py +120 -0
  65. wml/object_detection2/metrics/__init__.py +3 -0
  66. wml/object_detection2/metrics/build.py +15 -0
  67. wml/object_detection2/metrics/classifier_toolkit.py +440 -0
  68. wml/object_detection2/metrics/common.py +71 -0
  69. wml/object_detection2/metrics/mckps_toolkit.py +338 -0
  70. wml/object_detection2/metrics/toolkit.py +1953 -0
  71. wml/object_detection2/npod_toolkit.py +361 -0
  72. wml/object_detection2/odtools.py +243 -0
  73. wml/object_detection2/standard_names.py +75 -0
  74. wml/object_detection2/visualization.py +956 -0
  75. wml/object_detection2/wmath.py +34 -0
  76. wml/semantic/__init__.py +0 -0
  77. wml/semantic/basic_toolkit.py +65 -0
  78. wml/semantic/mask_utils.py +156 -0
  79. wml/semantic/semantic_test.py +21 -0
  80. wml/semantic/structures.py +1 -0
  81. wml/semantic/toolkit.py +105 -0
  82. wml/semantic/visualization_utils.py +658 -0
  83. wml/threadtoolkit.py +50 -0
  84. wml/walgorithm.py +228 -0
  85. wml/wcollections.py +212 -0
  86. wml/wfilesystem.py +487 -0
  87. wml/wml_utils.py +657 -0
  88. wml/wstructures/__init__.py +4 -0
  89. wml/wstructures/common.py +9 -0
  90. wml/wstructures/keypoints_train_toolkit.py +149 -0
  91. wml/wstructures/kps_structures.py +579 -0
  92. wml/wstructures/mask_structures.py +1161 -0
  93. wml/wtorch/__init__.py +8 -0
  94. wml/wtorch/bboxes.py +104 -0
  95. wml/wtorch/classes_suppression.py +24 -0
  96. wml/wtorch/conv_module.py +181 -0
  97. wml/wtorch/conv_ws.py +144 -0
  98. wml/wtorch/data/__init__.py +16 -0
  99. wml/wtorch/data/_utils/__init__.py +45 -0
  100. wml/wtorch/data/_utils/collate.py +183 -0
  101. wml/wtorch/data/_utils/fetch.py +47 -0
  102. wml/wtorch/data/_utils/pin_memory.py +121 -0
  103. wml/wtorch/data/_utils/signal_handling.py +72 -0
  104. wml/wtorch/data/_utils/worker.py +227 -0
  105. wml/wtorch/data/base_data_loader_iter.py +93 -0
  106. wml/wtorch/data/dataloader.py +501 -0
  107. wml/wtorch/data/datapipes/__init__.py +1 -0
  108. wml/wtorch/data/datapipes/iter/__init__.py +12 -0
  109. wml/wtorch/data/datapipes/iter/batch.py +126 -0
  110. wml/wtorch/data/datapipes/iter/callable.py +92 -0
  111. wml/wtorch/data/datapipes/iter/listdirfiles.py +37 -0
  112. wml/wtorch/data/datapipes/iter/loadfilesfromdisk.py +30 -0
  113. wml/wtorch/data/datapipes/iter/readfilesfromtar.py +60 -0
  114. wml/wtorch/data/datapipes/iter/readfilesfromzip.py +63 -0
  115. wml/wtorch/data/datapipes/iter/sampler.py +94 -0
  116. wml/wtorch/data/datapipes/utils/__init__.py +0 -0
  117. wml/wtorch/data/datapipes/utils/common.py +65 -0
  118. wml/wtorch/data/dataset.py +354 -0
  119. wml/wtorch/data/datasets/__init__.py +4 -0
  120. wml/wtorch/data/datasets/common.py +53 -0
  121. wml/wtorch/data/datasets/listdirfilesdataset.py +36 -0
  122. wml/wtorch/data/datasets/loadfilesfromdiskdataset.py +30 -0
  123. wml/wtorch/data/distributed.py +135 -0
  124. wml/wtorch/data/multi_processing_data_loader_iter.py +866 -0
  125. wml/wtorch/data/sampler.py +267 -0
  126. wml/wtorch/data/single_process_data_loader_iter.py +24 -0
  127. wml/wtorch/data/test_data_loader.py +26 -0
  128. wml/wtorch/dataset_toolkit.py +67 -0
  129. wml/wtorch/depthwise_separable_conv_module.py +98 -0
  130. wml/wtorch/dist.py +591 -0
  131. wml/wtorch/dropblock/__init__.py +6 -0
  132. wml/wtorch/dropblock/dropblock.py +228 -0
  133. wml/wtorch/dropblock/dropout.py +40 -0
  134. wml/wtorch/dropblock/scheduler.py +48 -0
  135. wml/wtorch/ema.py +61 -0
  136. wml/wtorch/fc_module.py +73 -0
  137. wml/wtorch/functional.py +34 -0
  138. wml/wtorch/iter_dataset.py +26 -0
  139. wml/wtorch/loss.py +69 -0
  140. wml/wtorch/nets/__init__.py +0 -0
  141. wml/wtorch/nets/ckpt_toolkit.py +219 -0
  142. wml/wtorch/nets/fpn.py +276 -0
  143. wml/wtorch/nets/hrnet/__init__.py +0 -0
  144. wml/wtorch/nets/hrnet/config.py +2 -0
  145. wml/wtorch/nets/hrnet/hrnet.py +494 -0
  146. wml/wtorch/nets/misc.py +249 -0
  147. wml/wtorch/nets/resnet/__init__.py +0 -0
  148. wml/wtorch/nets/resnet/layers/__init__.py +17 -0
  149. wml/wtorch/nets/resnet/layers/aspp.py +144 -0
  150. wml/wtorch/nets/resnet/layers/batch_norm.py +231 -0
  151. wml/wtorch/nets/resnet/layers/blocks.py +111 -0
  152. wml/wtorch/nets/resnet/layers/wrappers.py +110 -0
  153. wml/wtorch/nets/resnet/r50_config.py +38 -0
  154. wml/wtorch/nets/resnet/resnet.py +691 -0
  155. wml/wtorch/nets/shape_spec.py +20 -0
  156. wml/wtorch/nets/simple_fpn.py +101 -0
  157. wml/wtorch/nms.py +109 -0
  158. wml/wtorch/nn.py +896 -0
  159. wml/wtorch/ocr_block.py +193 -0
  160. wml/wtorch/summary.py +331 -0
  161. wml/wtorch/train_toolkit.py +603 -0
  162. wml/wtorch/transformer_blocks.py +266 -0
  163. wml/wtorch/utils.py +719 -0
  164. wml/wtorch/wlr_scheduler.py +100 -0
@@ -0,0 +1,169 @@
1
+ #coding=utf-8
2
+ import numpy as np
3
+ import matplotlib.image as mpimg
4
+ import xml.etree.ElementTree as ET
5
+ from xml.dom.minidom import Document
6
+ import random
7
+ import os
8
+ import math
9
+ import wml.wml_utils
10
+ import logging
11
+ import shutil
12
+ from functools import partial
13
+ import wml.wml_utils as wmlu
14
+ import wml.img_utils as wmli
15
+ import copy
16
+ from easydict import EasyDict
17
+ from wml.object_detection2.standard_names import *
18
+ import pickle
19
+ from .common import *
20
+ import wml.object_detection2.bboxes as odb
21
+ from .pascal_voc_toolkit_fwd import *
22
+ from wml.wstructures import WMCKeypoints
23
+ from .labelme_toolkit_fwd import get_files,read_labelme_mckp_data
24
+
25
+
26
class LabelmeMCKeypointsDataset(object):
    '''
    Dataset over labelme multi-class keypoint annotations.

    self.files holds (img_file, json_file) pairs collected by read_data();
    __getitem__ returns an EasyDict with IMG_INFO, GT_LABELS, GT_KEYPOINTS
    (wrapped in WMCKeypoints) and the raw image bytes under IMAGE, or None
    when reading fails.
    '''

    def __init__(self, label_text2id=None, shuffle=False,
                 filter_empty_files=False,
                 resample_parameters=None,
                 keep_no_json_img=False,
                 ignore_case=True):
        '''
        :param label_text2id: trans a single label text to id: int func(str);
            a dict may also be passed and is wrapped into a lookup function.
        :param shuffle: shuffle self.files at the end of read_data().
        :param filter_empty_files: drop files whose labels all map to None.
        :param resample_parameters: {label (or label text): repeat_nr} used to
            oversample rare classes in read_data().
        :param keep_no_json_img: also keep images without a json annotation.
        :param ignore_case: case-insensitive lookup when label_text2id is a dict.
        '''
        self.files = None
        self.shuffle = shuffle
        self.filter_empty_files = filter_empty_files
        self.keep_no_json_img = keep_no_json_img
        if isinstance(label_text2id, dict):
            if ignore_case:
                # Normalize keys once so lookups can be done in lower case.
                new_dict = dict([(k.lower(), v) for k, v in label_text2id.items()])
                self.label_text2id = partial(ignore_case_dict_label_text2id, dict_data=new_dict)
            else:
                self.label_text2id = partial(dict_label_text2id, dict_data=label_text2id)
        else:
            self.label_text2id = label_text2id

        if resample_parameters is not None:
            self.resample_parameters = {}
            for k, v in resample_parameters.items():
                if isinstance(k, (str, bytes)):
                    # Allow resample keys to be given as label text.
                    k = self.label_text2id(k)
                self.resample_parameters[k] = v
            print("resample parameters")
            wmlu.show_dict(self.resample_parameters)
        else:
            self.resample_parameters = None

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        try:
            fs = self.files[idx]
        except Exception as e:
            # BUGFIX: the original only printed here and fell through with
            # `fs` unbound, which then raised a confusing NameError below;
            # return None like the other failure path instead.
            print(f"ERROR: {e} idx={idx}, total files {len(self.files)}")
            return None
        try:
            data = self.read_one_file(fs)
            labels_names = data[GT_LABELS]
            img_info = data[IMG_INFO]
            if self.label_text2id is not None:
                labels = [self.label_text2id(x) for x in labels_names]
                keypoints = data[GT_KEYPOINTS]
                # Keep only annotations whose label maps to a valid id.
                keep = [x is not None for x in labels]
                labels = list(filter(lambda x: x is not None, labels))
                if len(labels) == 0 or not isinstance(labels[0], (str, bytes)):
                    labels = np.array(labels, dtype=np.int32)
                keypoints = [keypoints[i] for i in np.where(keep)[0]]
                data[GT_LABELS] = labels
                data[GT_KEYPOINTS] = WMCKeypoints(keypoints, width=img_info[WIDTH], height=img_info[HEIGHT])
            else:
                keypoints = data[GT_KEYPOINTS]
                data[GT_KEYPOINTS] = WMCKeypoints(keypoints, width=img_info[WIDTH], height=img_info[HEIGHT])
        except Exception as e:
            # BUGFIX: typo "faild" -> "failed".
            print(f"Read {fs} {e} failed.")
            return None
        return EasyDict(data)

    def read_data(self, dir_path, img_suffix=wmli.BASE_IMG_SUFFIX):
        '''
        Collect annotation files from a directory, a list of directories, or
        an explicit list of (img, json) pairs.

        :return: True when at least one file was found, else False.
        '''
        if isinstance(dir_path, str):
            print(f"Read {dir_path}")
            if not os.path.exists(dir_path):
                print(f"Data path {dir_path} not exists.")
                return False
            self.files = get_files(dir_path, img_suffix=img_suffix, keep_no_json_img=self.keep_no_json_img)
        elif isinstance(dir_path, (list, tuple)) and isinstance(dir_path[0], (str, bytes)) and os.path.isdir(dir_path[0]):
            self.files = []
            for dp in dir_path:
                self.files.extend(get_files(dp,
                                            img_suffix=img_suffix,
                                            keep_no_json_img=self.keep_no_json_img))
        else:
            # Assume dir_path is already a list of (img_file, json_file) pairs.
            self.files = dir_path
        if self.filter_empty_files and self.label_text2id:
            self.files = self.apply_filter_empty_files(self.files)
        if self.resample_parameters is not None and self.label_text2id:
            self.files = self.resample(self.files)

        if len(self.files) == 0:
            return False

        if self.shuffle:
            random.shuffle(self.files)

        return True

    def read_one_file(self, data):
        '''Read one (img_file, json_file) pair into a raw sample dict.'''
        img_file, json_file = data
        image_info, labels, points = read_labelme_mckp_data(json_file, keep_no_json_img=self.keep_no_json_img)
        image_info[FILEPATH] = img_file
        datas = {}
        datas[IMG_INFO] = image_info
        datas[GT_LABELS] = labels
        datas[GT_KEYPOINTS] = points
        with open(img_file, "rb") as f:
            img_data = f.read()
        datas[IMAGE] = img_data
        return datas

    def apply_filter_empty_files(self, files):
        '''Drop files whose labels all map to None via label_text2id.'''
        new_files = []
        for fs in files:
            data = self.read_one_file(fs)
            labels_name = data[GT_LABELS]
            labels = [self.label_text2id(x) for x in labels_name]
            is_none = [x is None for x in labels]
            if not all(is_none):
                new_files.append(fs)
            else:
                print(f"File {fs} is empty, labels names {labels_name}, labels {labels}")

        return new_files

    def resample(self, files):
        '''Oversample files according to self.resample_parameters.'''
        all_labels = []
        for fs in files:
            data = self.read_one_file(fs)
            labels_name = data[GT_LABELS]
            labels = [self.label_text2id(x) for x in labels_name]
            all_labels.append(labels)

        # resample() below is a module-level helper (presumably brought in by
        # one of the `import *` lines above), not recursion — TODO confirm.
        return resample(files, all_labels, self.resample_parameters)

    def get_items(self):
        '''
        :return:
        full_path,img_size,category_ids,category_names,boxes,binary_masks,area,is_crowd,num_annotations_skipped
        '''
        for i in range(len(self.files)):
            yield self.__getitem__(i)
wml/iotoolkit/lspet.py ADDED
@@ -0,0 +1,48 @@
1
+ from wml.iotoolkit.mat_data import MatData
2
+ import wml.object_detection2.keypoints as odk
3
+ import numpy as np
4
+ import wml.object_detection2.bboxes as odb
5
+ '''
6
+ Right ankle
7
+ Right knee
8
+ Right hip
9
+ Left hip
10
+ Left knee
11
+ Left ankle
12
+ Right wrist 6
13
+ Right elbow 7
14
+ Right shoulder 8
15
+ Left shoulder 9
16
+ Left elbow 10
17
+ Left wrist 11
18
+ Neck
19
+ Head top
20
+ '''
21
def read_lspet_data(path):
    """Load LSPET 'joints' annotations from a .mat file.

    The raw array is transposed with axes (2, 0, 1) so the last mat axis
    becomes the leading sample axis (presumably [N, num_joints, coords] —
    confirm against the LSPET release). Returns (kps, bboxes) where bboxes
    are derived per sample from the keypoints.
    """
    joints = MatData(path).data['joints']
    kps = np.transpose(joints, [2, 0, 1])
    return kps, odk.npbatchget_bboxes(kps)
26
+
27
class Trans2COCO:
    """Map LSPET/MPII-ordered keypoints onto the 17-point COCO layout.

    Body joints are copied from the source keypoints through a fixed index
    permutation; the five COCO head points (indices 0-4) are taken from an
    already COCO-formatted keypoint array.
    """

    def __init__(self) -> None:
        # COCO slots that are filled from the source keypoints ...
        self.dst_idxs = [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
        # ... and the matching source joint indices, in the same order.
        self.src_idxs = [9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]
        # Head slots copied straight from the COCO-format input.
        self.coco_idxs = [0, 1, 2, 3, 4]

    def __call__(self, mpii_kps, coco_kps):
        # A single [num_joints, 3] sample is converted directly; a batch is
        # converted pairwise and stacked back into one array.
        if len(mpii_kps.shape) == 2:
            return self.trans_one(mpii_kps, coco_kps)
        converted = [self.trans_one(src, coco) for src, coco in zip(mpii_kps, coco_kps)]
        return np.array(converted)

    def trans_one(self, mpii_kps, coco_kps):
        """Convert one keypoint set; returns a float32 [17, 3] array."""
        out = np.zeros([17, 3], dtype=np.float32)
        out[self.dst_idxs] = mpii_kps[self.src_idxs]
        out[self.coco_idxs] = coco_kps[self.coco_idxs]
        return out
@@ -0,0 +1,269 @@
1
+ import json
2
+ import wml.wml_utils as wmlu
3
+ import numpy as np
4
+ import os
5
+ import cv2 as cv
6
+ import sys
7
+ import random
8
+ from wml.iotoolkit.labelme_toolkit import get_labels_and_bboxes
9
+ import re
10
+ from .common import *
11
+
12
+
13
def get_files(dir_path):
    '''
    Collect (image_path, json_path) pairs from a Mapillary Vistas dataset dir.

    Images are expected under <dir_path>/images and the v2.0 polygon
    annotations under <dir_path>/v2.0/polygons; a pair is kept only when the
    matching .jpg actually exists.
    '''
    img_dir = os.path.join(dir_path, 'images')
    label_dir = os.path.join(dir_path, "v2.0", "polygons")
    res = []
    json_files = wmlu.recurse_get_filepath_in_dir(label_dir, suffix=".json")
    for jf in json_files:
        base_name = wmlu.base_name(jf)
        igf = os.path.join(img_dir, base_name + ".jpg")
        if os.path.exists(igf):
            res.append((igf, jf))
        else:
            # BUGFIX: typo "faild" -> "failed" in the error message.
            print(f"ERROR: Find {igf} failed, json file is {jf}")

    return res
+
28
class LabelsFn:
    """Label matcher built from plain names and (regex, replacement) pairs.

    Calling the instance with a label returns the (possibly remapped) label
    when some rule matches, and False otherwise. Plain string entries match
    by equality and return the label unchanged; (pattern, replacement)
    entries full-match the compiled pattern and return the replacement.
    """

    def __init__(self, data):
        rules = []
        for entry in data:
            if isinstance(entry, (tuple, list)):
                # Compile the pattern once up front.
                rules.append((re.compile(entry[0]), entry[1]))
            else:
                rules.append(entry)
        self.data = rules

    def __call__(self, x):
        for rule in self.data:
            if isinstance(rule, (list, tuple)):
                pattern, replacement = rule
                if pattern.fullmatch(x):
                    return replacement
            elif isinstance(rule, (str, bytes)) and rule == x:
                return x
        return False
45
+
46
class MapillaryVistasData(object):
    '''
    Reader for Mapillary Vistas v2.0 polygon annotations.

    read_data() collects (image, json) pairs; get_items()/get_boxes_items()
    then yield detection samples, optionally with per-instance rasterized
    masks (use_semantic=True).
    '''

    def __init__(self, label_text2id=None, shuffle=False, ignored_labels=[],label_map={},
                 allowed_labels_fn=None,
                 use_semantic=True,
                 absolute_coord=True):
        '''
        :param label_text2id: optional callable mapping label name -> id
        :param shuffle: shuffle the file list in read_data()
        :param ignored_labels: label names skipped while parsing the json
        :param label_map: raw label name -> replacement name
        :param allowed_labels_fn: list (wrapped into LabelsFn) or callable
            deciding which labels to keep / how to remap them
        :param use_semantic: also rasterize each polygon into a binary mask
        :param absolute_coord: bboxes in pixels instead of relative coords

        NOTE(review): the mutable defaults (ignored_labels=[], label_map={})
        are shared between instances; harmless only while never mutated.
        '''
        self.files = None
        self.label_text2id = label_text2id
        self.shuffle = shuffle
        self.ignored_labels = ignored_labels
        self.label_map = label_map
        self.use_semantic = use_semantic
        self.absolute_coord = absolute_coord
        # A plain list is wrapped into a LabelsFn matcher; a callable (or
        # None) is used as-is.
        if allowed_labels_fn is not None and isinstance(allowed_labels_fn,list):
            self.allowed_labels_fn = LabelsFn(allowed_labels_fn)
        else:
            self.allowed_labels_fn = allowed_labels_fn

    def read_data(self, dir_path,*args,**kwargs):
        # Extra args are accepted for interface compatibility with the other
        # dataset readers but ignored here.
        self.files = get_files(dir_path)
        if self.shuffle:
            random.shuffle(self.files)

    def __len__(self):
        return len(self.files)

    def get_items(self,beg=0,end=None,filter=None):
        '''
        :return:
        bboxes: [ymin,xmin,ymax,xmax]
        binary_masks [N,H,W], value is 0 or 1,
        full_path,img_size,category_ids,category_names,boxes,binary_masks,area,is_crowd,num_annotations_skipped

        NOTE(review): `filter` shadows the builtin; it receives
        (img_file, json_file) and returning False skips the sample.
        '''
        if end is None:
            end = len(self.files)
        if beg is None:
            beg = 0
        for i, (img_file, json_file) in enumerate(self.files[beg:end]):
            if filter is not None and not filter(img_file,json_file):
                continue
            print(img_file,json_file)
            # Progress counter is relative to the full file list even when
            # only a [beg:end] slice is iterated.
            sys.stdout.write('\r>> read data %d/%d' % (i + 1, len(self.files)))
            sys.stdout.flush()
            image, annotations_list = self.read_json(json_file,use_semantic=self.use_semantic)
            labels_names, bboxes = get_labels_and_bboxes(image, annotations_list,is_relative_coordinate=not self.absolute_coord)
            # NOTE(review): when use_semantic is False, `masks` is never
            # assigned in this iteration, so the yield below would raise
            # NameError on the first sample (later iterations would reuse a
            # stale value) — looks like only use_semantic=True is exercised;
            # confirm.
            try:
                if self.use_semantic:
                    masks = [ann["segmentation"] for ann in annotations_list]
                    if len(masks) > 0:
                        try:
                            # On failure masks stays a plain list of arrays.
                            masks = np.stack(masks, axis=0)
                        except:
                            print("ERROR: stack masks faild.")
                    else:
                        masks = None
            except:
                masks = None

            if self.label_text2id is not None:
                labels = [self.label_text2id(x) for x in labels_names]
            else:
                labels = None

            yield DetData(img_file, [image['height'], image['width']], labels, labels_names, bboxes, masks, None, None, None)

    def get_boxes_items(self):
        '''
        :return:
        full_path,img_size,category_ids,boxes,is_crowd
        '''
        for i, (img_file, json_file) in enumerate(self.files):
            sys.stdout.write('\r>> read data %d/%d' % (i + 1, len(self.files)))
            sys.stdout.flush()
            # Masks are not needed for bbox-only iteration.
            image, annotations_list = self.read_json(json_file,use_semantic=False)
            labels_names, bboxes = get_labels_and_bboxes(image, annotations_list,is_relative_coordinate=not self.absolute_coord)
            if self.label_text2id is not None:
                labels = [self.label_text2id(x) for x in labels_names]
            else:
                labels = None
            #file, img_size,category_ids, labels_text, bboxes, binary_mask, area, is_crowd, _
            yield DetBboxesData(img_file, [image['height'], image['width']], labels, bboxes, None)

    def read_json(self,file_path,use_semantic=True):
        '''Parse one polygon json file.

        Returns (image, annotations_list) where image holds height/width/
        file_name and each annotation carries bbox (x, y, w, h),
        category_id, the clamped polygon coordinates and, when use_semantic,
        a rasterized binary mask.
        '''
        annotations_list = []
        image = {}
        # NOTE(review): gb18030 is an unusual encoding for Vistas json;
        # presumably chosen to tolerate non-UTF8 bytes — confirm.
        with open(file_path, "r", encoding="gb18030") as f:
            print(file_path)
            data_str = f.read()
            try:
                json_data = json.loads(data_str)
                img_width = int(json_data["width"])
                img_height = int(json_data["height"])
                image["height"] = int(img_height)
                image["width"] = int(img_width)
                image["file_name"] = wmlu.base_name(file_path)
                for shape in json_data["objects"]:
                    #label = shape["label"].split("--")[-1]
                    label = shape["label"]
                    if self.ignored_labels is not None and label in self.ignored_labels:
                        continue
                    if self.label_map is not None and label in self.label_map:
                        label = self.label_map[label]
                    if self.allowed_labels_fn is not None:
                        nl = self.allowed_labels_fn(label)
                        if not nl:
                            continue
                        # A truthy string result remaps the label.
                        if isinstance(nl,(str,bytes)):
                            label = nl
                    all_points = np.array([shape["polygon"]]).astype(np.int32)
                    if len(all_points) < 1:
                        continue
                    # Split the polygon into x/y vectors clamped to the image.
                    points = np.transpose(all_points[0])
                    x, y = np.vsplit(points, 2)
                    x = np.reshape(x, [-1])
                    y = np.reshape(y, [-1])
                    x = np.minimum(np.maximum(0, x), img_width - 1)
                    y = np.minimum(np.maximum(0, y), img_height - 1)
                    xmin = np.min(x)
                    xmax = np.max(x)
                    ymin = np.min(y)
                    ymax = np.max(y)
                    if use_semantic:
                        # Rasterize the polygon into a filled binary mask.
                        mask = np.zeros(shape=[img_height, img_width], dtype=np.uint8)
                        segmentation = cv.drawContours(mask, all_points, -1, color=(1), thickness=cv.FILLED)
                        annotations_list.append({"bbox": (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1),
                                                 "segmentation": segmentation,
                                                 "category_id": label,
                                                 "points_x": x,
                                                 "points_y": y})
                    else:
                        annotations_list.append({"bbox": (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1),
                                                 "category_id": label,
                                                 "points_x": x,
                                                 "points_y": y})
            except:
                # NOTE(review): this bare except hides every parse error.
                print(f"Read file {os.path.basename(file_path)} faild.")
                pass
        if use_semantic:
            '''
            Each pixel only belong to one classes, and the latter annotation will overwrite the previous
            '''
            # NOTE(review): the `>2` threshold skips overlap removal when
            # there are exactly two annotations — likely intended to be `>1`;
            # confirm before changing.
            if len(annotations_list)>2:
                mask = 1-annotations_list[-1]['segmentation']
                for i in reversed(range(len(annotations_list)-1)):
                    annotations_list[i]['segmentation'] = np.logical_and(annotations_list[i]['segmentation'], mask)
                    mask = np.logical_and(mask,1-annotations_list[i]['segmentation'])
        return image, annotations_list
192
+
193
+
194
if __name__ == "__main__":
    # Demo/visualization script for MapillaryVistasData.
    id = 0  # NOTE: shadows the builtin `id`; kept for compatibility
    import wml.img_utils as wmli
    import object_detection_tools.visualization as odv
    import matplotlib.pyplot as plt

    NAME2ID = {}
    ID2NAME = {}

    def name_to_id(x):
        # Assign increasing ids to label names on first sight.
        global id
        if x in NAME2ID:
            return NAME2ID[x]
        else:
            NAME2ID[x] = id
            ID2NAME[id] = x
            id += 1
            return NAME2ID[x]

    ignored_labels = [
        'manhole', 'dashed', 'other-marking', 'static', 'front', 'back',
        'solid', 'catch-basin', 'utility-pole', 'pole', 'street-light',
        # BUGFIX: the original list was missing a comma after
        # 'direction-front', so implicit string concatenation silently merged
        # it with 'ambiguous' into the single bogus label
        # 'direction-frontambiguous' and neither label was ever ignored.
        'direction-back', 'direction-front',
        'ambiguous', 'other', 'text', 'diagonal', 'left', 'right', 'water-valve', 'general-single', 'temporary-front',
        'wheeled-slow', 'parking-meter', 'split-left-or-straight', 'split-right-or-straight', 'zigzag',
        'give-way-row', 'ground-animal', 'phone-booth', 'give-way-single', 'garage', 'temporary-back', 'caravan', 'other-barrier',
        'chevron', 'pothole', 'sand'
    ]
    label_map = {
        'individual': 'person',
        'cyclists': 'person',
        'other-rider': 'person'
    }
    data = MapillaryVistasData(label_text2id=name_to_id, shuffle=False, ignored_labels=ignored_labels, label_map=label_map)
    data.read_data(wmlu.home_dir("ai/mldata/mapillary_vistas/mapillary-vistas-dataset_public_v2.0"))

    def filter(x):
        # Keep only these label names when the visualization filter is on.
        # NOTE: shadows the builtin `filter`; kept for compatibility.
        return x in ['general-single', 'parking', 'temporary', 'general-horizontal']

    for x in data.get_items():
        full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        img = wmli.imread(full_path)

        def text_fn(classes, scores):
            return f"{ID2NAME[classes]}"

        if False:  # flip to True to restrict drawing to filter()ed labels
            is_keep = [filter(x) for x in category_names]
            category_ids = np.array(category_ids)[is_keep]
            boxes = np.array(boxes)[is_keep]
            binary_mask = np.array(binary_mask)[is_keep]
            if len(category_ids) == 0:
                continue

        wmlu.show_dict(NAME2ID)
        odv.draw_bboxes_and_maskv2(
            img=img, classes=category_ids, scores=None, bboxes=boxes, masks=binary_mask, color_fn=None,
            text_fn=text_fn, thickness=4,
            show_text=True,
            fontScale=0.8)
        plt.figure()
        plt.imshow(img)
        plt.show()
+
@@ -0,0 +1,90 @@
1
+ import scipy.io as scio
2
+ import numpy as np
3
+
4
class MatData:
    '''
    Thin recursive wrapper over scipy.io.loadmat results.

    loadmat wraps everything in nested 2-D singleton arrays; get_item unwraps
    those, returning plain scalars for scalar leaves, None for empty arrays,
    and another MatData wrapper otherwise. Fields can be accessed both as
    items (md['x']) and as attributes (md.x).
    '''

    def __init__(self, mat_path=None, data=None):
        '''
        :param mat_path: path of a .mat file to load (exclusive with data).
        :param data: an already-loaded value to wrap.
        '''
        if mat_path is not None:
            assert data is None, "Error arguments"
            data = scio.loadmat(mat_path)
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.get_item(item)

    def __getattr__(self, item):
        # Only called for attributes not found normally; forwards to the
        # wrapped mat data so fields can be read as attributes.
        return self.get_item(item)

    def get_item(self, item):
        '''Fetch a field, unwrapping loadmat's singleton dimensions.

        Returns a plain scalar for scalar leaves, None for empty arrays,
        otherwise a MatData wrapper.
        '''
        if item == 'shape' and hasattr(self.data, 'shape'):
            # BUGFIX: the original tested `'shape' in self.data.__dict__`,
            # but dicts and ndarrays have no __dict__, so reading .shape
            # raised AttributeError instead of returning the shape.
            return self.data.shape

        if item in self.__dict__:
            return self.__dict__[item]
        else:
            data = self.data[item]
            shape = data.shape
            if len(shape) == 2:
                if shape[0] == 1:
                    if shape[1] == 1:
                        # (1, 1) singleton: unwrap fully.
                        data = data[0, 0]
                        if data.shape == () and self.isscalar(data):
                            return data
                        return MatData(data=data)
                    elif shape[1] > 1:
                        # (1, N): drop the leading singleton dimension.
                        return MatData(data=data[0])
                    else:
                        return MatData(data=data)
                elif shape[0] == 0 or shape[1] == 0:
                    return None
                else:
                    return MatData(data=data)
            elif len(shape) == 1:
                if shape[0] == 1:
                    data = data[0]
                    if data.shape == () and self.isscalar(data):
                        return data
                    else:
                        return MatData(data=data)
                elif shape[0] == 0:
                    return None
                else:
                    return MatData(data=data)
            else:
                return MatData(data=data)

    def get_array_item(self, item):
        '''Like get_item but never unwraps down to a scalar: arrays stay
        wrapped so they can be iterated/indexed further.'''
        if item == 'shape' and hasattr(self.data, 'shape'):
            # Same fix as in get_item: dict/ndarray have no __dict__.
            return self.data.shape

        if item in self.__dict__:
            return self.__dict__[item]
        else:
            data = self.data[item]
            shape = data.shape
            if len(shape) == 2:
                if shape[0] == 1:
                    return MatData(data=data[0])
                elif shape[0] == 0 or shape[1] == 0:
                    return None
                else:
                    return MatData(data=data)
            elif len(shape) == 1:
                if shape[0] == 0:
                    return None
                else:
                    return MatData(data=data)
            else:
                return MatData(data=data)

    @staticmethod
    def isscalar(data):
        '''True when data is a supported numpy/str scalar type.'''
        # BUGFIX: np.str was removed in NumPy 1.24 (it was an alias of the
        # builtin str), so the original raised AttributeError on modern
        # NumPy; test against builtin str instead to keep the old semantics.
        type_str = type(data)
        return type_str in [np.uint8, np.int8, np.uint16, np.int16, np.int32, np.uint32,
                            np.int64, np.uint64, np.float32, np.float64, np.str_, str]

    def item(self):
        # Access to the raw wrapped value.
        return self.data

    def data_keys(self):
        # Field names of a structured (record) array.
        return self.data.dtype.names
@@ -0,0 +1,28 @@
1
+ import wml.wml_utils as wmlu
2
+ from wml.object_detection2.standard_names import *
3
+ import numpy as np
4
+
5
def statistics_mckeypoints(dataset):
    '''
    Print keypoint statistics over a multi-class keypoints dataset.

    Each sample is expected to be a mapping with GT_KEYPOINTS / GT_LABELS;
    accumulates the number of keypoints per class, keypoints per sample and
    the maximum number of distinct classes seen in a single image.
    '''
    counter = wmlu.Counter()
    points_per_sample = []
    max_classes_num_in_one_img = 0
    for data in dataset:
        points = data[GT_KEYPOINTS]
        labels = data[GT_LABELS]
        cur_nr = 0
        for l, p in zip(labels, points):
            nr = p.numel()  # hoisted: was computed twice per annotation
            counter.add(l, nr)
            cur_nr += nr
        max_classes_num_in_one_img = max(max_classes_num_in_one_img, len(set(labels)))
        points_per_sample.append(cur_nr)

    # ROBUSTNESS: avoid np.mean/max/min on an empty array for empty datasets.
    if len(points_per_sample) == 0:
        print(f"Total samples nr {len(dataset)}")
        return

    points_per_sample = np.array(points_per_sample)
    data = list(counter.items())
    data = sorted(data, key=lambda x: x[1], reverse=True)
    print(f"Total samples nr {len(dataset)}")
    # BUGFIX: the message said "Mean samples per sample" although the values
    # are keypoints per sample.
    print(f"Mean points per sample: {np.mean(points_per_sample)}, max: {np.max(points_per_sample)}, min: {np.min(points_per_sample)}")
    print(f"Max num classes per sample: {max_classes_num_in_one_img}")
    print(f"Points per classes:")
    wmlu.show_list(data)
@@ -0,0 +1,62 @@
1
+ import wml.wml_utils as wmlu
2
+ import os
3
+ import numpy as np
4
+
5
class MOTDatasets(object):
    '''
    Iterator over MOT-style tracking datasets.

    Each dataset dir in `dirs` contains per-sequence folders with
    seqinfo.ini, gt/gt.txt and img1/<frame>.jpg. get_data_items() yields one
    (img_data, annotations) pair per frame, where annotations is a list of
    [track_cls, [ymin, xmin, ymax, xmax]] in relative coordinates.
    '''

    def __init__(self, dirs):
        self.dirs = dirs
        # Globally increasing track id so ids stay unique across sequences.
        self.tid_curr = 0
        self.dir_curr = 0

    def get_data_items(self):
        for seq_root in self.dirs:
            seqs = wmlu.get_subdir_in_dir(seq_root)
            for seq in seqs:
                # BUGFIX: use a context manager so the ini file handle is
                # closed instead of leaked (the original used open().read()).
                with open(os.path.join(seq_root, seq, 'seqinfo.ini')) as f:
                    seq_info = f.read()
                # Crude ini parsing: slice the raw text between known keys.
                seq_width = int(seq_info[seq_info.find('imWidth=') + 8:seq_info.find('\nimHeight')])
                seq_height = int(seq_info[seq_info.find('imHeight=') + 9:seq_info.find('\nimExt')])

                gt_txt = os.path.join(seq_root, seq, 'gt', 'gt.txt')
                if not os.path.exists(gt_txt):
                    print(f"{gt_txt} not exists!")
                    continue
                gt = np.loadtxt(gt_txt, dtype=np.float64, delimiter=',')
                # lexsort treats the last key row as primary, so rows end up
                # sorted by the second column (presumably track id) and then
                # the first (frame id) — confirm if frame-major was intended;
                # grouping below is by frame id anyway.
                idx = np.lexsort(gt.T[:2, :])
                gt = gt[idx, :]

                # Group the kept annotations by frame id.
                fid_datas = {}
                for v in gt:
                    fid, tid, x, y, w, h, mark, label, _ = v[:9]
                    if mark == 0:
                        continue  # ignored annotation
                    # Outside MOT15 only label 1 is kept.
                    if 'MOT15' not in seq_root and label != 1:
                        continue
                    if fid in fid_datas:
                        fid_datas[fid].append(v[:9])
                    else:
                        fid_datas[fid] = [v[:9]]
                tid_to_cls = {}
                for fid, datas in fid_datas.items():
                    tmp_data = []
                    for _, tid, x, y, w, h, mark, label, _ in datas:
                        fid = int(fid)
                        tid = int(tid)
                        if tid not in tid_to_cls:
                            # New track: allocate the next global class id.
                            self.tid_curr += 1
                            tid_to_cls[tid] = self.tid_curr
                            cls = self.tid_curr
                        else:
                            cls = tid_to_cls[tid]
                        # Convert tlwh pixels to relative [ymin,xmin,ymax,xmax].
                        xmin = x / seq_width
                        ymin = y / seq_height
                        xmax = (x + w) / seq_width
                        ymax = (y + h) / seq_height
                        tmp_data.append([cls, [ymin, xmin, ymax, ymax if False else xmax][0:4] if False else [ymin, xmin, ymax, xmax]])

                    if len(tmp_data) > 0:
                        img_name = '{:06d}.jpg'.format(fid)
                        img_path = os.path.join(seq_root, seq, "img1", img_name)
                        img_data = {'img_width': seq_width, 'img_height': seq_height, 'img_path': img_path}
                        yield img_data, tmp_data

        print(f"Last tid value {self.tid_curr}")