python-wml 3.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of python-wml might be problematic. Click here for more details.
- python_wml-3.0.0.dist-info/LICENSE +23 -0
- python_wml-3.0.0.dist-info/METADATA +51 -0
- python_wml-3.0.0.dist-info/RECORD +164 -0
- python_wml-3.0.0.dist-info/WHEEL +5 -0
- python_wml-3.0.0.dist-info/top_level.txt +1 -0
- wml/__init__.py +0 -0
- wml/basic_data_def/__init__.py +2 -0
- wml/basic_data_def/detection_data_def.py +279 -0
- wml/basic_data_def/io_data_def.py +2 -0
- wml/basic_img_utils.py +816 -0
- wml/img_patch.py +92 -0
- wml/img_utils.py +571 -0
- wml/iotoolkit/__init__.py +17 -0
- wml/iotoolkit/aic_keypoint.py +115 -0
- wml/iotoolkit/baidu_mask_toolkit.py +244 -0
- wml/iotoolkit/base_dataset.py +210 -0
- wml/iotoolkit/bboxes_statistics.py +515 -0
- wml/iotoolkit/build.py +0 -0
- wml/iotoolkit/cityscapes_toolkit.py +183 -0
- wml/iotoolkit/classification_data_statistics.py +25 -0
- wml/iotoolkit/coco_data_fwd.py +225 -0
- wml/iotoolkit/coco_keypoints.py +118 -0
- wml/iotoolkit/coco_keypoints_fmt2.py +103 -0
- wml/iotoolkit/coco_toolkit.py +397 -0
- wml/iotoolkit/coco_wholebody.py +269 -0
- wml/iotoolkit/common.py +108 -0
- wml/iotoolkit/crowd_pose.py +146 -0
- wml/iotoolkit/fast_labelme.py +110 -0
- wml/iotoolkit/image_folder.py +95 -0
- wml/iotoolkit/imgs_cache.py +58 -0
- wml/iotoolkit/imgs_reader_mt.py +73 -0
- wml/iotoolkit/labelme_base.py +102 -0
- wml/iotoolkit/labelme_json_to_img.py +49 -0
- wml/iotoolkit/labelme_toolkit.py +117 -0
- wml/iotoolkit/labelme_toolkit_fwd.py +733 -0
- wml/iotoolkit/labelmemckeypoints_dataset.py +169 -0
- wml/iotoolkit/lspet.py +48 -0
- wml/iotoolkit/mapillary_vistas_toolkit.py +269 -0
- wml/iotoolkit/mat_data.py +90 -0
- wml/iotoolkit/mckeypoints_statistics.py +28 -0
- wml/iotoolkit/mot_datasets.py +62 -0
- wml/iotoolkit/mpii.py +108 -0
- wml/iotoolkit/npmckeypoints_dataset.py +164 -0
- wml/iotoolkit/o365_to_coco.py +136 -0
- wml/iotoolkit/object365_toolkit.py +156 -0
- wml/iotoolkit/object365v2_toolkit.py +71 -0
- wml/iotoolkit/pascal_voc_data.py +51 -0
- wml/iotoolkit/pascal_voc_toolkit.py +194 -0
- wml/iotoolkit/pascal_voc_toolkit_fwd.py +473 -0
- wml/iotoolkit/penn_action.py +57 -0
- wml/iotoolkit/rawframe_dataset.py +129 -0
- wml/iotoolkit/rewrite_pascal_voc.py +28 -0
- wml/iotoolkit/semantic_data.py +49 -0
- wml/iotoolkit/split_file_by_type.py +29 -0
- wml/iotoolkit/sports_mot_datasets.py +78 -0
- wml/iotoolkit/vis_objectdetection_dataset.py +70 -0
- wml/iotoolkit/vis_torch_data.py +39 -0
- wml/iotoolkit/yolo_toolkit.py +38 -0
- wml/object_detection2/__init__.py +4 -0
- wml/object_detection2/basic_visualization.py +37 -0
- wml/object_detection2/bboxes.py +812 -0
- wml/object_detection2/data_process_toolkit.py +146 -0
- wml/object_detection2/keypoints.py +292 -0
- wml/object_detection2/mask.py +120 -0
- wml/object_detection2/metrics/__init__.py +3 -0
- wml/object_detection2/metrics/build.py +15 -0
- wml/object_detection2/metrics/classifier_toolkit.py +440 -0
- wml/object_detection2/metrics/common.py +71 -0
- wml/object_detection2/metrics/mckps_toolkit.py +338 -0
- wml/object_detection2/metrics/toolkit.py +1953 -0
- wml/object_detection2/npod_toolkit.py +361 -0
- wml/object_detection2/odtools.py +243 -0
- wml/object_detection2/standard_names.py +75 -0
- wml/object_detection2/visualization.py +956 -0
- wml/object_detection2/wmath.py +34 -0
- wml/semantic/__init__.py +0 -0
- wml/semantic/basic_toolkit.py +65 -0
- wml/semantic/mask_utils.py +156 -0
- wml/semantic/semantic_test.py +21 -0
- wml/semantic/structures.py +1 -0
- wml/semantic/toolkit.py +105 -0
- wml/semantic/visualization_utils.py +658 -0
- wml/threadtoolkit.py +50 -0
- wml/walgorithm.py +228 -0
- wml/wcollections.py +212 -0
- wml/wfilesystem.py +487 -0
- wml/wml_utils.py +657 -0
- wml/wstructures/__init__.py +4 -0
- wml/wstructures/common.py +9 -0
- wml/wstructures/keypoints_train_toolkit.py +149 -0
- wml/wstructures/kps_structures.py +579 -0
- wml/wstructures/mask_structures.py +1161 -0
- wml/wtorch/__init__.py +8 -0
- wml/wtorch/bboxes.py +104 -0
- wml/wtorch/classes_suppression.py +24 -0
- wml/wtorch/conv_module.py +181 -0
- wml/wtorch/conv_ws.py +144 -0
- wml/wtorch/data/__init__.py +16 -0
- wml/wtorch/data/_utils/__init__.py +45 -0
- wml/wtorch/data/_utils/collate.py +183 -0
- wml/wtorch/data/_utils/fetch.py +47 -0
- wml/wtorch/data/_utils/pin_memory.py +121 -0
- wml/wtorch/data/_utils/signal_handling.py +72 -0
- wml/wtorch/data/_utils/worker.py +227 -0
- wml/wtorch/data/base_data_loader_iter.py +93 -0
- wml/wtorch/data/dataloader.py +501 -0
- wml/wtorch/data/datapipes/__init__.py +1 -0
- wml/wtorch/data/datapipes/iter/__init__.py +12 -0
- wml/wtorch/data/datapipes/iter/batch.py +126 -0
- wml/wtorch/data/datapipes/iter/callable.py +92 -0
- wml/wtorch/data/datapipes/iter/listdirfiles.py +37 -0
- wml/wtorch/data/datapipes/iter/loadfilesfromdisk.py +30 -0
- wml/wtorch/data/datapipes/iter/readfilesfromtar.py +60 -0
- wml/wtorch/data/datapipes/iter/readfilesfromzip.py +63 -0
- wml/wtorch/data/datapipes/iter/sampler.py +94 -0
- wml/wtorch/data/datapipes/utils/__init__.py +0 -0
- wml/wtorch/data/datapipes/utils/common.py +65 -0
- wml/wtorch/data/dataset.py +354 -0
- wml/wtorch/data/datasets/__init__.py +4 -0
- wml/wtorch/data/datasets/common.py +53 -0
- wml/wtorch/data/datasets/listdirfilesdataset.py +36 -0
- wml/wtorch/data/datasets/loadfilesfromdiskdataset.py +30 -0
- wml/wtorch/data/distributed.py +135 -0
- wml/wtorch/data/multi_processing_data_loader_iter.py +866 -0
- wml/wtorch/data/sampler.py +267 -0
- wml/wtorch/data/single_process_data_loader_iter.py +24 -0
- wml/wtorch/data/test_data_loader.py +26 -0
- wml/wtorch/dataset_toolkit.py +67 -0
- wml/wtorch/depthwise_separable_conv_module.py +98 -0
- wml/wtorch/dist.py +591 -0
- wml/wtorch/dropblock/__init__.py +6 -0
- wml/wtorch/dropblock/dropblock.py +228 -0
- wml/wtorch/dropblock/dropout.py +40 -0
- wml/wtorch/dropblock/scheduler.py +48 -0
- wml/wtorch/ema.py +61 -0
- wml/wtorch/fc_module.py +73 -0
- wml/wtorch/functional.py +34 -0
- wml/wtorch/iter_dataset.py +26 -0
- wml/wtorch/loss.py +69 -0
- wml/wtorch/nets/__init__.py +0 -0
- wml/wtorch/nets/ckpt_toolkit.py +219 -0
- wml/wtorch/nets/fpn.py +276 -0
- wml/wtorch/nets/hrnet/__init__.py +0 -0
- wml/wtorch/nets/hrnet/config.py +2 -0
- wml/wtorch/nets/hrnet/hrnet.py +494 -0
- wml/wtorch/nets/misc.py +249 -0
- wml/wtorch/nets/resnet/__init__.py +0 -0
- wml/wtorch/nets/resnet/layers/__init__.py +17 -0
- wml/wtorch/nets/resnet/layers/aspp.py +144 -0
- wml/wtorch/nets/resnet/layers/batch_norm.py +231 -0
- wml/wtorch/nets/resnet/layers/blocks.py +111 -0
- wml/wtorch/nets/resnet/layers/wrappers.py +110 -0
- wml/wtorch/nets/resnet/r50_config.py +38 -0
- wml/wtorch/nets/resnet/resnet.py +691 -0
- wml/wtorch/nets/shape_spec.py +20 -0
- wml/wtorch/nets/simple_fpn.py +101 -0
- wml/wtorch/nms.py +109 -0
- wml/wtorch/nn.py +896 -0
- wml/wtorch/ocr_block.py +193 -0
- wml/wtorch/summary.py +331 -0
- wml/wtorch/train_toolkit.py +603 -0
- wml/wtorch/transformer_blocks.py +266 -0
- wml/wtorch/utils.py +719 -0
- wml/wtorch/wlr_scheduler.py +100 -0
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import numpy as np
|
|
3
|
+
import os.path as osp
|
|
4
|
+
import math
|
|
5
|
+
|
|
6
|
+
'''
|
|
7
|
+
0: 'Right Shoulder',
|
|
8
|
+
1: 'Right Elbow',
|
|
9
|
+
2: 'Right Wrist',
|
|
10
|
+
3: 'Left Shoulder',
|
|
11
|
+
4: 'Left Elbow',
|
|
12
|
+
5: 'Left Wrist',
|
|
13
|
+
6: 'Right Hip',
|
|
14
|
+
7: 'Right Knee',
|
|
15
|
+
8: 'Right Ankle',
|
|
16
|
+
9: 'Left Hip',
|
|
17
|
+
10: 'Left Knee',
|
|
18
|
+
11: 'Left Ankle',
|
|
19
|
+
12: 'Head top',
|
|
20
|
+
13: 'Neck'
|
|
21
|
+
'''
|
|
22
|
+
def read_aic_keypoint(file_path,sub_dir_name=None):
    """Load AI-Challenger human keypoint annotations from a JSON file.

    Args:
        file_path: path to the AIC annotation json (a list of records with
            'image_id', 'human_annotations' and 'keypoint_annotations').
        sub_dir_name: optional directory prefix joined onto each image name.

    Returns:
        list of [img_name+".jpg", bboxes, kps] per image, where bboxes is a
        float32 array of shape [N,4] and kps a float32 array of shape [N,14,3].
    """
    with open(file_path, "r") as f:
        datas = json.load(f)

    res = []
    for data in datas:
        img_name = data['image_id']
        human_data = data['human_annotations']
        person_kps = []
        person_bboxes = []
        for human_id, raw_kps in data['keypoint_annotations'].items():
            pts = np.reshape(np.array(raw_kps, dtype=np.float32), [-1, 3])
            # Visibility flag 3 marks unlabeled points; zero the whole triplet.
            pts[pts[..., -1] == 3] = 0
            person_kps.append(pts)
            person_bboxes.append(np.array(human_data[human_id], dtype=np.float32))
        if sub_dir_name is not None:
            img_name = osp.join(sub_dir_name, img_name)
        res.append([img_name + ".jpg", np.array(person_bboxes), np.array(person_kps)])

    return res
|
|
47
|
+
|
|
48
|
+
class Trans2COCO:
    """Convert AIC 14-keypoint annotations into COCO 17-keypoint layout.

    Body joints are remapped from AIC indices onto their COCO slots; the
    COCO face keypoints (0-4, nose/eyes/ears) are optionally copied from an
    existing COCO annotation when that annotation looks consistent with the
    AIC head position.
    """

    def __init__(self) -> None:
        # COCO slots to fill and the AIC indices that feed them, in lockstep.
        self.dst_idxs = [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
        self.src_idxs = [3, 0, 4, 1, 5, 2, 9, 6, 10, 7, 11, 8]
        # COCO face keypoint indices.
        self.coco_idxs = [0, 1, 2, 3, 4]

    @staticmethod
    def get_head_pos(kp):
        """Return a square [x0,y0,x1,y1] box around the head, or None.

        Derived from the AIC 'Head top' (12) and 'Neck' (13) points; None if
        either point is unlabeled.
        """
        top, neck = kp[12], kp[13]
        if not (top[2] > 0 and neck[2] > 0):
            return None
        dx = top[0] - neck[0]
        dy = top[1] - neck[1]
        half = math.sqrt(dx * dx + dy * dy) * 0.75
        cx = (top[0] + neck[0]) / 2
        cy = (top[1] + neck[1]) / 2
        return [cx - half, cy - half, cx + half, cy + half]

    @staticmethod
    def kps_in_bbox(kps, bbox):
        """True iff every keypoint is labeled and lies inside bbox (x0,y0,x1,y1)."""
        for px, py, vis in kps:
            if vis <= 0:
                return False
            if px < bbox[0] or px > bbox[2] or py < bbox[1] or py > bbox[3]:
                return False
        return True

    def __call__(self, mpii_kps, coco_kps=None):
        """Translate one [14,3] annotation or a batch [B,14,3] of them."""
        if len(mpii_kps.shape) == 2:
            return self.trans_one(mpii_kps, coco_kps)
        if coco_kps is None:
            return np.array([self.trans_one(one) for one in mpii_kps])
        return np.array([self.trans_one(one, other) for one, other in zip(mpii_kps, coco_kps)])

    def trans_one(self, mpii_kps, coco_kps=None):
        '''
        img: [RGB]
        '''
        res = np.zeros([17, 3], dtype=np.float32)
        res[self.dst_idxs] = mpii_kps[self.src_idxs]
        if coco_kps is None:
            return res

        # Sanity check: COCO left/right shoulder (5,6) and hip (11,12) pairs
        # must both be labeled and not swapped before we trust the face points.
        is_good = True
        for left, right in ((5, 6), (11, 12)):
            if res[left, 0] < res[right, 0] or res[left, 2] < 0.1 or res[right, 2] < 0.1:
                is_good = False
                break

        head_bbox = self.get_head_pos(mpii_kps)
        if is_good and head_bbox is not None and self.kps_in_bbox(coco_kps[self.coco_idxs], head_bbox):
            res[self.coco_idxs] = coco_kps[self.coco_idxs]
        return res
|
|
@@ -0,0 +1,244 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import cv2
|
|
3
|
+
import wml.wml_utils as wmlu
|
|
4
|
+
import numpy as np
|
|
5
|
+
import os
|
|
6
|
+
import cv2 as cv
|
|
7
|
+
import sys
|
|
8
|
+
import random
|
|
9
|
+
from .common import *
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def get_files(dir_path, img_sub_dir=None, label_sub_dir=None):
    """Pair every .json label file under ``dir_path`` with its .jpg image.

    Args:
        dir_path: dataset root directory.
        img_sub_dir: optional sub directory (relative to dir_path) holding
            the images; defaults to dir_path itself.
        label_sub_dir: optional sub directory holding the json label files;
            defaults to dir_path itself.

    Returns:
        list of (img_path, json_path) tuples. A json file whose matching
        image is missing is reported and skipped.
    """
    img_dir = os.path.join(dir_path, img_sub_dir) if img_sub_dir is not None else dir_path
    label_dir = os.path.join(dir_path, label_sub_dir) if label_sub_dir is not None else dir_path

    res = []
    json_files = wmlu.recurse_get_filepath_in_dir(label_dir, suffix=".json")
    for jf in json_files:
        base_name = wmlu.base_name(jf)
        # Images are matched by base name; only .jpg is considered here.
        igf = os.path.join(img_dir, base_name + ".jpg")
        if os.path.exists(igf):
            res.append((igf, jf))
        else:
            # Typo fixed in the message: "faild" -> "failed".
            print(f"ERROR: Find {igf} failed, json file is {jf}")

    return res
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class BaiDuMaskData(object):
    """Reader for Baidu-style polygon mask annotations.

    Each sample is an image plus a json file holding a list of shapes:
    ``{'name': label_name, 'labelIdx': label, 'points': [[x, y], ...],
    'color': ...}``. Polygons are rasterized into per-instance binary masks.
    """

    def __init__(self, trans_label=None, shuffle=False, img_sub_dir=None, label_sub_dir=None,
                 ignored_labels=None, label_map=None,
                 allowed_labels_fn=None, overlap=True):
        '''
        trans_label: kept for interface compatibility (not used by this class)
        shuffle: shuffle the file list after read_data
        ignored_labels: labels dropped while reading (None/[] == keep all)
        label_map: optional {src_label: dst_label} remapping
        allowed_labels_fn: callable(label)->bool, or a list of allowed labels
        overlap: if True, a later annotation overwrites earlier ones pixel-wise
        '''
        self.files = None
        self.shuffle = shuffle
        self.trans_label = trans_label
        self.img_sub_dir = img_sub_dir
        self.label_sub_dir = label_sub_dir
        # BUGFIX: the defaults used to be the mutable literals [] and {};
        # None sentinels are behaviorally identical here because both values
        # are only membership-tested in read_json.
        self.ignored_labels = [] if ignored_labels is None else ignored_labels
        self.label_map = {} if label_map is None else label_map
        # An empty list means "no restriction"; a non-empty list becomes a
        # membership-test function.
        if allowed_labels_fn is None or (isinstance(allowed_labels_fn, list) and len(allowed_labels_fn) == 0):
            self.allowed_labels_fn = None
        elif isinstance(allowed_labels_fn, list):
            self.allowed_labels_fn = lambda x: x in allowed_labels_fn
        else:
            self.allowed_labels_fn = allowed_labels_fn
        self.overlap = overlap

    def read_data(self, dir_path):
        """Collect (image, json) pairs below dir_path; optionally shuffle them."""
        self.files = get_files(dir_path, self.img_sub_dir, self.label_sub_dir)
        if self.shuffle:
            random.shuffle(self.files)

    def __len__(self):
        return len(self.files)

    def get_items(self, beg=0, end=None, filter=None):
        '''
        Yield one DetData per sample in files[beg:end].
        :return:
            binary_masks [N,H,W], value is 0 or 1,
            full_path,img_size,category_ids,category_names,boxes,binary_masks,area,is_crowd,num_annotations_skipped
        '''
        if end is None:
            end = len(self.files)
        if beg is None:
            beg = 0
        for i, (img_file, json_file) in enumerate(self.files[beg:end]):
            # filter(img_file, json_file) -> bool decides whether to keep a sample.
            if filter is not None and not filter(img_file, json_file):
                continue
            print(img_file, json_file)
            sys.stdout.write('\r>> read data %d/%d' % (i + 1, len(self.files)))
            sys.stdout.flush()
            image, annotations_list = self.read_json(json_file, img_file)
            masks = [ann["segmentation"] for ann in annotations_list]
            labels = [ann['category_id'] for ann in annotations_list]
            labels_names = [ann['category_name'] for ann in annotations_list]
            bboxes = [ann['bbox'] for ann in annotations_list]
            if len(masks) > 0:
                try:
                    masks = np.stack(masks, axis=0)
                except Exception:
                    # BUGFIX: was a bare except; keep the best-effort fallback
                    # but don't swallow KeyboardInterrupt/SystemExit. Also
                    # fixed "faild" -> "failed" in the message.
                    print("ERROR: stack masks failed.")
                    masks = None

            yield DetData(img_file, [image['height'], image['width']], labels, labels_names, bboxes, masks, None, None, None)

    def get_boxes_items(self):
        '''
        Yield one DetBboxesData (bboxes only, no masks) per sample.
        :return:
            full_path,img_size,category_ids,boxes,is_crowd
        '''
        for i, (img_file, json_file) in enumerate(self.files):
            sys.stdout.write('\r>> read data %d/%d' % (i + 1, len(self.files)))
            sys.stdout.flush()
            # BUGFIX: read_json requires img_path as its second argument; it
            # used to be omitted here, raising TypeError on every call.
            image, annotations_list = self.read_json(json_file, img_file, use_semantic=False)
            labels = [ann['category_id'] for ann in annotations_list]
            bboxes = [ann['bbox'] for ann in annotations_list]
            # file, img_size, category_ids, bboxes, is_crowd
            yield DetBboxesData(img_file, [image['height'], image['width']], labels, bboxes, None)

    @staticmethod
    def write_json(save_path, mask, labels2name, labels2color, label_trans=None, epsilon=None):
        '''
        save_path: json save path
        mask: [H,W] or [N,H,W] uint8; N is the number of instances, at the
            original image resolution
        labels2name: list of (label, label_name) tuples, one per instance
        labels2color: {label: label_color} or a list of colors, one per class
        label_trans: optional label remapping applied to the stored labelIdx
        epsilon: if given, polygons are simplified with cv2.approxPolyDP
        '''
        shapes = []
        for i, (k, n) in enumerate(labels2name):
            if len(mask.shape) == 2:
                # Semantic mask: select this label's pixels.
                mask_tmp = (mask == k).astype(np.uint8)
            else:
                # Instance masks: one channel per instance.
                mask_tmp = mask[i]
            if np.sum(mask_tmp) == 0:
                continue
            contours, hierarchy = cv.findContours(mask_tmp, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
            for cont in contours:
                if epsilon is not None:
                    cont = cv2.approxPolyDP(cont, epsilon, True)
                points = cont
                if len(cont.shape) == 3 and cont.shape[1] == 1:
                    # findContours returns [N,1,2]; squeeze to [N,2].
                    points = np.squeeze(points, axis=1)
                points = points.tolist()
                if len(points) < 3:
                    # Degenerate polygon, skip.
                    continue
                tmp_data = {'name': n}
                if label_trans is not None:
                    tmp_data['labelIdx'] = int(label_trans[k])
                else:
                    tmp_data['labelIdx'] = int(k)
                tmp_data["points"] = points
                tmp_data['color'] = labels2color[k]
                shapes.append(tmp_data)
        with open(save_path, "w") as f:
            json.dump(shapes, f)

    def read_json(self, file_path, img_path, use_semantic=True):
        """Parse one annotation json; return (image_info, annotations_list).

        image_info: {'height','width','file_name'}; the size is taken from the
        image file itself (the json carries no size).
        Each annotation: bbox (x, y, w, h), category_id/name, polygon points
        and, when use_semantic is True, a rasterized binary mask.
        """
        annotations_list = []
        image = {}
        img = cv.imread(img_path)
        # gb18030 covers the Chinese label names these files may contain.
        with open(file_path, "r", encoding="gb18030") as f:
            print(file_path)
            data_str = f.read()
            try:
                json_data = json.loads(data_str)
                img_width = int(img.shape[1])
                img_height = int(img.shape[0])
                image["height"] = int(img_height)
                image["width"] = int(img_width)
                image["file_name"] = wmlu.base_name(file_path)
                for shape in json_data:
                    label = shape["labelIdx"]
                    label_name = shape['name']
                    if self.ignored_labels is not None and label in self.ignored_labels:
                        continue
                    if self.allowed_labels_fn is not None and not self.allowed_labels_fn(label):
                        continue
                    if self.label_map is not None and label in self.label_map:
                        label = self.label_map[label]
                    mask = np.zeros(shape=[img_height, img_width], dtype=np.uint8)
                    all_points = np.array([shape["points"]]).astype(np.int32)
                    if len(all_points) < 1:
                        continue
                    points = np.transpose(all_points[0])
                    x, y = np.vsplit(points, 2)
                    x = np.reshape(x, [-1])
                    y = np.reshape(y, [-1])
                    # Clamp polygon coordinates into the image bounds.
                    x = np.minimum(np.maximum(0, x), img_width - 1)
                    y = np.minimum(np.maximum(0, y), img_height - 1)
                    xmin = np.min(x)
                    xmax = np.max(x)
                    ymin = np.min(y)
                    ymax = np.max(y)
                    if use_semantic:
                        segmentation = cv.drawContours(mask, all_points, -1, color=(1), thickness=cv.FILLED)
                        annotations_list.append({"bbox": (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1),
                                                 "segmentation": segmentation,
                                                 "category_id": label,
                                                 "category_name": label_name,
                                                 "points_x": x,
                                                 "points_y": y})
                    else:
                        annotations_list.append({"bbox": (xmin, ymin, xmax - xmin + 1, ymax - ymin + 1),
                                                 "category_id": label,
                                                 "category_name": label_name,
                                                 "points_x": x,
                                                 "points_y": y})
            except Exception as e:
                # BUGFIX: was a bare except hiding the cause; also fixed the
                # "faild" typo in the message.
                print(f"Read file {os.path.basename(file_path)} failed: {e}.")
        if use_semantic and self.overlap:
            '''
            Each pixel only belong to one classes, and the latter annotation will overwrite the previous
            '''
            # NOTE(review): the `> 2` guard skips overlap resolution when there
            # are exactly two annotations — looks like it should be `> 1`;
            # kept as-is to preserve behavior, confirm with the data owner.
            if len(annotations_list) > 2:
                mask = 1 - annotations_list[-1]['segmentation']
                for i in reversed(range(len(annotations_list) - 1)):
                    annotations_list[i]['segmentation'] = np.logical_and(annotations_list[i]['segmentation'], mask)
                    mask = np.logical_and(mask, 1 - annotations_list[i]['segmentation'])
        return image, annotations_list
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
if __name__ == "__main__":
    # Visual smoke test: draw bboxes/masks for every sample of a dataset.
    import wml.img_utils as wmli
    import wml.object_detection2.visualization as odv
    import matplotlib.pyplot as plt

    ID2NAME = {1: "person", 2: "seat_belt"}

    def text_fn(classes, scores):
        # scores is unused; display label names only.
        # (Hoisted out of the loop: it was redefined on every iteration.)
        return f"{ID2NAME[classes]}"

    data = BaiDuMaskData(label_sub_dir="label", shuffle=False)
    data.read_data("/home/wj/ai/mldata1/safety_belt/train_1")

    # Removed: unused `id = 0` (shadowed the builtin) and a dead single-arg
    # `filter` function that never matched get_items' 2-argument filter API.
    for x in data.get_items():
        full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        if len(category_ids) == 0:
            continue
        # Read the image only for samples that will actually be displayed
        # (it used to be loaded before the empty-sample check).
        img = wmli.imread(full_path)
        odv.draw_bboxes_and_maskv2(
            img=img, classes=category_ids, scores=None, bboxes=boxes, masks=binary_mask, color_fn=None,
            text_fn=text_fn, thickness=4,
            show_text=True,
            fontScale=0.8)
        plt.figure()
        plt.imshow(img)
        plt.show()
|
|
244
|
+
|
|
@@ -0,0 +1,210 @@
|
|
|
1
|
+
import random
|
|
2
|
+
import os
|
|
3
|
+
import os.path as osp
|
|
4
|
+
import wml.wml_utils as wmlu
|
|
5
|
+
from .common import resample,ignore_case_dict_label_text2id
|
|
6
|
+
from abc import ABCMeta, abstractmethod
|
|
7
|
+
from functools import partial
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class BaseDataset(metaclass=ABCMeta):
    """Common machinery for annotation datasets.

    Subclasses implement find_files_in_dir (discover samples in one
    directory) and get_labels (extract labels from one sample); this base
    handles path resolution, data-list (.dl) files, multi-directory input,
    per-directory repetition, empty/error filtering and class resampling.
    """

    def __init__(self, label_text2id=None,
                 filter_empty_files=False, filter_error=False, resample_parameters=None, shuffle=True, silent=False, keep_no_ann_imgs=False, absolute_coord=True, mask_on=False):
        '''
        filter_empty_files: remove files without any objects
        filter_error: remove files whose annotation can not be read
        keep_no_ann_imgs: keep images without annotation files
        label_text2id: func(name)->int, or a dict (matched case-insensitively)
        resample_parameters: {label or label_name: repeat_nr} for oversampling
        '''
        self.files = None
        if isinstance(label_text2id, dict):
            # Dict input: wrap it in a case-insensitive lookup function.
            self.label_text2id = partial(ignore_case_dict_label_text2id,
                                         dict_data=wmlu.trans_dict_key2lower(label_text2id))
        else:
            self.label_text2id = label_text2id
        self.filter_empty_files = filter_empty_files
        self.filter_error = filter_error

        if resample_parameters is not None:
            # Translate any text keys to numeric labels.
            self.resample_parameters = {}
            for k, v in resample_parameters.items():
                if isinstance(k, (str, bytes)) and self.label_text2id is not None:
                    k = self.label_text2id(k)
                self.resample_parameters[k] = v
            print("resample parameters")
            wmlu.show_dict(self.resample_parameters)
        else:
            self.resample_parameters = None

        self.shuffle = shuffle
        self.silent = silent
        self.keep_no_ann_imgs = keep_no_ann_imgs
        self.absolute_coord = absolute_coord
        self.mask_on = mask_on

    def get_files_from_data_list_file(self, file_path):
        """Read a .dl data-list file: one entry per line; a line containing
        commas is split into a list of fields."""
        res = []
        with open(file_path, "r") as f:
            lines = f.readlines()
            for line in lines:
                line = line.strip()
                if "," in line:
                    line = line.split(",")
                res.append(line)
        return res

    def find_files(self, dir_path, img_suffix):
        """Resolve dir_path into a flat file list.

        Accepted forms: a directory, a .dl list file, a list of directories,
        a list of (directory, repeat_nr) pairs, or an explicit file list.
        """
        dir_path = self.process_path(dir_path)
        # A single-element list degrades to its only entry.
        if isinstance(dir_path, (list, tuple)) and len(dir_path) == 1 and isinstance(dir_path[0], (str, bytes)):
            dir_path = dir_path[0]

        if isinstance(dir_path, str):
            # Read data from a single directory (or a .dl list file).
            print(f"Read {dir_path}")
            if not os.path.exists(dir_path):
                print(f"Data path {dir_path} not exists.")
                # BUGFIX: used to return False, which crashed callers that
                # expect a list (read_data does len()/filtering on the result).
                return []
            files = self.find_files_in_dir_or_dl(dir_path, img_suffix=img_suffix)
        elif isinstance(dir_path, (list, tuple)):
            if isinstance(dir_path[0], (str, bytes)) and os.path.isdir(dir_path[0]):
                '''
                Read data from several directories, e.g.
                [/dataset/dir0,/dataset/dir1,/dataset/dir2]
                '''
                files = self.find_files_in_dirs(dir_path, img_suffix=img_suffix)
            elif isinstance(dir_path[0], (list, tuple)) and not isinstance(dir_path[0][1], (str, bytes)):
                '''
                Per-directory resampling, e.g.
                [(/dataset/dir0,3),(/dataset/dir1,1),(/dataset/dir2,100)]
                '''
                files = self.find_files_in_dirs_with_repeat(dir_path, img_suffix=img_suffix)
            else:
                # Already an explicit file list.
                files = dir_path

        return files

    def find_files_in_dir_or_dl(self, dir_path, img_suffix):
        """Dispatch: a .dl file is parsed as a data list, anything else is
        handed to the subclass's find_files_in_dir."""
        if osp.isfile(dir_path) and osp.splitext(dir_path)[-1].lower() == ".dl":
            files = self.get_files_from_data_list_file(dir_path)
        else:
            files = self.find_files_in_dir(dir_path, img_suffix=img_suffix)

        return files

    @abstractmethod
    def find_files_in_dir(self, dir_path, img_suffix):
        """Subclass hook: return the sample list found in one directory."""
        pass

    def find_files_in_dirs(self, dirs, img_suffix=".jpg"):
        """Concatenate the samples found in each directory of ``dirs``."""
        all_files = []
        for dir_path in dirs:
            files = self.find_files_in_dir_or_dl(dir_path, img_suffix)
            print(f"Find {len(files)} in {dir_path}")
            all_files.extend(files)

        return all_files

    def find_files_in_dirs_with_repeat(self, dirs, img_suffix=".jpg"):
        """Like find_files_in_dirs, but each entry is (dir, repeat_nr) and the
        directory's samples are duplicated repeat_nr times."""
        all_files = []
        raw_nr = 0
        for dir_path, repeat_nr in dirs:
            files = self.find_files_in_dir_or_dl(dir_path, img_suffix)
            files_nr = len(files)
            if repeat_nr > 1:
                files = list(files) * int(repeat_nr)

            print(f"Find {files_nr} in {dir_path}, repeat to {len(files)} files")
            raw_nr += files_nr
            all_files.extend(list(files))

        print(f"Total find {raw_nr} files, repeat to {len(all_files)} files.")
        return all_files

    def read_data(self, dir_path, img_suffix=".jpg;;.bmp;;.png;;.jpeg"):
        """Discover samples, apply the configured filters/resampling, and
        store the result in self.files (optionally shuffled)."""
        _files = self.find_files(dir_path, img_suffix=img_suffix)
        if self.filter_empty_files and self.label_text2id:
            _files = self.apply_filter_empty_files(_files)
        elif self.filter_error:
            _files = self.apply_filter_error_files(_files)
        if self.resample_parameters is not None and self.label_text2id:
            _files = self.resample(_files)

        self.files = _files

        if isinstance(dir_path, (str, bytes)):
            print(f"Total find {len(self.files)} in {dir_path}")
        else:
            print(f"Total find {len(self.files)}")

        if self.shuffle:
            random.shuffle(self.files)

        print("Files")
        # Show at most the first 100 files to keep the log readable.
        wmlu.show_list(self.files[:100])
        if len(self.files) > 100:
            print("...")

    def apply_filter_empty_files(self, files):
        """Drop samples whose labels are all None (i.e. no usable objects)."""
        new_files = []
        for fs in files:
            try:
                labels, labels_names = self.get_labels(fs)
                is_none = [x is None for x in labels]
                if not all(is_none):
                    new_files.append(fs)
                else:
                    print(f"File {fs[1]} is empty, remove from dataset, labels names {labels_names}, labels {labels}")
            except Exception as e:
                # Typo fixed in the message: "faild" -> "failed".
                print(f"Read {fs[1]} failed, info: {e}.")

        return new_files

    def apply_filter_error_files(self, files):
        """Drop samples whose annotations raise while being read."""
        new_files = []
        for fs in files:
            try:
                labels, labels_names = self.get_labels(fs)
                new_files.append(fs)
            except Exception as e:
                # Typo fixed in the message: "faild" -> "failed".
                print(f"Read {fs[1]} failed, info: {e}.")

        return new_files

    @abstractmethod
    def get_labels(self, fs):
        """Subclass hook: return (labels, labels_names) for one sample."""
        pass

    def resample(self, files):
        """Oversample files according to self.resample_parameters."""
        all_labels = []
        for fs in files:
            try:
                labels, _ = self.get_labels(fs)
                all_labels.append(labels)
            except Exception as e:
                print(f"Basedataset resample error: {e}")

        return resample(files, all_labels, self.resample_parameters)

    def __len__(self):
        return len(self.files)

    @staticmethod
    def process_path(path):
        """Normalize a path / nested structure of paths: expand ~ and make
        every string entry absolute; non-string entries pass through."""
        if isinstance(path, (list, tuple)):
            return [BaseDataset.process_path(x) for x in path]
        if isinstance(path, (int, float)):
            return path
        if isinstance(path, (str, bytes)):
            return osp.abspath(osp.expanduser(path))
        return path
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
|