python-wml 3.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of python-wml might be problematic. Click here for more details.
- python_wml-3.0.0.dist-info/LICENSE +23 -0
- python_wml-3.0.0.dist-info/METADATA +51 -0
- python_wml-3.0.0.dist-info/RECORD +164 -0
- python_wml-3.0.0.dist-info/WHEEL +5 -0
- python_wml-3.0.0.dist-info/top_level.txt +1 -0
- wml/__init__.py +0 -0
- wml/basic_data_def/__init__.py +2 -0
- wml/basic_data_def/detection_data_def.py +279 -0
- wml/basic_data_def/io_data_def.py +2 -0
- wml/basic_img_utils.py +816 -0
- wml/img_patch.py +92 -0
- wml/img_utils.py +571 -0
- wml/iotoolkit/__init__.py +17 -0
- wml/iotoolkit/aic_keypoint.py +115 -0
- wml/iotoolkit/baidu_mask_toolkit.py +244 -0
- wml/iotoolkit/base_dataset.py +210 -0
- wml/iotoolkit/bboxes_statistics.py +515 -0
- wml/iotoolkit/build.py +0 -0
- wml/iotoolkit/cityscapes_toolkit.py +183 -0
- wml/iotoolkit/classification_data_statistics.py +25 -0
- wml/iotoolkit/coco_data_fwd.py +225 -0
- wml/iotoolkit/coco_keypoints.py +118 -0
- wml/iotoolkit/coco_keypoints_fmt2.py +103 -0
- wml/iotoolkit/coco_toolkit.py +397 -0
- wml/iotoolkit/coco_wholebody.py +269 -0
- wml/iotoolkit/common.py +108 -0
- wml/iotoolkit/crowd_pose.py +146 -0
- wml/iotoolkit/fast_labelme.py +110 -0
- wml/iotoolkit/image_folder.py +95 -0
- wml/iotoolkit/imgs_cache.py +58 -0
- wml/iotoolkit/imgs_reader_mt.py +73 -0
- wml/iotoolkit/labelme_base.py +102 -0
- wml/iotoolkit/labelme_json_to_img.py +49 -0
- wml/iotoolkit/labelme_toolkit.py +117 -0
- wml/iotoolkit/labelme_toolkit_fwd.py +733 -0
- wml/iotoolkit/labelmemckeypoints_dataset.py +169 -0
- wml/iotoolkit/lspet.py +48 -0
- wml/iotoolkit/mapillary_vistas_toolkit.py +269 -0
- wml/iotoolkit/mat_data.py +90 -0
- wml/iotoolkit/mckeypoints_statistics.py +28 -0
- wml/iotoolkit/mot_datasets.py +62 -0
- wml/iotoolkit/mpii.py +108 -0
- wml/iotoolkit/npmckeypoints_dataset.py +164 -0
- wml/iotoolkit/o365_to_coco.py +136 -0
- wml/iotoolkit/object365_toolkit.py +156 -0
- wml/iotoolkit/object365v2_toolkit.py +71 -0
- wml/iotoolkit/pascal_voc_data.py +51 -0
- wml/iotoolkit/pascal_voc_toolkit.py +194 -0
- wml/iotoolkit/pascal_voc_toolkit_fwd.py +473 -0
- wml/iotoolkit/penn_action.py +57 -0
- wml/iotoolkit/rawframe_dataset.py +129 -0
- wml/iotoolkit/rewrite_pascal_voc.py +28 -0
- wml/iotoolkit/semantic_data.py +49 -0
- wml/iotoolkit/split_file_by_type.py +29 -0
- wml/iotoolkit/sports_mot_datasets.py +78 -0
- wml/iotoolkit/vis_objectdetection_dataset.py +70 -0
- wml/iotoolkit/vis_torch_data.py +39 -0
- wml/iotoolkit/yolo_toolkit.py +38 -0
- wml/object_detection2/__init__.py +4 -0
- wml/object_detection2/basic_visualization.py +37 -0
- wml/object_detection2/bboxes.py +812 -0
- wml/object_detection2/data_process_toolkit.py +146 -0
- wml/object_detection2/keypoints.py +292 -0
- wml/object_detection2/mask.py +120 -0
- wml/object_detection2/metrics/__init__.py +3 -0
- wml/object_detection2/metrics/build.py +15 -0
- wml/object_detection2/metrics/classifier_toolkit.py +440 -0
- wml/object_detection2/metrics/common.py +71 -0
- wml/object_detection2/metrics/mckps_toolkit.py +338 -0
- wml/object_detection2/metrics/toolkit.py +1953 -0
- wml/object_detection2/npod_toolkit.py +361 -0
- wml/object_detection2/odtools.py +243 -0
- wml/object_detection2/standard_names.py +75 -0
- wml/object_detection2/visualization.py +956 -0
- wml/object_detection2/wmath.py +34 -0
- wml/semantic/__init__.py +0 -0
- wml/semantic/basic_toolkit.py +65 -0
- wml/semantic/mask_utils.py +156 -0
- wml/semantic/semantic_test.py +21 -0
- wml/semantic/structures.py +1 -0
- wml/semantic/toolkit.py +105 -0
- wml/semantic/visualization_utils.py +658 -0
- wml/threadtoolkit.py +50 -0
- wml/walgorithm.py +228 -0
- wml/wcollections.py +212 -0
- wml/wfilesystem.py +487 -0
- wml/wml_utils.py +657 -0
- wml/wstructures/__init__.py +4 -0
- wml/wstructures/common.py +9 -0
- wml/wstructures/keypoints_train_toolkit.py +149 -0
- wml/wstructures/kps_structures.py +579 -0
- wml/wstructures/mask_structures.py +1161 -0
- wml/wtorch/__init__.py +8 -0
- wml/wtorch/bboxes.py +104 -0
- wml/wtorch/classes_suppression.py +24 -0
- wml/wtorch/conv_module.py +181 -0
- wml/wtorch/conv_ws.py +144 -0
- wml/wtorch/data/__init__.py +16 -0
- wml/wtorch/data/_utils/__init__.py +45 -0
- wml/wtorch/data/_utils/collate.py +183 -0
- wml/wtorch/data/_utils/fetch.py +47 -0
- wml/wtorch/data/_utils/pin_memory.py +121 -0
- wml/wtorch/data/_utils/signal_handling.py +72 -0
- wml/wtorch/data/_utils/worker.py +227 -0
- wml/wtorch/data/base_data_loader_iter.py +93 -0
- wml/wtorch/data/dataloader.py +501 -0
- wml/wtorch/data/datapipes/__init__.py +1 -0
- wml/wtorch/data/datapipes/iter/__init__.py +12 -0
- wml/wtorch/data/datapipes/iter/batch.py +126 -0
- wml/wtorch/data/datapipes/iter/callable.py +92 -0
- wml/wtorch/data/datapipes/iter/listdirfiles.py +37 -0
- wml/wtorch/data/datapipes/iter/loadfilesfromdisk.py +30 -0
- wml/wtorch/data/datapipes/iter/readfilesfromtar.py +60 -0
- wml/wtorch/data/datapipes/iter/readfilesfromzip.py +63 -0
- wml/wtorch/data/datapipes/iter/sampler.py +94 -0
- wml/wtorch/data/datapipes/utils/__init__.py +0 -0
- wml/wtorch/data/datapipes/utils/common.py +65 -0
- wml/wtorch/data/dataset.py +354 -0
- wml/wtorch/data/datasets/__init__.py +4 -0
- wml/wtorch/data/datasets/common.py +53 -0
- wml/wtorch/data/datasets/listdirfilesdataset.py +36 -0
- wml/wtorch/data/datasets/loadfilesfromdiskdataset.py +30 -0
- wml/wtorch/data/distributed.py +135 -0
- wml/wtorch/data/multi_processing_data_loader_iter.py +866 -0
- wml/wtorch/data/sampler.py +267 -0
- wml/wtorch/data/single_process_data_loader_iter.py +24 -0
- wml/wtorch/data/test_data_loader.py +26 -0
- wml/wtorch/dataset_toolkit.py +67 -0
- wml/wtorch/depthwise_separable_conv_module.py +98 -0
- wml/wtorch/dist.py +591 -0
- wml/wtorch/dropblock/__init__.py +6 -0
- wml/wtorch/dropblock/dropblock.py +228 -0
- wml/wtorch/dropblock/dropout.py +40 -0
- wml/wtorch/dropblock/scheduler.py +48 -0
- wml/wtorch/ema.py +61 -0
- wml/wtorch/fc_module.py +73 -0
- wml/wtorch/functional.py +34 -0
- wml/wtorch/iter_dataset.py +26 -0
- wml/wtorch/loss.py +69 -0
- wml/wtorch/nets/__init__.py +0 -0
- wml/wtorch/nets/ckpt_toolkit.py +219 -0
- wml/wtorch/nets/fpn.py +276 -0
- wml/wtorch/nets/hrnet/__init__.py +0 -0
- wml/wtorch/nets/hrnet/config.py +2 -0
- wml/wtorch/nets/hrnet/hrnet.py +494 -0
- wml/wtorch/nets/misc.py +249 -0
- wml/wtorch/nets/resnet/__init__.py +0 -0
- wml/wtorch/nets/resnet/layers/__init__.py +17 -0
- wml/wtorch/nets/resnet/layers/aspp.py +144 -0
- wml/wtorch/nets/resnet/layers/batch_norm.py +231 -0
- wml/wtorch/nets/resnet/layers/blocks.py +111 -0
- wml/wtorch/nets/resnet/layers/wrappers.py +110 -0
- wml/wtorch/nets/resnet/r50_config.py +38 -0
- wml/wtorch/nets/resnet/resnet.py +691 -0
- wml/wtorch/nets/shape_spec.py +20 -0
- wml/wtorch/nets/simple_fpn.py +101 -0
- wml/wtorch/nms.py +109 -0
- wml/wtorch/nn.py +896 -0
- wml/wtorch/ocr_block.py +193 -0
- wml/wtorch/summary.py +331 -0
- wml/wtorch/train_toolkit.py +603 -0
- wml/wtorch/transformer_blocks.py +266 -0
- wml/wtorch/utils.py +719 -0
- wml/wtorch/wlr_scheduler.py +100 -0
|
@@ -0,0 +1,733 @@
|
|
|
1
|
+
import wml.wml_utils as wmlu
|
|
2
|
+
import os
|
|
3
|
+
import json
|
|
4
|
+
import numpy as np
|
|
5
|
+
import cv2 as cv
|
|
6
|
+
import copy
|
|
7
|
+
import wml.img_utils as wmli
|
|
8
|
+
import random
|
|
9
|
+
import matplotlib.pyplot as plt
|
|
10
|
+
import sys
|
|
11
|
+
import cv2
|
|
12
|
+
from wml.object_detection2.standard_names import *
|
|
13
|
+
import wml.object_detection2.bboxes as odb
|
|
14
|
+
from functools import partial
|
|
15
|
+
from .common import resample
|
|
16
|
+
from wml.semantic.structures import *
|
|
17
|
+
from wml.semantic.basic_toolkit import findContours
|
|
18
|
+
import glob
|
|
19
|
+
import math
|
|
20
|
+
from wml.walgorithm import points_on_circle
|
|
21
|
+
import copy
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def trans_odresult_to_annotations_list(data):
    '''
    Convert an object-detection result dict into a labelme-style annotations list.

    data: dict containing RD_LABELS (per-object labels) and RD_FULL_SIZE_MASKS.
    Returns a list of {'category_id','segmentation'} dicts, one entry per label.
    '''
    labels = data[RD_LABELS]
    res = []
    for i in range(len(labels)):
        annotation = {}
        annotation["category_id"] = labels[i]
        # NOTE(review): every annotation receives the WHOLE mask stack, not
        # data[RD_FULL_SIZE_MASKS][i] — this looks like a per-index bug;
        # confirm against callers before relying on 'segmentation'.
        annotation["segmentation"] = data[RD_FULL_SIZE_MASKS]
        res.append(annotation)

    return res
|
|
36
|
+
|
|
37
|
+
def trans_absolute_coord_to_relative_coord(image_info,annotations_list):
    '''
    Convert absolute [x,y,w,h] boxes to relative [ymin,xmin,ymax,xmax].

    image_info: {'height','width'}
    annotations_list: [{'bbox','segmentation','category_id'}] with absolute bbox
    Returns (bboxes, labels, segmentations) as numpy arrays; empty inputs yield
    correctly shaped empty arrays.
    '''
    H = image_info['height']
    W = image_info['width']
    if not annotations_list:
        return (np.zeros([0,4],dtype=np.float32),
                np.zeros([0],dtype=np.int32),
                np.zeros([0,H,W],dtype=np.uint8))

    boxes = []
    segs = []
    labels = []
    for ann in annotations_list:
        bx = ann['bbox']
        left = bx[0]/W
        top = bx[1]/H
        right = (bx[0]+bx[2])/W
        bottom = (bx[1]+bx[3])/H
        boxes.append([top,left,bottom,right])
        segs.append(ann['segmentation'])
        labels.append(ann['category_id'])

    return np.array(boxes),np.array(labels),np.array(segs)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def get_files(data_dir, img_suffix=wmli.BASE_IMG_SUFFIX,keep_no_json_img=False):
    '''
    Collect (img_file, json_file) pairs under data_dir (recursively).

    When keep_no_json_img is False, images without a sibling .json annotation
    file are dropped.
    '''
    pairs = []
    for img_path in wmlu.recurse_get_filepath_in_dir(data_dir, suffix=img_suffix):
        ann_path = wmlu.change_suffix(img_path, "json")
        if keep_no_json_img or os.path.exists(ann_path):
            pairs.append((img_path, ann_path))
    return pairs
|
|
68
|
+
|
|
69
|
+
def _get_shape_points(shape,circle_points_nr=20):
|
|
70
|
+
shape_type = shape['shape_type']
|
|
71
|
+
if shape_type == "polygon":
|
|
72
|
+
return np.array(shape['points']).astype(np.int32)
|
|
73
|
+
elif shape_type == "rectangle":
|
|
74
|
+
points = np.array(shape['points']).astype(np.int32)
|
|
75
|
+
x0 = np.min(points[:,0])
|
|
76
|
+
x1 = np.max(points[:,0])
|
|
77
|
+
y0 = np.min(points[:,1])
|
|
78
|
+
y1 = np.max(points[:,1])
|
|
79
|
+
n_points = np.array([[x0,y0],[x1,y0],[x1,y1],[x0,y1]]).astype(np.int32)
|
|
80
|
+
return n_points
|
|
81
|
+
elif shape_type == "circle":
|
|
82
|
+
points = np.array(shape['points']).astype(np.int32)
|
|
83
|
+
center = points[0]
|
|
84
|
+
d = points[1]-points[0]
|
|
85
|
+
r = math.sqrt(d[0]*d[0]+d[1]*d[1])
|
|
86
|
+
points = points_on_circle(center=center,r=r,points_nr=circle_points_nr)
|
|
87
|
+
return points
|
|
88
|
+
else:
|
|
89
|
+
print(f"WARNING: unsupport labelme shape type {shape_type}")
|
|
90
|
+
return np.zeros([0,2],dtype=np.int32)
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
'''
output:
image_info: {'height','width'}
annotations_list: [{'bbox','segmentation','category_id','points_x','points_y'}' #bbox[xmin,ymin,width,height] absolute coordinate,
'segmentation' [H,W], 全图
'''
def read_labelme_data(file_path,label_text_to_id=lambda x:int(x),mask_on=True,use_semantic=True,
                      use_polygon_mask=False,
                      circle_points_nr=20,
                      do_raise=False):
    '''
    Parse one labelme json file into (image_info, annotations_list).

    file_path: path of the labelme .json file.
    label_text_to_id: callable mapping the label string to a category id;
        pass None to keep the raw label string.
    mask_on: when False no segmentation is built (and use_semantic is forced off).
    use_polygon_mask: store segmentation as a WPolygonMaskItem instead of a
        rasterized full-image uint8 mask.
    circle_points_nr: number of points used to approximate circle shapes.
    do_raise: re-raise parse errors instead of returning a 1x1 placeholder
        image with an empty annotation list.

    Returns:
        image: {'height','width','file_name'}
        annotations_list: list of {'bbox' (xmin,ymin,w,h absolute),
            'segmentation','category_id','points_x','points_y','difficult'}
    '''
    if mask_on == False:
        use_semantic = False
    annotations_list = []
    image = {}
    try:
        with open(file_path,"r",encoding="utf-8") as f:
            data_str = f.read()
            json_data = json.loads(data_str)
            img_width = int(json_data["imageWidth"])
            img_height = int(json_data["imageHeight"])
            image["height"] = int(img_height)
            image["width"] = int(img_width)
            image["file_name"] = wmlu.base_name(file_path)
            for shape in json_data["shapes"]:
                # contour points of this shape, [N,2] int32
                all_points = _get_shape_points(shape,circle_points_nr=circle_points_nr).astype(np.int32) #[1,N,2]
                if len(all_points)<1:
                    continue
                points = np.transpose(all_points)
                x,y = np.vsplit(points,2)
                x = np.reshape(x,[-1])
                y = np.reshape(y,[-1])
                # clamp coordinates into the image rectangle
                x = np.minimum(np.maximum(0,x),img_width-1)
                y = np.minimum(np.maximum(0,y),img_height-1)
                xmin = np.min(x)
                xmax = np.max(x)
                ymin = np.min(y)
                ymax = np.max(y)
                if mask_on:
                    all_points = np.expand_dims(all_points,axis=0)
                    if use_polygon_mask:
                        segmentation = WPolygonMaskItem(all_points,width=img_width,height=img_height)
                    else:
                        # rasterize the polygon into a full-image binary mask
                        mask = np.zeros(shape=[img_height,img_width],dtype=np.uint8)
                        segmentation = cv.drawContours(mask,all_points,-1,color=(1),thickness=cv.FILLED)
                else:
                    segmentation = None

                # any truthy crowd/ignore/difficult flag marks the object difficult
                flags = shape.get('flags',{})
                difficult = False
                for k,v in flags.items():
                    if not v:
                        continue
                    if k.lower() in ['crowd','ignore','difficult']:
                        difficult = True

                # a '*' in the label also marks the object difficult
                ori_label = shape['label']
                if "*" in ori_label:
                    difficult = True
                    ori_label = ori_label.replace("*","")

                if label_text_to_id is not None:
                    label = label_text_to_id(ori_label)
                else:
                    label = ori_label

                annotations_list.append({"bbox":(xmin,ymin,xmax-xmin+1,ymax-ymin+1),
                                         "segmentation":segmentation,
                                         "category_id":label,
                                         "points_x":x,
                                         "points_y":y,
                                         "difficult":difficult})
    except Exception as e:
        if do_raise:
            raise e
        # fall back to a 1x1 placeholder so callers can keep iterating datasets
        image["height"] = 1
        image["width"] = 1
        image["file_name"] = wmlu.base_name(file_path)
        print(f"Read file {os.path.basename(file_path)} faild, info {e}.")
        annotations_list = []
        pass

    if use_semantic and not use_polygon_mask and mask_on:
        '''
        Each pixel only belong to one classes, and the latter annotation will overwrite the previous
        '''
        # walk annotations back-to-front, removing already-claimed pixels
        if len(annotations_list) > 1:
            mask = 1 - annotations_list[-1]['segmentation']
            for i in reversed(range(len(annotations_list) - 1)):
                annotations_list[i]['segmentation'] = np.logical_and(annotations_list[i]['segmentation'], mask)
                mask = np.logical_and(mask, 1 - annotations_list[i]['segmentation'])
    return image,annotations_list
|
|
185
|
+
|
|
186
|
+
def save_labelme_data(file_path,image_path,image,annotations_list,label_to_text=lambda x:str(x)):
    '''
    Save annotations to a labelme-format json file.

    file_path: output .json path.
    image_path: path of the image the annotations belong to.
    image: {'width','height'} or None (then the size is read from image_path).
    annotations_list: [{'segmentation' [H,W] full-image mask, 'category_id'}].
    label_to_text: callable mapping a category id to the label string.
    '''
    data = {}
    shapes = []
    data["version"] = "3.10.1"
    data["flags"] = {}
    for ann in annotations_list:
        shape = {}
        shape["label"] = label_to_text(ann["category_id"])
        shape["shape_type"] = "polygon"
        contours, hierarchy = cv.findContours(ann["segmentation"], cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
        if hierarchy is None:
            # empty mask: nothing to export for this annotation
            continue
        hierarchy = np.reshape(hierarchy,[-1,4])
        for he,cont in zip(hierarchy,contours):
            # skip hole contours (those with a parent) smaller than their parent.
            # BUGFIX: the original indexed `contours[[he[-1]]]` — a list index
            # into a tuple, which raises TypeError; the parent contour is
            # contours[he[-1]].
            if he[-1]>=0 and cv.contourArea(cont) < cv.contourArea(contours[he[-1]]):
                continue
            points = cont
            if len(cont.shape)==3 and cont.shape[1]==1:
                points = np.squeeze(points,axis=1)
            points = points.tolist()
            shape["points"] = points
            shapes.append(copy.deepcopy(shape))

    data["shapes"] = shapes
    data["imagePath"] = os.path.basename(image_path)
    if image is not None:
        data["imageWidth"] = image["width"]
        data["imageHeight"] = image["height"]
    else:
        height,width = wmli.get_img_size(image_path)
        data["imageWidth"] = width
        data["imageHeight"] = height

    data["imageData"] = None
    with open(file_path,"w") as f:
        json.dump(data,f)
|
|
225
|
+
|
|
226
|
+
def save_labelme_datav2(file_path,image_path,image,annotations_list,label_to_text=lambda x:str(x)):
    '''
    Save annotations whose masks cover only their bbox region.

    annotations_list[i]['bbox']: (x0,y0,x1,y1) absolute coordinates.
    annotations_list[i]['segmentation']: (H,W) mask covering only the bbox.
    Contours found in the per-bbox mask are scaled and offset back into
    absolute image coordinates before being written as polygons.
    label_to_text: callable (or plain dict) mapping category id to label text;
    None keeps the raw category_id.
    '''
    data = {}
    shapes = []
    data["version"] = "3.10.1"
    data["flags"] = {}
    if isinstance(label_to_text,dict):
        label_to_text = wmlu.MDict.from_dict(label_to_text)
    for ann in annotations_list:
        shape = {}
        if label_to_text is not None:
            shape["label"] = label_to_text(ann["category_id"])
        else:
            shape["label"] = ann["category_id"]
        shape["shape_type"] = "polygon"
        mask = ann["segmentation"]
        x0,y0,x1,y1 = ann['bbox']
        # map mask-local pixel coordinates back to absolute image coordinates
        scale = np.reshape(np.array([(x1-x0)/mask.shape[1],(y1-y0)/mask.shape[0]],dtype=np.float32),[1,2])
        offset = np.reshape(np.array([x0,y0],dtype=np.float32),[1,2])

        contours, hierarchy = cv.findContours(ann["segmentation"], cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
        if hierarchy is None:
            # empty mask: nothing to export for this annotation
            continue
        hierarchy = np.reshape(hierarchy,[-1,4])
        for he,cont in zip(hierarchy,contours):
            # skip hole contours smaller than their parent contour.
            # BUGFIX: the original indexed `contours[[he[-1]]]` — a list index
            # into a tuple, which raises TypeError; the parent is
            # contours[he[-1]].
            if he[-1]>=0 and cv.contourArea(cont) < cv.contourArea(contours[he[-1]]):
                continue
            points = cont
            if len(cont.shape)==3 and cont.shape[1]==1:
                points = np.squeeze(points,axis=1)
            points = points*scale+offset
            points = points.astype(np.int32).tolist()
            if len(points)<=2:
                continue
            shape["points"] = points
            shapes.append(copy.deepcopy(shape))

    data["shapes"] = shapes
    data["imagePath"] = os.path.basename(image_path)
    data["imageWidth"] = image["width"]
    data["imageHeight"] = image["height"]
    data["imageData"] = None
    with open(file_path,"w") as f:
        json.dump(data,f)
|
|
274
|
+
def save_labelme_datav3(file_path,image_path,image,labels,bboxes,masks,label_to_text=lambda x:str(x)):
    '''
    Save object-detection output in labelme format.

    labels: [N]
    bboxes: [N,4] (x0,y0,x1,y1), absolute coordinates
    masks: [N,h,w], each covering only its bbox region
    '''
    anns = []
    for idx, label in enumerate(labels):
        anns.append({"category_id": label,
                     "segmentation": masks[idx].astype(np.uint8),
                     "bbox": bboxes[idx]})
    save_labelme_datav2(file_path,image_path,image,anns,label_to_text=label_to_text)
|
|
290
|
+
|
|
291
|
+
def save_labelme_datav4(file_path,image_path,image,annotations_list,label_to_text=lambda x:str(x)):
    '''
    Save annotations whose segmentation is already a list of polygons.

    annotations_list[i]['bbox']: (x0,y0,x1,y1) absolute coordinates
    annotations_list[i]['segmentation']: list[(N,2)] polygon point arrays
    label_to_text: callable (or plain dict) mapping category id to label text;
    None keeps the raw category_id.
    '''
    if isinstance(label_to_text,dict):
        label_to_text = wmlu.MDict.from_dict(label_to_text)
    shapes = []
    for ann in annotations_list:
        if label_to_text is not None:
            label_txt = label_to_text(ann["category_id"])
        else:
            label_txt = ann["category_id"]
        for poly in ann["segmentation"]:
            pts = poly.tolist() if isinstance(poly,np.ndarray) else poly
            shapes.append({"label":label_txt,
                           "shape_type":"polygon",
                           "points":pts})
    data = {"version":"3.10.1",
            "flags":{},
            "shapes":shapes,
            "imagePath":os.path.basename(image_path),
            "imageWidth":image["width"],
            "imageHeight":image["height"],
            "imageData":None}
    with open(file_path,"w") as f:
        json.dump(data,f)
|
|
326
|
+
|
|
327
|
+
def save_labelme_datav5(file_path,image_path,image,labels,bboxes,masks,label_to_text=lambda x:str(x)):
    '''
    Save detection output with polygon masks in labelme format.

    labels: [N]
    bboxes: [N,4] (x0,y0,x1,y1), absolute coordinates
    masks: WPolygonMasks; each element exposes .points
    '''
    anns = []
    for idx in range(len(labels)):
        anns.append({"category_id": labels[idx],
                     "segmentation": masks[idx].points,
                     "bbox": bboxes[idx]})
    save_labelme_datav4(file_path,image_path,image,anns,label_to_text=label_to_text)
|
|
340
|
+
|
|
341
|
+
def save_labelme_datav6(file_path,image_path,masks,labels,image=None):
    '''
    Save full-image binary masks as labelme polygons.

    masks: [N,H,W] full-image masks
    labels: label text for each mask
    image: {'width','height'} or None (then the size is read from image_path)
    Contours with two or fewer points are dropped.
    '''
    if image is None:
        size = wmli.get_img_size(image_path)
        image = dict(width=size[1],height=size[0])

    shapes = []
    for label,mask in zip(labels,masks):
        contour_groups,_ = findContours(mask.astype(np.uint8))
        for contour in contour_groups:
            pts = contour.astype(np.int32).tolist()
            if len(pts)<=2:
                continue
            shapes.append({"label":label,
                           "shape_type":"polygon",
                           "points":pts})

    data = {"version":"3.10.1",
            "flags":{},
            "shapes":shapes,
            "imagePath":os.path.basename(image_path),
            "imageWidth":image["width"],
            "imageHeight":image["height"],
            "imageData":None}
    with open(file_path,"w") as f:
        json.dump(data,f)
|
|
374
|
+
|
|
375
|
+
def save_labelme_points_data(file_path,image_path,image,points,labels):
    '''
    Save keypoints as labelme "point" shapes.

    points: [N,2] (x,y)
    labels: [N]
    image: {'width','height'} or None (then the size is read from image_path)
    '''
    if image is None:
        h,w = wmli.get_img_size(image_path)
        image = dict(width=w,height=h)

    shapes = []
    for pt,label in zip(points,labels):
        shapes.append({"label":str(label),
                       "group_id":None,
                       "shape_type":"point",
                       "points":[[int(pt[0]),int(pt[1])]],
                       "flags":dict(ignore=False,difficult=False,crowd=False)})

    data = {"version":"4.2.9",
            "flags":{},
            "shapes":shapes,
            "imagePath":os.path.basename(image_path),
            "imageWidth":image["width"],
            "imageHeight":image["height"],
            "imageData":None}

    with open(file_path,"w") as f:
        json.dump(data,f)
|
|
407
|
+
|
|
408
|
+
def get_labels_and_bboxes(image,annotations_list,is_relative_coordinate=False):
    '''
    Extract labels and [ymin,xmin,ymax,xmax] boxes from annotations.

    image: {'width','height'}
    annotations_list: [{'bbox' (xmin,ymin,w,h) absolute, 'category_id'}]
    When is_relative_coordinate is True the boxes are divided by the image size.
    '''
    width = image["width"]
    height = image["height"]
    labels = []
    bboxes = []
    for ann in annotations_list:
        x0,y0,w,h = ann["bbox"][0],ann["bbox"][1],ann["bbox"][2],ann["bbox"][3]
        bboxes.append([y0,x0,y0+h,x0+w])
        labels.append(ann["category_id"])
    if bboxes:
        bboxes = np.array(bboxes,dtype=np.float32)
    else:
        bboxes = np.zeros([0,4],dtype=np.float32)
    if is_relative_coordinate:
        scale = np.array([[height,width,height,width]],dtype=np.float32)
        bboxes = bboxes/scale
    return np.array(labels),bboxes
|
|
429
|
+
|
|
430
|
+
def get_labels_bboxes_and_masks(image,annotations_list):
    '''
    Extract labels, relative [ymin,xmin,ymax,xmax] boxes, and masks.

    image: {'width','height'}
    annotations_list: [{'bbox' (xmin,ymin,w,h) absolute,'category_id','segmentation'}]
    '''
    width = image["width"]
    height = image["height"]
    labels = []
    bboxes = []
    masks = []
    for ann in annotations_list:
        t_box = ann["bbox"]
        xmin = t_box[0]/width
        ymin = t_box[1]/height
        bboxes.append([ymin,xmin,ymin+t_box[3]/height,xmin+t_box[2]/width])
        labels.append(ann["category_id"])
        masks.append(ann["segmentation"])
    return np.array(labels),np.array(bboxes),np.array(masks)
|
|
446
|
+
|
|
447
|
+
def get_masks(image,annotations_list,num_classes,no_background=True):
    '''
    Merge per-annotation full-image masks into one per-class mask stack.

    image: {'width','height'}
    annotations_list: [{'category_id','segmentation' [H,W]}]
    Returns [num_classes-1,H,W] (no_background=True, class ids shifted down by
    one, clamped at 0) or [num_classes,H,W]; values are 0 or 1.
    '''
    width = image["width"]
    height = image["height"]
    if no_background:
        channels = num_classes-1
        to_index = lambda c:max(0,c-1)
    else:
        channels = num_classes
        to_index = lambda c:c
    res = np.zeros([channels,height,width],dtype=np.int32)

    for ann in annotations_list:
        idx = to_index(ann["category_id"])
        res[idx] = res[idx]|ann["segmentation"]

    return res
|
|
467
|
+
|
|
468
|
+
def get_image_size(image):
    '''Return (height, width) from an image-info dict.'''
    return (image["height"],image["width"])
|
|
472
|
+
|
|
473
|
+
def save_labelme_datav1(file_path,image_path,image_data,annotations_list,label_to_text=lambda x:str(x)):
    '''
    Write the image array to disk, then save its annotations in labelme format.

    image_data: [H,W,C] image array; its shape supplies width/height.
    '''
    wmli.imsave(image_path,image_data)
    info = dict(width=image_data.shape[1],height=image_data.shape[0])
    save_labelme_data(file_path,image_path,info,annotations_list,label_to_text)
|
|
477
|
+
|
|
478
|
+
def get_expand_bboxes_in_annotations(annotations,scale=2):
    '''
    Expand every annotated box by `scale` per side (scale*scale times the
    area), keeping each box center fixed.
    '''
    boxes = [ann["bbox"] for ann in annotations]
    return odb.expand_bbox(boxes,scale)
|
|
486
|
+
|
|
487
|
+
def get_expand_bboxes_in_annotationsv2(annotations,size):
    '''
    Expand every annotated box to the given size, keeping each box center
    fixed.
    '''
    boxes = [ann["bbox"] for ann in annotations]
    return odb.expand_bbox_by_size(boxes,size)
|
|
495
|
+
|
|
496
|
+
def get_labels(annotations):
    '''Return the category_id of every annotation, in order.'''
    return [ann["category_id"] for ann in annotations]
|
|
499
|
+
|
|
500
|
+
def resize(image,annotations_list,img_data,size):
    '''
    Resize the image (aspect-ratio preserving) and rescale its annotations.

    size: (h,w) target size; the actual output size may differ because the
    aspect ratio is preserved, so scale factors are derived from the resized
    image's real shape. Masks are resized with nearest-neighbor interpolation.
    '''
    out_image = copy.deepcopy(image)
    out_image["width"] = size[1]
    out_image["height"] = size[0]

    out_img_data = wmli.resize_img(img_data,size,keep_aspect_ratio=True)
    sx = out_img_data.shape[1]/img_data.shape[1]
    sy = out_img_data.shape[0]/img_data.shape[0]

    out_anns = []
    for ann in annotations_list:
        new_bbox = (np.array(ann["bbox"])*np.array([sx,sy,sx,sy])).astype(np.int32)
        new_seg = wmli.resize_img(ann["segmentation"],size=size,keep_aspect_ratio=True,
                                  interpolation=cv2.INTER_NEAREST)
        out_anns.append({"bbox":new_bbox,
                         "segmentation":new_seg,
                         "category_id":copy.deepcopy(ann["category_id"])})

    return out_image,out_anns,out_img_data
|
|
521
|
+
|
|
522
|
+
def random_cut(image,annotations_list,img_data,size,weights=None,threshold=0.15):
    '''
    Randomly pick one annotated object (sampling optionally biased by
    `weights`) and cut a crop of the given size around it, retrying until a
    crop containing annotations is found.

    image: {'width','height'}
    size: (w,h) crop size
    threshold: minimum visible fraction forwarded to cut().
    Returns (image_info, annotations, img_data) or (None,None,None) when no
    valid crop is found within 100 attempts (or there are no annotations).
    '''
    x_max = max(0,image["width"]-size[0])
    y_max = max(0,image["height"]-size[1])
    image_info = {}
    image_info["height"] = size[1]
    image_info["width"] = size[0]
    obj_ann_bboxes = get_expand_bboxes_in_annotations(annotations_list,2)
    labels = get_labels(annotations_list)
    if len(annotations_list)==0:
        return None,None,None
    count = 1
    while count<100:
        t_bbox = odb.random_bbox_in_bboxes(obj_ann_bboxes,size,weights,labels)
        # clamp the crop origin so the crop stays inside the image
        t_bbox[1] = max(0,min(t_bbox[1],y_max))
        t_bbox[0] = max(0,min(t_bbox[0],x_max))
        rect = (t_bbox[1],t_bbox[0],t_bbox[1]+t_bbox[3],t_bbox[0]+t_bbox[2])
        new_image_info,new_annotations_list,new_image_data = cut(annotations_list,img_data,rect,threshold=threshold)
        if new_annotations_list is not None and len(new_annotations_list)>0:
            return (new_image_info,new_annotations_list,new_image_data)
        # BUGFIX: the original wrote `++count`, which in Python is a double
        # unary plus (a no-op expression) — the counter never advanced and the
        # loop could spin forever when no valid crop exists.
        count += 1

    return None,None,None
|
|
548
|
+
|
|
549
|
+
def random_cutv1(image,annotations_list,img_data,size,threshold=0.15,resize_size=None):
    '''
    Crop one sub-image of the given size near every annotated object.

    size: [H,W]
    resize_size: when given, each crop is resized to this size afterwards.
    Returns a list of (image_info, annotations, img_data) tuples; crops that
    end up without annotations are skipped.
    '''
    results = []
    x_limit = max(0,image["width"]-size[0])
    y_limit = max(0,image["height"]-size[1])
    image_info = {"height":size[1],"width":size[0]}
    expanded = get_expand_bboxes_in_annotationsv2(annotations_list,[v//2 for v in size])
    if len(annotations_list)==0:
        return results

    for box in expanded:
        box = list(box)
        # clamp the candidate origin so the crop stays inside the image
        box[1] = max(0,min(box[1],y_limit))
        box[0] = max(0,min(box[0],x_limit))
        box = odb.random_bbox_in_bbox(box,size)
        rect = (box[1],box[0],box[1]+box[3],box[0]+box[2])
        crop_info,crop_anns,crop_img = cut(annotations_list,img_data,rect,threshold=threshold)
        if crop_anns is not None and len(crop_anns)>0:
            if resize_size is not None:
                crop_info,crop_anns,crop_img = resize(crop_info,crop_anns,crop_img,resize_size)

            results.append((crop_info,crop_anns,crop_img))
    return results
|
|
578
|
+
|
|
579
|
+
|
|
580
|
+
def random_cutv2(image,annotations_list,ref_bbox,img_data,size,weights=None):
    '''
    Cut one crop of the given size, positioned by reference boxes.

    ref_bbox: [N,4] relative coordinates, [ymin,xmin,ymax,xmax]
    size: (w,h) crop size
    Returns (image_info, annotations, img_data); the crop is kept even when it
    contains no annotations.
    '''
    x_limit = max(0,image["width"]-size[0])
    y_limit = max(0,image["height"]-size[1])
    image_info = {"height":size[1],"width":size[0]}
    candidate_boxes = []
    for box in ref_bbox:
        # convert each relative box to absolute [xmin,ymin,w,h]
        top = int(box[0]*image["height"])
        left = int(box[1]*image["width"])
        box_w = int((box[3]-box[1])*image["width"])
        box_h = int((box[2]-box[0])*image["height"])
        candidate_boxes.append([left,top,box_w,box_h])
    labels = get_labels(annotations_list)
    chosen = odb.random_bbox_in_bboxes(candidate_boxes,size,weights,labels)
    chosen[1] = max(0,min(chosen[1],y_limit))
    chosen[0] = max(0,min(chosen[0],x_limit))
    rect = (chosen[1],chosen[0],chosen[1]+chosen[3],chosen[0]+chosen[2])
    return cut(annotations_list,img_data,rect,return_none_if_no_ann=False)
|
|
603
|
+
|
|
604
|
+
|
|
605
|
+
'''
image_data:[h,w,c]
bbox:[ymin,xmin,ymax,xmax)
output:
image_info: {'height','width'}
annotations_list: [{'bbox','segmentation','category_id'}' #bbox[xmin,ymin,width,height] absolute coordinate, 'segmentation' [H,W]
image_data:[H,W,3]
'''
def cut(annotations_list,img_data,bbox,threshold=0.15,return_none_if_no_ann=True):
    '''
    Crop window `bbox` (ymin,xmin,ymax,xmax) out of img_data and clip every
    annotation's segmentation contours to that window.

    Args:
        annotations_list: [{'bbox','segmentation','category_id'}].
        img_data: [H,W,C] image array.
        bbox: crop window (ymin,xmin,ymax,xmax); clamped to the image.
        threshold: a clipped contour fragment is kept only when its
            retained-area ratio is >= threshold.
        return_none_if_no_ann: when True and no annotation survives the
            crop, return (None, None, None).

    Returns:
        (image_info, new_annotations_list, cropped_image_data); bboxes are
        [xmin,ymin,width,height] absolute, in crop-local coordinates.
    '''
    bbox = list(bbox)
    # Clamp the crop window to the image bounds.
    bbox[0] = max(0, bbox[0])
    bbox[1] = max(0, bbox[1])
    bbox[2] = min(bbox[2], img_data.shape[0])
    bbox[3] = min(bbox[3], img_data.shape[1])

    size = (bbox[3] - bbox[1], bbox[2] - bbox[0])  # (width, height)
    new_annotations_list = []
    image_info = {}
    image_info["height"] = size[1]
    image_info["width"] = size[0]
    # Encode the crop position/size into the generated file name.
    image_info["file_name"] = f"IMG_L{bbox[1]:06}_T{bbox[0]:06}_W{bbox[3]-bbox[1]:06}_H{bbox[2]-bbox[0]:06}"
    for obj_ann in annotations_list:
        cnts, _bboxes, ratios = odb.cut_contourv2(obj_ann["segmentation"], bbox)
        label = obj_ann["category_id"]
        for cnt, ratio in zip(cnts, ratios):
            # Drop fragments whose retained-area ratio is below threshold.
            if ratio < threshold:
                continue
            t_bbox = odb.to_xyminwh(odb.bbox_of_contour(cnt))
            # Rasterize the clipped contour into a crop-sized binary mask.
            mask = np.zeros(shape=[size[1], size[0]], dtype=np.uint8)
            segmentation = cv.drawContours(mask, np.array([cnt]), -1, color=(1), thickness=cv.FILLED)
            new_annotations_list.append({"bbox": t_bbox, "segmentation": segmentation, "category_id": label})
    if (len(new_annotations_list) > 0) or (not return_none_if_no_ann):
        return (image_info, new_annotations_list, wmli.sub_image(img_data, bbox))
    else:
        return None, None, None
|
|
643
|
+
|
|
644
|
+
|
|
645
|
+
|
|
646
|
+
def remove_instance(image,annotations_list,remove_pred_fn,default_value=(127, 127, 127)):
    '''
    Paint over every instance selected by remove_pred_fn and drop it from
    the annotation list.

    Args:
        image: [H,W,C] uint8 image array.
        annotations_list: annotation dicts, each with a [H,W]
            'segmentation' mask (>0 marks the instance).
        remove_pred_fn: bool f(ann); True means the instance is erased.
        default_value: per-channel fill color for erased pixels.
            (Changed from a list literal to a tuple: mutable default
            arguments are a Python anti-pattern; values are unchanged.)

    Returns:
        (image, kept_annotations): the image with removed instances painted
        over, and the annotations remove_pred_fn rejected.
    '''
    kept = []
    # Constant-color image used as the replacement for erased pixels.
    removed_image = np.ones_like(image) * np.array([[default_value]], dtype=np.uint8)
    for ann in annotations_list:
        if remove_pred_fn(ann):
            # Broadcasting [H,W,1] against [H,W,C] replaces the original
            # expand_dims + tile pair with identical results.
            select = np.greater(ann['segmentation'], 0)[..., None]
            image = np.where(select, removed_image, image)
        else:
            kept.append(ann)

    return image, kept
|
|
662
|
+
|
|
663
|
+
def read_labelme_kp_data(file_path,label_text_to_id=lambda x:int(x)):
    '''
    Read single-point keypoint shapes from a labelme json file.

    Args:
        file_path: json file path
        label_text_to_id: int f(string); when None the raw label text is
            kept as-is.

    Returns:
        image_info: {'width','height','file_name'}
        labels:[N]
        points:[N,2]
    '''
    with open(file_path,"r") as f:
        data = json.load(f)

    labels = []
    points = []
    for shape in data['shapes']:
        raw_label = shape['label']
        labels.append(raw_label if label_text_to_id is None else label_text_to_id(raw_label))
        # Only the first point of each shape is used.
        points.append(shape['points'][0])

    image_info = {
        'width': int(data['imageWidth']),
        'height': int(data["imageHeight"]),
        'file_name': wmlu.base_name(data["imagePath"]),
    }

    return image_info,labels,points
|
|
693
|
+
|
|
694
|
+
|
|
695
|
+
def read_labelme_mckp_data(file_path,label_text_to_id=None,keep_no_json_img=False):
    '''
    Read multi-point keypoint shapes from a labelme json file, grouping all
    points that share the same (lower-cased) label into one entry.

    Args:
        file_path: json file path; when it does not exist an empty
            image_info and empty label/point lists are returned.
        label_text_to_id: int f(string); applied to each raw label before
            grouping. NOTE(review): the result is passed to .lower(), which
            assumes the mapped label is still a string — confirm callers.
        keep_no_json_img: currently unused in this body — TODO confirm
            whether it is meant to gate the missing-file behaviour.

    Returns:
        image_info: dict keyed by WIDTH/HEIGHT/FILENAME/FILEPATH constants
            (empty when the file is missing).
        labels:[N]
        points:list of [N,2] points, [x,y]; one concatenated array per label.
    '''
    labels = []
    points = []
    image_info = {}

    # Groups point arrays by lower-cased label; MDict(dtype=list) presumably
    # auto-creates an empty list per new key — defined in project wmlu.
    kp_datas = wmlu.MDict(dtype=list)

    if os.path.exists(file_path):
        with open(file_path,"r") as f:
            data = json.load(f)
            for d in data['shapes']:
                label = d['label']
                # Each shape may carry several points; normalize to [N,2].
                point = np.reshape(np.array(d['points']),[-1,2])
                if label_text_to_id is not None:
                    label = label_text_to_id(label)
                kp_datas[label.lower()].append(point)

            image_info[WIDTH] = int(data['imageWidth'])
            image_info[HEIGHT] = int(data["imageHeight"])
            image_info[FILENAME] = wmlu.base_name(data["imagePath"])
            image_info[FILEPATH] = data["imagePath"]

    # Flatten the per-label groups: one label plus one concatenated [N,2]
    # point array per distinct label.
    for k,v in kp_datas.items():
        #v is [N,2]
        labels.append(k)
        points.append(np.concatenate(v,axis=0))

    return image_info,labels,points
|
|
733
|
+
|