python-wml 3.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of python-wml might be problematic. Click here for more details.
- python_wml-3.0.0.dist-info/LICENSE +23 -0
- python_wml-3.0.0.dist-info/METADATA +51 -0
- python_wml-3.0.0.dist-info/RECORD +164 -0
- python_wml-3.0.0.dist-info/WHEEL +5 -0
- python_wml-3.0.0.dist-info/top_level.txt +1 -0
- wml/__init__.py +0 -0
- wml/basic_data_def/__init__.py +2 -0
- wml/basic_data_def/detection_data_def.py +279 -0
- wml/basic_data_def/io_data_def.py +2 -0
- wml/basic_img_utils.py +816 -0
- wml/img_patch.py +92 -0
- wml/img_utils.py +571 -0
- wml/iotoolkit/__init__.py +17 -0
- wml/iotoolkit/aic_keypoint.py +115 -0
- wml/iotoolkit/baidu_mask_toolkit.py +244 -0
- wml/iotoolkit/base_dataset.py +210 -0
- wml/iotoolkit/bboxes_statistics.py +515 -0
- wml/iotoolkit/build.py +0 -0
- wml/iotoolkit/cityscapes_toolkit.py +183 -0
- wml/iotoolkit/classification_data_statistics.py +25 -0
- wml/iotoolkit/coco_data_fwd.py +225 -0
- wml/iotoolkit/coco_keypoints.py +118 -0
- wml/iotoolkit/coco_keypoints_fmt2.py +103 -0
- wml/iotoolkit/coco_toolkit.py +397 -0
- wml/iotoolkit/coco_wholebody.py +269 -0
- wml/iotoolkit/common.py +108 -0
- wml/iotoolkit/crowd_pose.py +146 -0
- wml/iotoolkit/fast_labelme.py +110 -0
- wml/iotoolkit/image_folder.py +95 -0
- wml/iotoolkit/imgs_cache.py +58 -0
- wml/iotoolkit/imgs_reader_mt.py +73 -0
- wml/iotoolkit/labelme_base.py +102 -0
- wml/iotoolkit/labelme_json_to_img.py +49 -0
- wml/iotoolkit/labelme_toolkit.py +117 -0
- wml/iotoolkit/labelme_toolkit_fwd.py +733 -0
- wml/iotoolkit/labelmemckeypoints_dataset.py +169 -0
- wml/iotoolkit/lspet.py +48 -0
- wml/iotoolkit/mapillary_vistas_toolkit.py +269 -0
- wml/iotoolkit/mat_data.py +90 -0
- wml/iotoolkit/mckeypoints_statistics.py +28 -0
- wml/iotoolkit/mot_datasets.py +62 -0
- wml/iotoolkit/mpii.py +108 -0
- wml/iotoolkit/npmckeypoints_dataset.py +164 -0
- wml/iotoolkit/o365_to_coco.py +136 -0
- wml/iotoolkit/object365_toolkit.py +156 -0
- wml/iotoolkit/object365v2_toolkit.py +71 -0
- wml/iotoolkit/pascal_voc_data.py +51 -0
- wml/iotoolkit/pascal_voc_toolkit.py +194 -0
- wml/iotoolkit/pascal_voc_toolkit_fwd.py +473 -0
- wml/iotoolkit/penn_action.py +57 -0
- wml/iotoolkit/rawframe_dataset.py +129 -0
- wml/iotoolkit/rewrite_pascal_voc.py +28 -0
- wml/iotoolkit/semantic_data.py +49 -0
- wml/iotoolkit/split_file_by_type.py +29 -0
- wml/iotoolkit/sports_mot_datasets.py +78 -0
- wml/iotoolkit/vis_objectdetection_dataset.py +70 -0
- wml/iotoolkit/vis_torch_data.py +39 -0
- wml/iotoolkit/yolo_toolkit.py +38 -0
- wml/object_detection2/__init__.py +4 -0
- wml/object_detection2/basic_visualization.py +37 -0
- wml/object_detection2/bboxes.py +812 -0
- wml/object_detection2/data_process_toolkit.py +146 -0
- wml/object_detection2/keypoints.py +292 -0
- wml/object_detection2/mask.py +120 -0
- wml/object_detection2/metrics/__init__.py +3 -0
- wml/object_detection2/metrics/build.py +15 -0
- wml/object_detection2/metrics/classifier_toolkit.py +440 -0
- wml/object_detection2/metrics/common.py +71 -0
- wml/object_detection2/metrics/mckps_toolkit.py +338 -0
- wml/object_detection2/metrics/toolkit.py +1953 -0
- wml/object_detection2/npod_toolkit.py +361 -0
- wml/object_detection2/odtools.py +243 -0
- wml/object_detection2/standard_names.py +75 -0
- wml/object_detection2/visualization.py +956 -0
- wml/object_detection2/wmath.py +34 -0
- wml/semantic/__init__.py +0 -0
- wml/semantic/basic_toolkit.py +65 -0
- wml/semantic/mask_utils.py +156 -0
- wml/semantic/semantic_test.py +21 -0
- wml/semantic/structures.py +1 -0
- wml/semantic/toolkit.py +105 -0
- wml/semantic/visualization_utils.py +658 -0
- wml/threadtoolkit.py +50 -0
- wml/walgorithm.py +228 -0
- wml/wcollections.py +212 -0
- wml/wfilesystem.py +487 -0
- wml/wml_utils.py +657 -0
- wml/wstructures/__init__.py +4 -0
- wml/wstructures/common.py +9 -0
- wml/wstructures/keypoints_train_toolkit.py +149 -0
- wml/wstructures/kps_structures.py +579 -0
- wml/wstructures/mask_structures.py +1161 -0
- wml/wtorch/__init__.py +8 -0
- wml/wtorch/bboxes.py +104 -0
- wml/wtorch/classes_suppression.py +24 -0
- wml/wtorch/conv_module.py +181 -0
- wml/wtorch/conv_ws.py +144 -0
- wml/wtorch/data/__init__.py +16 -0
- wml/wtorch/data/_utils/__init__.py +45 -0
- wml/wtorch/data/_utils/collate.py +183 -0
- wml/wtorch/data/_utils/fetch.py +47 -0
- wml/wtorch/data/_utils/pin_memory.py +121 -0
- wml/wtorch/data/_utils/signal_handling.py +72 -0
- wml/wtorch/data/_utils/worker.py +227 -0
- wml/wtorch/data/base_data_loader_iter.py +93 -0
- wml/wtorch/data/dataloader.py +501 -0
- wml/wtorch/data/datapipes/__init__.py +1 -0
- wml/wtorch/data/datapipes/iter/__init__.py +12 -0
- wml/wtorch/data/datapipes/iter/batch.py +126 -0
- wml/wtorch/data/datapipes/iter/callable.py +92 -0
- wml/wtorch/data/datapipes/iter/listdirfiles.py +37 -0
- wml/wtorch/data/datapipes/iter/loadfilesfromdisk.py +30 -0
- wml/wtorch/data/datapipes/iter/readfilesfromtar.py +60 -0
- wml/wtorch/data/datapipes/iter/readfilesfromzip.py +63 -0
- wml/wtorch/data/datapipes/iter/sampler.py +94 -0
- wml/wtorch/data/datapipes/utils/__init__.py +0 -0
- wml/wtorch/data/datapipes/utils/common.py +65 -0
- wml/wtorch/data/dataset.py +354 -0
- wml/wtorch/data/datasets/__init__.py +4 -0
- wml/wtorch/data/datasets/common.py +53 -0
- wml/wtorch/data/datasets/listdirfilesdataset.py +36 -0
- wml/wtorch/data/datasets/loadfilesfromdiskdataset.py +30 -0
- wml/wtorch/data/distributed.py +135 -0
- wml/wtorch/data/multi_processing_data_loader_iter.py +866 -0
- wml/wtorch/data/sampler.py +267 -0
- wml/wtorch/data/single_process_data_loader_iter.py +24 -0
- wml/wtorch/data/test_data_loader.py +26 -0
- wml/wtorch/dataset_toolkit.py +67 -0
- wml/wtorch/depthwise_separable_conv_module.py +98 -0
- wml/wtorch/dist.py +591 -0
- wml/wtorch/dropblock/__init__.py +6 -0
- wml/wtorch/dropblock/dropblock.py +228 -0
- wml/wtorch/dropblock/dropout.py +40 -0
- wml/wtorch/dropblock/scheduler.py +48 -0
- wml/wtorch/ema.py +61 -0
- wml/wtorch/fc_module.py +73 -0
- wml/wtorch/functional.py +34 -0
- wml/wtorch/iter_dataset.py +26 -0
- wml/wtorch/loss.py +69 -0
- wml/wtorch/nets/__init__.py +0 -0
- wml/wtorch/nets/ckpt_toolkit.py +219 -0
- wml/wtorch/nets/fpn.py +276 -0
- wml/wtorch/nets/hrnet/__init__.py +0 -0
- wml/wtorch/nets/hrnet/config.py +2 -0
- wml/wtorch/nets/hrnet/hrnet.py +494 -0
- wml/wtorch/nets/misc.py +249 -0
- wml/wtorch/nets/resnet/__init__.py +0 -0
- wml/wtorch/nets/resnet/layers/__init__.py +17 -0
- wml/wtorch/nets/resnet/layers/aspp.py +144 -0
- wml/wtorch/nets/resnet/layers/batch_norm.py +231 -0
- wml/wtorch/nets/resnet/layers/blocks.py +111 -0
- wml/wtorch/nets/resnet/layers/wrappers.py +110 -0
- wml/wtorch/nets/resnet/r50_config.py +38 -0
- wml/wtorch/nets/resnet/resnet.py +691 -0
- wml/wtorch/nets/shape_spec.py +20 -0
- wml/wtorch/nets/simple_fpn.py +101 -0
- wml/wtorch/nms.py +109 -0
- wml/wtorch/nn.py +896 -0
- wml/wtorch/ocr_block.py +193 -0
- wml/wtorch/summary.py +331 -0
- wml/wtorch/train_toolkit.py +603 -0
- wml/wtorch/transformer_blocks.py +266 -0
- wml/wtorch/utils.py +719 -0
- wml/wtorch/wlr_scheduler.py +100 -0
wml/iotoolkit/mpii.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
from wml.iotoolkit.mat_data import MatData
|
|
2
|
+
import wml.object_detection2.keypoints as odk
|
|
3
|
+
import numpy as np
|
|
4
|
+
import wml.object_detection2.bboxes as odb
|
|
5
|
+
import math
|
|
6
|
+
'''
|
|
7
|
+
id - joint id (0 - r ankle, 1 - r knee, 2 - r hip,
|
|
8
|
+
3 - l hip, 4 - l knee, 5 - l ankle, 6 - pelvis,
|
|
9
|
+
7 - thorax, 8 - upper neck, 9 - head top,
|
|
10
|
+
10 - r wrist, 11 - r elbow, 12 - r shoulder, 13 - l shoulder, 14 - l elbow, 15 - l wrist)
|
|
11
|
+
'''
|
|
12
|
+
def read_mpii_data(path):
    """Read MPII person annotations from the release ``.mat`` file.

    Args:
        path: path to the MPII ``mpii_human_pose_v1_u12_*.mat`` file.

    Returns:
        A list of ``[image_name, bboxes, kps, head_bboxes]`` per image:
        ``bboxes`` is [N,4] — each head box grown to also cover the
        person's keypoints; ``kps`` is [N,16,3] (x, y, vis) where vis is
        0 = unlabeled, 1 = labeled but invisible, 2 = visible;
        ``head_bboxes`` is the raw [N,4] head rectangles.
    """
    mpii_data = MatData(path).RELEASE
    annolist = mpii_data.annolist
    img_train = mpii_data.img_train

    res = []
    for i in range(len(annolist)):
        annorect_item = annolist.annorect.get_array_item(i)
        if annorect_item is None:
            continue
        image = str(annolist.image[i].name)
        image_datas = []
        try:
            for j in range(len(annorect_item)):
                person_anno = annorect_item[j]
                # Head rectangle corners.
                x1 = person_anno.x1
                y1 = person_anno.y1
                x2 = person_anno.x2
                y2 = person_anno.y2
                if 'annopoints' not in person_anno.data_keys():
                    continue
                points = person_anno.annopoints
                if points is None:
                    continue
                kps = points.point
                res_kps = np.zeros([16, 3], dtype=np.float32)
                for k in range(len(kps)):
                    kp = kps[k]
                    # Joint index per the MPII layout documented above;
                    # avoid shadowing the builtin `id`.
                    kp_id = kp.id
                    res_kps[kp_id, 0] = kp.x
                    res_kps[kp_id, 1] = kp.y
                    is_visible = kp.is_visible
                    # 0: not annotated, 1: annotated but invisible, 2: visible.
                    res_kps[kp_id, 2] = float(is_visible) + 1 if is_visible is not None else 0
                person_data = [np.array([x1, y1, x2, y2]), res_kps]
                image_datas.append(person_data)
        except Exception as e:
            # Test images (img_train==0) commonly lack fields; only report
            # failures for training images.
            if img_train.data[i] == 1:
                print("ERROR", i, e, img_train.data[i])
        if len(image_datas) > 0:
            image_datas = list(zip(*image_datas))
            bboxes = np.array(image_datas[0])
            head_bboxes = bboxes
            kps = np.array(image_datas[1])
            # Union each head box with its person's keypoint extent so the
            # final box covers the whole annotated person.
            _bboxes = odk.npbatchget_bboxes(kps)
            r_bboxes = []
            for bbox0, bbox1 in zip(bboxes, _bboxes):
                bbox = odb.bbox_of_boxes([bbox0, bbox1])
                r_bboxes.append(bbox)
            bboxes = np.array(r_bboxes, dtype=np.float32)
            res.append([image, bboxes, kps, head_bboxes])

    return res
|
|
64
|
+
|
|
65
|
+
class Trans2COCO:
    """Map MPII 16-keypoint annotations onto the COCO 17-keypoint layout."""

    def __init__(self) -> None:
        # COCO slots 5..16 (shoulders, elbows, wrists, hips, knees, ankles)
        # are filled from the corresponding MPII joint indices.
        self.dst_idxs = [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
        self.src_idxs = [13, 12, 14, 11, 15, 10, 3, 2, 4, 1, 5, 0]
        # COCO face keypoints (nose, eyes, ears) have no MPII counterpart;
        # they may be borrowed from auxiliary COCO-style keypoints.
        self.coco_idxs = [0, 1, 2, 3, 4]

    @staticmethod
    def kps_in_bbox(kps, bbox):
        """Return True iff every keypoint is labeled (vis > 0) and lies
        inside ``bbox`` given as (x0, y0, x1, y1)."""
        for row in range(kps.shape[0]):
            if kps[row, 2] <= 0:
                return False
            px, py = kps[row, 0], kps[row, 1]
            if px < bbox[0] or px > bbox[2] or py < bbox[1] or py > bbox[3]:
                return False
        return True

    def __call__(self, mpii_kps, coco_kps, head_bboxes):
        # A single [16,3] instance is translated directly.
        if len(mpii_kps.shape) == 2:
            return self.trans_one(mpii_kps, coco_kps, head_bboxes)
        # Otherwise treat the inputs as batched and translate per person.
        converted = [self.trans_one(mp, co, hb)
                     for mp, co, hb in zip(mpii_kps, coco_kps, head_bboxes)]
        return np.array(converted)

    def trans_one(self, mpii_kps, coco_kps, head_bbox):
        """Convert one [16,3] MPII keypoint set into a [17,3] COCO one,
        optionally merging face keypoints from ``coco_kps`` when they are
        consistent and fall inside ``head_bbox``."""
        res = np.zeros([17, 3], dtype=np.float32)
        res[self.dst_idxs] = mpii_kps[self.src_idxs]
        if coco_kps is not None:
            # Sanity check: for each shoulder/hip pair the left joint should
            # have the greater x, and both joints must be labeled.
            is_good = True
            for l_idx, r_idx in ([5, 6], [11, 12]):
                bad = (res[l_idx, 0] < res[r_idx, 0]
                       or res[l_idx, 2] < 0.1
                       or res[r_idx, 2] < 0.1)
                if bad:
                    is_good = False
                    break

            # Borrow the face keypoints only when all of them sit inside the head box.
            if is_good and head_bbox is not None and self.kps_in_bbox(coco_kps[self.coco_idxs], head_bbox):
                res[self.coco_idxs] = coco_kps[self.coco_idxs]
        return res
|
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
#coding=utf-8
|
|
2
|
+
import numpy as np
|
|
3
|
+
import matplotlib.image as mpimg
|
|
4
|
+
import xml.etree.ElementTree as ET
|
|
5
|
+
from xml.dom.minidom import Document
|
|
6
|
+
import random
|
|
7
|
+
import os
|
|
8
|
+
import math
|
|
9
|
+
import wml.wml_utils
|
|
10
|
+
import logging
|
|
11
|
+
import shutil
|
|
12
|
+
from functools import partial
|
|
13
|
+
import wml.wml_utils as wmlu
|
|
14
|
+
import wml.img_utils as wmli
|
|
15
|
+
import copy
|
|
16
|
+
from wml.object_detection2.standard_names import *
|
|
17
|
+
import pickle
|
|
18
|
+
from .common import *
|
|
19
|
+
import wml.object_detection2.bboxes as odb
|
|
20
|
+
from .pascal_voc_toolkit_fwd import *
|
|
21
|
+
from wml.wstructures import WMCKeypoints
|
|
22
|
+
from easydict import EasyDict
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class NPMCKeypointsDataset(object):
    """Multi-class keypoints dataset backed by pickled ``.bin`` files.

    Each sample file is a pickled dict expected to contain at least
    GT_LABELS (label-name strings), GT_KEYPOINTS and IMG_INFO (with
    WIDTH/HEIGHT).  Samples are returned as EasyDict instances.
    """

    def __init__(self, label_text2id=None, shuffle=False,
                 filter_empty_files=False,
                 resample_parameters=None,
                 ignore_case=True):
        '''
        :param label_text2id: trans a single label text to id: int func(str),
            or a dict mapping label text -> id
        :param shuffle: shuffle self.files after reading
        :param filter_empty_files: drop files whose labels all map to None
        :param resample_parameters: {label or label_text: repeat_nr} used to
            resample files by label
        :param ignore_case: when label_text2id is a dict, match keys
            case-insensitively
        Use datasets_trans/trans_labelme_to_npkp.py to convert labelme
        annotations into the format this dataset consumes.
        '''
        self.files = None
        self.shuffle = shuffle
        self.filter_empty_files = filter_empty_files
        if isinstance(label_text2id, dict):
            if ignore_case:
                # Lower-case the mapping keys so lookups are case-insensitive.
                new_dict = dict([(k.lower(), v) for k, v in label_text2id.items()])
                self.label_text2id = partial(ignore_case_dict_label_text2id, dict_data=new_dict)
            else:
                self.label_text2id = partial(dict_label_text2id, dict_data=label_text2id)
        else:
            self.label_text2id = label_text2id

        if resample_parameters is not None:
            # Keys may be label texts; translate them to label ids up front.
            self.resample_parameters = {}
            for k, v in resample_parameters.items():
                if isinstance(k, (str, bytes)):
                    k = self.label_text2id(k)
                self.resample_parameters[k] = v
            print("resample parameters")
            wmlu.show_dict(self.resample_parameters)
        else:
            self.resample_parameters = None

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        """Return one sample as an EasyDict, or None when it cannot be read."""
        try:
            bin_file = self.files[idx]
        except Exception as e:
            # BUGFIX: the original handler re-indexed self.files[idx] in its
            # message (raising again inside the except) and then fell through
            # to use the unbound name `bin_file`.  Report and bail out instead.
            print(f"ERROR: {e} bad index {idx}")
            print(self.files)
            return None
        if not os.path.exists(bin_file):
            return None
        try:
            with open(bin_file, "rb") as f:
                data = pickle.load(f)
            labels_names = data[GT_LABELS]
            img_info = data[IMG_INFO]
            if self.label_text2id is not None:
                labels = [self.label_text2id(x) for x in labels_names]
                keypoints = data[GT_KEYPOINTS]
                # Drop annotations whose label text is unknown (maps to None).
                keep = [x is not None for x in labels]
                labels = [x if x is not None else -1 for x in labels]
                labels = np.array(labels, dtype=np.int32)
                labels = labels[keep]
                keypoints = [keypoints[i] for i in np.where(keep)[0]]
                data[GT_LABELS] = labels
                data[GT_KEYPOINTS] = WMCKeypoints(keypoints, width=img_info[WIDTH], height=img_info[HEIGHT])
            else:
                labels = None

        except Exception as e:
            print(f"Read {bin_file} {e} faild.")
            return None
        return EasyDict(data)

    def read_data(self, dir_path, suffix=".bin"):
        """Collect the sample files.

        ``dir_path`` may be a directory path, a list of directories, or a
        pre-built list of file paths.  Returns True when at least one file
        was found (after optional filtering/resampling).
        """
        if isinstance(dir_path, str):
            print(f"Read {dir_path}")
            if not os.path.exists(dir_path):
                print(f"Data path {dir_path} not exists.")
                return False
            self.files = wmlu.get_files(dir_path, suffix=suffix)
        elif isinstance(dir_path, (list, tuple)) and isinstance(dir_path[0], (str, bytes)) and os.path.isdir(dir_path[0]):
            self.files = self.get_files_from_dirs(dir_path,
                                                  suffix=suffix)
        else:
            # Assume the caller passed the list of files directly.
            self.files = dir_path
        if self.filter_empty_files and self.label_text2id:
            self.files = self.apply_filter_empty_files(self.files)
        if self.resample_parameters is not None and self.label_text2id:
            self.files = self.resample(self.files)

        if len(self.files) == 0:
            return False

        if self.shuffle:
            random.shuffle(self.files)

        return True

    def get_files_from_dirs(self, dirs, suffix=".bin"):
        """Gather files with the given suffix from every directory in dirs."""
        all_files = []
        for dir_path in dirs:
            files = wmlu.get_files(dir_path,
                                   suffix=suffix)
            all_files.extend(files)

        return all_files

    def apply_filter_empty_files(self, files):
        """Keep only files where at least one label maps to a known id."""
        new_files = []
        for fs in files:
            with open(fs, "rb") as f:
                data = pickle.load(f)
                labels_name = data[GT_LABELS]
                labels = [self.label_text2id(x) for x in labels_name]
                is_none = [x is None for x in labels]
                if not all(is_none):
                    new_files.append(fs)
                else:
                    print(f"File {fs} is empty, labels names {labels_name}, labels {labels}")

        return new_files

    def resample(self, files):
        """Resample files per label using self.resample_parameters."""
        all_labels = []
        for fs in files:
            with open(fs, "rb") as f:
                data = pickle.load(f)
                labels_name = data[GT_LABELS]
                labels = [self.label_text2id(x) for x in labels_name]
                all_labels.append(labels)

        # NOTE(review): resample() is presumably exported by `from .common
        # import *` — confirm, it is not defined in this module.
        return resample(files, all_labels, self.resample_parameters)

    def get_items(self):
        '''
        Yield every sample in file order (entries may be None on read failure).
        :return:
        full_path,img_size,category_ids,category_names,boxes,binary_masks,area,is_crowd,num_annotations_skipped
        '''
        for i in range(len(self.files)):
            yield self.__getitem__(i)
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
# Objects365 category id -> COCO category id, for the subset of Objects365
# classes that have a (sometimes approximate) COCO equivalent.  Each entry's
# comment reads "O365 name:COCO name, instance count".  Ids absent from this
# table have no COCO counterpart (see trans_label2coco, which returns None
# for them).  Several O365 classes intentionally collapse onto one COCO
# class (e.g. SUV/Van/Sports Car -> car).
o365_id_to_coco_id = {1:1, # Person:person, 57827
                      3:62, # Chair:chair, 9319
                      6:3, # Car:car, 4560
                      9:44, # Bottle:bottle, 10983
                      10:67, # Desk:dining table, 3614
                      11:47, # Cup:cup, 12214
                      14:31, # Handbag/Satchel:handbag, 1644
                      16:51, # Plate:bowl, 4573
                      19:84, # Book:book, 2390
                      22:9, # Boat:boat, 1939
                      25:15, # Bench:bench, 827
                      26:64, # Potted Plant:potted plant, 2292
                      27:51, # Bowl/Basin:bowl, 12888
                      31:86, # Vase:vase, 1162
                      35:3, # SUV:car, 1149
                      36:46, # Wine Glass:wine glass, 2350
                      38:72, # Moniter/TV:tv, 7522
                      39:27, # Backpack:backpack, 510
                      40:28, # Umbrella:umbrella, 1664
                      41:10, # Traffic Light:traffic light, 2109
                      44:32, # Tie:tie, 1310
                      47:2, # Bicycle:bicycle, 1658
                      48:62, # Stool:chair, 2881
                      50:3, # Van:car, 968
                      51:63, # Couch:couch, 1424
                      56:6, # Bus:bus, 730
                      57:16, # Wild Bird:bird, 3584
                      59:4, # Motorcycle:motorcycle, 1117
                      62:77, # Cell Phone:cell phone, 2574
                      63:54, # Bread:sandwich, 1396
                      65:44, # Canned:bottle, 957
                      66:8, # Truck:truck, 1350
                      71:88, # Stuffed Toy:teddy bear, 992
                      73:9, # Sailboat:boat, 650
                      74:73, # Laptop:laptop, 3936
                      76:65, # Bed:bed, 1663
                      79:19, # Horse:horse, 1071
                      82:81, # Sink:sink, 8335
                      83:53, # Apple:apple, 4267
                      85:49, # Knife:knife, 5252
                      88:8, # Pickup Truck:truck, 617
                      89:48, # Fork:fork, 5267
                      93:18, # Dog:dog, 1075
                      94:50, # Spoon:spoon, 4535
                      95:85, # Clock:clock, 1289
                      97:21, # Cow:cow, 509
                      98:61, # Cake:cake, 1834
                      99:67, # Dinning Table:dining table, 2437
                      100:20, # Sheep:sheep, 460
                      105:55, # Orange/Tangerine:orange, 3676
                      106:44, # Toiletry:bottle, 6567
                      107:76, # Keyboard:keyboard, 5054
                      110:8, # Machinery Vehicle:truck, 1010
                      113:52, # Banana:banana, 5140
                      114:40, # Baseball Glove:baseball glove, 110
                      115:5, # Airplane:airplane, 4769
                      116:74, # Mouse:mouse, 4689
                      117:7, # Train:train, 1270
                      119:37, # Soccer:sports ball, 528
                      120:35, # Skiboard:skis, 204
                      121:33, # Luggage:suitcase, 1321
                      127:3, # Sports Car:car, 2115
                      128:13, # Stop Sign:stop sign, 435
                      129:61, # Dessert:cake, 3077
                      130:4, # Scooter:motorcycle, 1518
                      133:75, # Remote:remote, 1455
                      134:82, # Refrigerator:refrigerator, 2257
                      135:79, # Oven:oven, 1765
                      136:55, # Lemon:orange, 1638
                      137:16, # Duck:bird, 8646
                      138:39, # Baseball Bat:baseball bat, 254
                      140:17, # Cat:cat, 1374
                      142:56, # Broccoli:broccoli, 3389
                      144:59, # Pizza:pizza, 1504
                      145:22, # Elephant:elephant, 2502
                      146:41, # Skateboard:skateboard, 143
                      147:42, # Surfboard:surfboard, 1026
                      151:60, # Donut:donut, 2980
                      152:32, # Bow Tie:tie, 522
                      153:57, # Carrot:carrot, 3960
                      154:70, # Toilet:toilet, 3117
                      155:38, # Kite:kite, 1379
                      157:37, # Other Balls:sports ball, 1149
                      162:44, # Cleaning Products:bottle, 2660
                      164:78, # Microwave:microwave, 2085
                      165:16, # Pigeon:bird, 3948
                      166:37, # Baseball:sports ball, 509
                      168:67, # Coffee Table:dining table, 44
                      170:87, # Scissors:scissors, 1488
                      172:54, # Pie:sandwich, 516
                      174:36, # Snowboard:snowboard, 274
                      177:11, # Fire Hydrant:fire hydrant, 222
                      178:37, # Basketball:sports ball, 363
                      179:24, # Zebra:zebra, 2019
                      181:25, # Giraffe:giraffe, 1084
                      189:8, # Fire Truck:truck, 525
                      190:37, # Billards:sports ball, 2809
                      193:62, # Wheelchair:chair, 40*
                      195:33, # Briefcase:suitcase, 764
                      200:8, # Heavy Truck:truck, 639
                      201:54, # Hamburger:sandwich, 1227
                      205:43, # Tennis Racket:tennis racket, 193
                      207:37, # American Football:sports ball, 165
                      208:16, # earphone:bird, 9
                      211:37, # Tennis:sports ball, 523
                      212:9, # Ship:boat, 1004
                      220:34, # Frisbee:frisbee, 330
                      222:16, # Chicken:bird, 2043
                      227:90, # Toothbrush:toothbrush, 2152
                      235:58, # Hot dog:hot dog, 547
                      237:53, # Peach:apple, 1085
                      240:37, # Volleyball:sports ball, 198
                      242:16, # Goose:bird, 2717
                      248:37, # Golf Ball:sports ball, 565
                      249:8, # Ambulance:truck, 715
                      250:14, # Parking meter:parking meter, 59
                      258:16, # Penguin:bird, 1751
                      263:16, # Swan:bird, 1863
                      264:5, # Helicopter:airplane, 114
                      266:54, # Sandwich:sandwich, 789
                      278:80, # Toaster:toaster, 498
                      281:61, # Cheese:cake, 200
                      291:44, # Flask:bottle, 84
                      296:23, # Bear:bear, 133
                      309:3, # Formula 1 :car, 199
                      310:53, # Pomegranate:apple, 266
                      319:20, # Antelope:sheep, 62
                      320:16, # Parrot:bird, 342

                      326:70, # Urinal:toilet, 460
                      329:89, # Hair Dryer:hair drier, 315
                      330:61, # Egg tart:cake, 150
                      345:21, # Yak:cow, 413
                      356:90, # Cosmetics Brush/Eyeliner Pencil:toothbrush, 5
                      365:37, # Table Tennis :sports ball, 13
                      }
|
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os.path as osp
|
|
3
|
+
import wml.object_detection2.bboxes as odb
|
|
4
|
+
import numpy as np
|
|
5
|
+
import wml.object_detection2.visualization as odv
|
|
6
|
+
import wml.wml_utils as wmlu
|
|
7
|
+
import PIL.Image as Image
|
|
8
|
+
import wml.img_utils as wmli
|
|
9
|
+
from .common import *
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Object365:
    """Reader for Objects365 (v1) COCO-style JSON annotations.

    ``__getitem__`` returns DetData tuples with bboxes in (y0,x0,y1,x1)
    order, in absolute pixels unless ``absolute_coord=False``.
    """

    def __init__(self, absolute_coord=True):
        self.json_path = None
        self.img_dir = None
        self.data = None            # raw parsed json
        self.absolute_coord = absolute_coord
        self.images_data = {}       # image id -> image record
        self.annotation_data = {}   # image id -> [annotation record, ...]
        self.ids = []               # image ids, in file order
        self.id2name = {}           # category id -> category name

    def read_data(self, json_path, img_dir):
        """Load the annotation JSON and index it by image id."""
        self.json_path = json_path
        self.img_dir = img_dir
        with open(self.json_path, "r") as f:
            self.data = json.load(f)
        self.images_data = {}
        self.ids = []
        self.annotation_data = {}

        for d in self.data['images']:
            img_id = d['id']  # avoid shadowing the builtin `id`
            self.ids.append(img_id)
            self.images_data[img_id] = d
        for d in self.data['annotations']:
            img_id = d['image_id']
            self.add_anno_data(self.annotation_data, img_id, d)
        name_dict = {}
        for x in self.data['categories']:
            name_dict[x['id']] = x['name']
        self.id2name = name_dict
        info = list(name_dict.items())
        info.sort(key=lambda x: x[0])
        print("Category")
        wmlu.show_list(info)

    @staticmethod
    def add_anno_data(dict_data, id, item_data):
        """Append item_data to dict_data[id], creating the list on first use."""
        if id in dict_data:
            dict_data[id].append(item_data)
        else:
            dict_data[id] = [item_data]

    def __len__(self):
        return len(self.ids)

    def getitem_by_id(self, id):
        """Fetch a sample by image id instead of positional index."""
        idx = self.ids.index(id)
        return self.__getitem__(idx)

    def __getitem__(self, item):
        """Return DetData for the item-th image, or None on any failure
        (e.g. an image with no annotation entry)."""
        try:
            img_id = self.ids[item]
            item_data = self.annotation_data[img_id]
            is_crowd = []
            bboxes = []
            labels = []
            labels_names = []
            for data in item_data:
                is_crowd.append(data['iscrowd'])
                bboxes.append(data['bbox'])
                label = data['category_id']
                label_text = self.id2name[label]
                labels.append(label)
                labels_names.append(label_text)
            bboxes = np.array(bboxes, dtype=np.float32)
            # JSON bboxes are (x0,y0,w,h): convert to corners, then swap to
            # (y0,x0,y1,x1) ordering.
            bboxes[..., 2:] = bboxes[..., :2] + bboxes[..., 2:]
            bboxes = odb.npchangexyorder(bboxes)
            is_crowd = np.array(is_crowd, dtype=np.float32)
            image_data = self.images_data[img_id]
            shape = [image_data['height'], image_data['width']]
            if not self.absolute_coord:
                bboxes = odb.absolutely_boxes_to_relative_boxes(bboxes, width=shape[1], height=shape[0])
            img_name = image_data['file_name']
            img_file = osp.join(self.img_dir, img_name)
            # bboxes: [N,4] (y0,x0,y1,x1)
            return DetData(img_file, shape, labels, labels_names, bboxes, None, None, is_crowd, None)
        except Exception as e:
            # Best-effort: images missing from annotation_data raise KeyError
            # and are skipped by callers checking for None.
            print(e)
            return None

    def get_items(self):
        """Yield every readable sample, silently skipping failures."""
        for i in range(len(self.ids)):
            res = self.__getitem__(i)
            if res is None:
                continue
            yield res
|
|
101
|
+
|
|
102
|
+
class TorchObject365(Object365):
    """torchvision-style wrapper: yields (PIL image, list of annotation dicts)."""

    def __init__(self, img_dir, anno_path, absolute_coord=True):
        super().__init__(absolute_coord)
        super().read_data(anno_path, img_dir)

    def __getitem__(self, item):
        sample = super().__getitem__(item)
        full_path, shape, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = sample
        img = Image.fromarray(wmli.imread(full_path))
        # Convert (y0,x0,y1,x1) back to the COCO-style (x0,y0,w,h) layout
        # torchvision datasets expect.
        boxes = odb.npchangexyorder(boxes)
        boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2]
        annos = [
            {"bbox": boxes[i], "category_id": category_ids[i], "iscrowd": is_crowd[i]}
            for i in range(len(category_ids))
        ]

        return img, annos
|
|
121
|
+
|
|
122
|
+
if __name__ == "__main__":
    # Manual smoke test: draw annotations for a random subset of the
    # Objects365 training set and save the rendered images.
    import wml.img_utils as wmli
    import random
    import matplotlib.pyplot as plt
    import time

    random.seed(time.time())

    # NOTE(review): hard-coded developer machine paths below.
    save_dir = "/home/wj/ai/mldata1/Objects365/tmp"

    wmlu.create_empty_dir_remove_if(save_dir)
    data = Object365(absolute_coord=False)
    data.read_data("/home/wj/ai/mldata1/Objects365/Annotations/train/train.json","/home/wj/ai/mldata1/Objects365/Images/train/train")
    # Visualize at most max_nr randomly chosen images.
    idxs = list(range(len(data)))
    random.shuffle(idxs)
    max_nr = 100
    idxs = idxs[:max_nr]
    for idx in idxs:
        x = data[idx]
        full_path, shape,category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        img = wmli.imread(full_path)

        def text_fn(classes, scores):
            # Label each box with its category name.
            return data.id2name[classes]

        img = odv.draw_bboxes(
            img=img, classes=category_ids, scores=None, bboxes=boxes, color_fn=None,
            text_fn=text_fn, thickness=2,
            show_text=True,
            font_scale=0.8)
        save_path = osp.join(save_dir,osp.basename(full_path))
        wmli.imwrite(save_path,img)
        '''plt.figure()
        plt.imshow(img)
        plt.show()'''
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
from wml.iotoolkit.coco_toolkit import *
|
|
2
|
+
import os.path as osp
|
|
3
|
+
from .o365_to_coco import *
|
|
4
|
+
from .coco_data_fwd import ID_TO_TEXT
|
|
5
|
+
from functools import partial
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def trans_file_name(filename, image_dir):
    """Keep only the final directory plus the file name.

    Objects365 annotations store names like
    'images/v1/patch8/objects365_v1_00420917.jpg' while images actually
    live under data_root/train/patch8/objects365_v1_00420917.jpg, so only
    'patch8/objects365_v1_00420917.jpg' is retained.  read_data's
    image_dir must therefore point at data_root/train/.  The image_dir
    argument is accepted for interface compatibility but unused.
    """
    tail = filename.split("/")[-2:]
    return osp.join(*tail)
|
|
17
|
+
|
|
18
|
+
def trans_label2coco(id, fn=None):
    """Map an Objects365 category id to its COCO equivalent.

    Returns None when the id has no COCO counterpart; otherwise the
    mapped id, optionally passed through ``fn``.
    """
    if id not in o365_id_to_coco_id:
        return None
    coco_id = o365_id_to_coco_id[id]
    return fn(coco_id) if fn is not None else coco_id
|
|
27
|
+
|
|
28
|
+
class Object365V2(COCOData):
    """Objects365 v2 reader built on COCOData, optionally remapped to COCO labels."""

    def __init__(self, is_relative_coordinate=False, trans2coco=False, remove_crowd=True, trans_label=None):
        super().__init__(is_relative_coordinate=is_relative_coordinate,
                         remove_crowd=remove_crowd,
                         trans_label=trans_label,
                         include_masks=False)
        if trans2coco:
            # Compose the O365->COCO remapping with any user-supplied translator.
            if trans_label is not None:
                self.trans_label = partial(trans_label2coco, fn=trans_label)
            else:
                self.trans_label = trans_label2coco
        elif trans_label is not None:
            self.trans_label = trans_label
        self.trans_file_name = trans_file_name
        if trans2coco:
            # Report names in COCO's vocabulary instead of Objects365's.
            self.id2name = {k: info['name'] for k, info in ID_TO_TEXT.items()}

    def read_data(self, annotations_file, image_dir=None):
        """Read annotations; image_dir defaults to the annotation file's directory."""
        if image_dir is None:
            image_dir = osp.dirname(osp.abspath(annotations_file))
        return super().read_data(annotations_file, image_dir)
|
|
47
|
+
|
|
48
|
+
class TorchObject365V2(Object365V2):
    """torchvision-style wrapper over Object365V2: yields (PIL image, anno dicts)."""

    def __init__(self, img_dir, anno_path, trans2coco=False, trans_label=None):
        super().__init__(is_relative_coordinate=False, trans2coco=trans2coco, trans_label=trans_label)
        super().read_data(anno_path, img_dir)

    def __getitem__(self, item):
        sample = super().__getitem__(item)
        full_path, shape, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = sample
        try:
            img = wmli.imread(full_path)
        except Exception as e:
            # Unreadable image: fall back to a black placeholder of the
            # annotated size so iteration can continue.
            print(f"Read {full_path} faild, error:{e}")
            img = np.zeros([shape[0], shape[1], 3], dtype=np.uint8)
        img = Image.fromarray(img)
        # Convert (y0,x0,y1,x1) back to the COCO-style (x0,y0,w,h) layout.
        boxes = odb.npchangexyorder(boxes)
        boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2]
        annos = []
        for i in range(len(category_ids)):
            annos.append({"bbox": boxes[i],
                          "category_id": category_ids[i],
                          "iscrowd": is_crowd[i],
                          "area": area[i]})

        return img, annos
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
#coding=utf-8
|
|
2
|
+
# Pascal VOC 2007/2012: the 20 object classes, 1-indexed.
ID_TO_TEXT = {
    1: 'aeroplane',
    2: 'bicycle',
    3: 'bird',
    4: 'boat',
    5: 'bottle',
    6: 'bus',
    7: 'car',
    8: 'cat',
    9: 'chair',
    10: 'cow',
    11: 'diningtable',
    12: 'dog',
    13: 'horse',
    14: 'motorbike',
    15: 'person',
    16: 'pottedplant',
    17: 'sheep',
    18: 'sofa',
    19: 'train',
    20: 'tvmonitor',
}

# Inverse mapping, derived from ID_TO_TEXT so the two tables can never
# drift apart (the original maintained both by hand).
TEXT_TO_ID = {name: idx for idx, name in ID_TO_TEXT.items()}


def get_id_to_textv2():
    """Return ``{id: {"name": text}}`` — the record format expected by the
    COCO-style readers in this package."""
    return {k: {"name": v} for k, v in ID_TO_TEXT.items()}
|
|
51
|
+
|