python-wml 3.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of python-wml might be problematic. Click here for more details.
- python_wml-3.0.0.dist-info/LICENSE +23 -0
- python_wml-3.0.0.dist-info/METADATA +51 -0
- python_wml-3.0.0.dist-info/RECORD +164 -0
- python_wml-3.0.0.dist-info/WHEEL +5 -0
- python_wml-3.0.0.dist-info/top_level.txt +1 -0
- wml/__init__.py +0 -0
- wml/basic_data_def/__init__.py +2 -0
- wml/basic_data_def/detection_data_def.py +279 -0
- wml/basic_data_def/io_data_def.py +2 -0
- wml/basic_img_utils.py +816 -0
- wml/img_patch.py +92 -0
- wml/img_utils.py +571 -0
- wml/iotoolkit/__init__.py +17 -0
- wml/iotoolkit/aic_keypoint.py +115 -0
- wml/iotoolkit/baidu_mask_toolkit.py +244 -0
- wml/iotoolkit/base_dataset.py +210 -0
- wml/iotoolkit/bboxes_statistics.py +515 -0
- wml/iotoolkit/build.py +0 -0
- wml/iotoolkit/cityscapes_toolkit.py +183 -0
- wml/iotoolkit/classification_data_statistics.py +25 -0
- wml/iotoolkit/coco_data_fwd.py +225 -0
- wml/iotoolkit/coco_keypoints.py +118 -0
- wml/iotoolkit/coco_keypoints_fmt2.py +103 -0
- wml/iotoolkit/coco_toolkit.py +397 -0
- wml/iotoolkit/coco_wholebody.py +269 -0
- wml/iotoolkit/common.py +108 -0
- wml/iotoolkit/crowd_pose.py +146 -0
- wml/iotoolkit/fast_labelme.py +110 -0
- wml/iotoolkit/image_folder.py +95 -0
- wml/iotoolkit/imgs_cache.py +58 -0
- wml/iotoolkit/imgs_reader_mt.py +73 -0
- wml/iotoolkit/labelme_base.py +102 -0
- wml/iotoolkit/labelme_json_to_img.py +49 -0
- wml/iotoolkit/labelme_toolkit.py +117 -0
- wml/iotoolkit/labelme_toolkit_fwd.py +733 -0
- wml/iotoolkit/labelmemckeypoints_dataset.py +169 -0
- wml/iotoolkit/lspet.py +48 -0
- wml/iotoolkit/mapillary_vistas_toolkit.py +269 -0
- wml/iotoolkit/mat_data.py +90 -0
- wml/iotoolkit/mckeypoints_statistics.py +28 -0
- wml/iotoolkit/mot_datasets.py +62 -0
- wml/iotoolkit/mpii.py +108 -0
- wml/iotoolkit/npmckeypoints_dataset.py +164 -0
- wml/iotoolkit/o365_to_coco.py +136 -0
- wml/iotoolkit/object365_toolkit.py +156 -0
- wml/iotoolkit/object365v2_toolkit.py +71 -0
- wml/iotoolkit/pascal_voc_data.py +51 -0
- wml/iotoolkit/pascal_voc_toolkit.py +194 -0
- wml/iotoolkit/pascal_voc_toolkit_fwd.py +473 -0
- wml/iotoolkit/penn_action.py +57 -0
- wml/iotoolkit/rawframe_dataset.py +129 -0
- wml/iotoolkit/rewrite_pascal_voc.py +28 -0
- wml/iotoolkit/semantic_data.py +49 -0
- wml/iotoolkit/split_file_by_type.py +29 -0
- wml/iotoolkit/sports_mot_datasets.py +78 -0
- wml/iotoolkit/vis_objectdetection_dataset.py +70 -0
- wml/iotoolkit/vis_torch_data.py +39 -0
- wml/iotoolkit/yolo_toolkit.py +38 -0
- wml/object_detection2/__init__.py +4 -0
- wml/object_detection2/basic_visualization.py +37 -0
- wml/object_detection2/bboxes.py +812 -0
- wml/object_detection2/data_process_toolkit.py +146 -0
- wml/object_detection2/keypoints.py +292 -0
- wml/object_detection2/mask.py +120 -0
- wml/object_detection2/metrics/__init__.py +3 -0
- wml/object_detection2/metrics/build.py +15 -0
- wml/object_detection2/metrics/classifier_toolkit.py +440 -0
- wml/object_detection2/metrics/common.py +71 -0
- wml/object_detection2/metrics/mckps_toolkit.py +338 -0
- wml/object_detection2/metrics/toolkit.py +1953 -0
- wml/object_detection2/npod_toolkit.py +361 -0
- wml/object_detection2/odtools.py +243 -0
- wml/object_detection2/standard_names.py +75 -0
- wml/object_detection2/visualization.py +956 -0
- wml/object_detection2/wmath.py +34 -0
- wml/semantic/__init__.py +0 -0
- wml/semantic/basic_toolkit.py +65 -0
- wml/semantic/mask_utils.py +156 -0
- wml/semantic/semantic_test.py +21 -0
- wml/semantic/structures.py +1 -0
- wml/semantic/toolkit.py +105 -0
- wml/semantic/visualization_utils.py +658 -0
- wml/threadtoolkit.py +50 -0
- wml/walgorithm.py +228 -0
- wml/wcollections.py +212 -0
- wml/wfilesystem.py +487 -0
- wml/wml_utils.py +657 -0
- wml/wstructures/__init__.py +4 -0
- wml/wstructures/common.py +9 -0
- wml/wstructures/keypoints_train_toolkit.py +149 -0
- wml/wstructures/kps_structures.py +579 -0
- wml/wstructures/mask_structures.py +1161 -0
- wml/wtorch/__init__.py +8 -0
- wml/wtorch/bboxes.py +104 -0
- wml/wtorch/classes_suppression.py +24 -0
- wml/wtorch/conv_module.py +181 -0
- wml/wtorch/conv_ws.py +144 -0
- wml/wtorch/data/__init__.py +16 -0
- wml/wtorch/data/_utils/__init__.py +45 -0
- wml/wtorch/data/_utils/collate.py +183 -0
- wml/wtorch/data/_utils/fetch.py +47 -0
- wml/wtorch/data/_utils/pin_memory.py +121 -0
- wml/wtorch/data/_utils/signal_handling.py +72 -0
- wml/wtorch/data/_utils/worker.py +227 -0
- wml/wtorch/data/base_data_loader_iter.py +93 -0
- wml/wtorch/data/dataloader.py +501 -0
- wml/wtorch/data/datapipes/__init__.py +1 -0
- wml/wtorch/data/datapipes/iter/__init__.py +12 -0
- wml/wtorch/data/datapipes/iter/batch.py +126 -0
- wml/wtorch/data/datapipes/iter/callable.py +92 -0
- wml/wtorch/data/datapipes/iter/listdirfiles.py +37 -0
- wml/wtorch/data/datapipes/iter/loadfilesfromdisk.py +30 -0
- wml/wtorch/data/datapipes/iter/readfilesfromtar.py +60 -0
- wml/wtorch/data/datapipes/iter/readfilesfromzip.py +63 -0
- wml/wtorch/data/datapipes/iter/sampler.py +94 -0
- wml/wtorch/data/datapipes/utils/__init__.py +0 -0
- wml/wtorch/data/datapipes/utils/common.py +65 -0
- wml/wtorch/data/dataset.py +354 -0
- wml/wtorch/data/datasets/__init__.py +4 -0
- wml/wtorch/data/datasets/common.py +53 -0
- wml/wtorch/data/datasets/listdirfilesdataset.py +36 -0
- wml/wtorch/data/datasets/loadfilesfromdiskdataset.py +30 -0
- wml/wtorch/data/distributed.py +135 -0
- wml/wtorch/data/multi_processing_data_loader_iter.py +866 -0
- wml/wtorch/data/sampler.py +267 -0
- wml/wtorch/data/single_process_data_loader_iter.py +24 -0
- wml/wtorch/data/test_data_loader.py +26 -0
- wml/wtorch/dataset_toolkit.py +67 -0
- wml/wtorch/depthwise_separable_conv_module.py +98 -0
- wml/wtorch/dist.py +591 -0
- wml/wtorch/dropblock/__init__.py +6 -0
- wml/wtorch/dropblock/dropblock.py +228 -0
- wml/wtorch/dropblock/dropout.py +40 -0
- wml/wtorch/dropblock/scheduler.py +48 -0
- wml/wtorch/ema.py +61 -0
- wml/wtorch/fc_module.py +73 -0
- wml/wtorch/functional.py +34 -0
- wml/wtorch/iter_dataset.py +26 -0
- wml/wtorch/loss.py +69 -0
- wml/wtorch/nets/__init__.py +0 -0
- wml/wtorch/nets/ckpt_toolkit.py +219 -0
- wml/wtorch/nets/fpn.py +276 -0
- wml/wtorch/nets/hrnet/__init__.py +0 -0
- wml/wtorch/nets/hrnet/config.py +2 -0
- wml/wtorch/nets/hrnet/hrnet.py +494 -0
- wml/wtorch/nets/misc.py +249 -0
- wml/wtorch/nets/resnet/__init__.py +0 -0
- wml/wtorch/nets/resnet/layers/__init__.py +17 -0
- wml/wtorch/nets/resnet/layers/aspp.py +144 -0
- wml/wtorch/nets/resnet/layers/batch_norm.py +231 -0
- wml/wtorch/nets/resnet/layers/blocks.py +111 -0
- wml/wtorch/nets/resnet/layers/wrappers.py +110 -0
- wml/wtorch/nets/resnet/r50_config.py +38 -0
- wml/wtorch/nets/resnet/resnet.py +691 -0
- wml/wtorch/nets/shape_spec.py +20 -0
- wml/wtorch/nets/simple_fpn.py +101 -0
- wml/wtorch/nms.py +109 -0
- wml/wtorch/nn.py +896 -0
- wml/wtorch/ocr_block.py +193 -0
- wml/wtorch/summary.py +331 -0
- wml/wtorch/train_toolkit.py +603 -0
- wml/wtorch/transformer_blocks.py +266 -0
- wml/wtorch/utils.py +719 -0
- wml/wtorch/wlr_scheduler.py +100 -0
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
|
2
|
+
import math
|
|
3
|
+
import fvcore.nn.weight_init as weight_init
|
|
4
|
+
import torch
|
|
5
|
+
import torch.nn.functional as F
|
|
6
|
+
from torch import nn
|
|
7
|
+
from wml.wtorch.nn import Conv2d
|
|
8
|
+
from wml.wtorch.nn import get_norm
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class SimpleFPN(torch.nn.Module):
    """
    This module implements :paper:`FPN`.
    It creates pyramid features built on top of some input feature maps.
    """

    # Annotated as jit.Final so TorchScript treats the fuse mode as a compile-time constant.
    _fuse_type: torch.jit.Final[str]

    def __init__( self, in_channels, out_channels, norm="", fuse_type="sum",interpolate_mode="nearest"
    ):
        """
        Args:
            in_channels (list[int]): channel count of each input feature map.
                Iterated in order; forward() treats xs[0] as the highest
                resolution and xs[-1] as the lowest.
            out_channels (int): number of channels in the output feature maps.
            norm (str): the normalization to use. An empty string means no norm,
                in which case the convs use a bias.
            fuse_type (str): types for fusing the top down features and the lateral
                ones. It can be "sum" (default), which sums up element-wise; or "avg",
                which takes the element-wise mean of the two.
            interpolate_mode (str): "nearest" (default) upsamples the top-down path
                by a fixed factor of 2; any other value upsamples bilinearly to the
                exact spatial size of the lateral feature.
        """
        super().__init__()

        # Feature map strides and channels from the bottom up network (e.g. ResNet)
        in_channels_per_feature = in_channels
        self.interpolate_mode = interpolate_mode

        lateral_convs = []
        output_convs = []

        # When a norm layer follows the conv, a conv bias would be redundant.
        use_bias = norm == ""
        for idx, in_channels in enumerate(in_channels_per_feature):
            lateral_norm = get_norm(norm, out_channels)
            output_norm = get_norm(norm, out_channels)

            # 1x1 conv projecting the input feature map to out_channels.
            lateral_conv = Conv2d(
                in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
            )
            # 3x3 conv applied after fusion to smooth the merged feature.
            output_conv = Conv2d(
                out_channels,
                out_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=use_bias,
                norm=output_norm,
            )
            weight_init.c2_xavier_fill(lateral_conv)
            weight_init.c2_xavier_fill(output_conv)
            # Register under stable names so checkpoints keep consistent keys.
            self.add_module("fpn_lateral{}".format(idx), lateral_conv)
            self.add_module("fpn_output{}".format(idx), output_conv)

            lateral_convs.append(lateral_conv)
            output_convs.append(output_conv)
        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]
        # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]

        assert fuse_type in {"avg", "sum"}
        self._fuse_type = fuse_type

    def forward(self, xs):
        """
        Args:
            xs: list of tensors, shape like [H,W],[H//2,W//2],[H//4,W//4],...

        Returns:
            list of tensors, ordered like xs (highest resolution first).
        """
        results = []
        # Start the top-down pathway from the lowest-resolution input (xs[-1]).
        prev_features = self.lateral_convs[0](xs[-1])
        results.append(self.output_convs[0](prev_features))

        # Reverse feature maps into top-down order (from low to high resolution)
        for idx, (lateral_conv, output_conv) in enumerate(
            zip(self.lateral_convs, self.output_convs)
        ):
            # Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336
            # Therefore we loop over all modules but skip the first one
            if idx > 0:
                features = xs[-idx - 1]
                if self.interpolate_mode == "nearest":
                    # Fixed 2x upsampling; assumes adjacent levels differ by stride 2.
                    top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest")
                else:
                    # Match the lateral feature's exact spatial size.
                    top_down_features = F.interpolate(prev_features, size=features.shape[-2:],mode="bilinear")
                lateral_features = lateral_conv(features)
                prev_features = lateral_features + top_down_features
                if self._fuse_type == "avg":
                    prev_features /= 2
                # Prepend so results end up ordered from high to low resolution.
                results.insert(0, output_conv(prev_features))

        return results
|
wml/wtorch/nms.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import numpy as np
|
|
3
|
+
import torch
|
|
4
|
+
import torchvision
|
|
5
|
+
from typing import Tuple
|
|
6
|
+
from torch import Tensor
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def __soft_nms(dets, box_scores, thresh=0.001, sigma=0.5):
    """
    Pure PyTorch implementation of the Soft-NMS algorithm (Gaussian variant).

    # Augments
        dets: boxes coordinate tensor (format:[y1, x1, y2, x2])
        box_scores: box score tensor; NOT modified — decay happens on an
            internal copy (the original implementation mutated it in place).
        sigma: variance of Gaussian function, decay=exp(-(iou*iou)/sigma), smaller sigma, decay faster.
        thresh: score thresh; boxes whose decayed score falls to or below it are dropped.
    # Return
        keep: indices (into the caller's original ordering) of the selected boxes
        valid_scores: the decayed scores of those boxes
    """

    # Append each box's original index as a 5th column so that indices
    # survive the in-place reordering below. torch.cat allocates a new
    # tensor, so the caller's `dets` is never touched.
    N = dets.shape[0]
    device = dets.device
    indexes = torch.arange(0, N, dtype=torch.float).view(N, 1)
    indexes = indexes.to(device)
    dets = torch.cat((dets, indexes), dim=1)

    # Copy the scores: the loop decays them in place, and doing that on
    # the caller's tensor would be a surprising side effect.
    scores = box_scores.clone()

    # The order of boxes coordinate is [y1,x1,y2,x2]
    y1 = dets[:, 0]
    x1 = dets[:, 1]
    y2 = dets[:, 2]
    x2 = dets[:, 3]
    areas = (x2 - x1) * (y2 - y1)

    for i in range(N):
        # intermediate parameters for later parameters exchange
        tscore = scores[i].clone()
        pos = i + 1

        if i != N - 1:
            # Selection-sort step: bring the highest-scored remaining box
            # to position i so it suppresses the rest.
            maxscore, maxpos = torch.max(scores[pos:], dim=0)
            if tscore < maxscore:
                dets[i], dets[maxpos + i + 1] = dets[maxpos + i + 1].clone(), dets[i].clone()
                scores[i], scores[maxpos + i + 1] = scores[maxpos + i + 1].clone(), scores[i].clone()
                areas[i], areas[maxpos + i + 1] = areas[maxpos + i + 1].clone(), areas[i].clone()

        # IoU between box i and every remaining box.
        yy1 = torch.maximum(dets[i, 0][None], dets[pos:, 0])
        xx1 = torch.maximum(dets[i, 1][None], dets[pos:, 1])
        yy2 = torch.minimum(dets[i, 2][None], dets[pos:, 2])
        xx2 = torch.minimum(dets[i, 3][None], dets[pos:, 3])

        zeros = torch.zeros_like(yy1)
        w = torch.maximum(zeros, xx2 - xx1)
        h = torch.maximum(zeros, yy2 - yy1)
        inter = w * h
        ious = torch.div(inter, (areas[i] + areas[pos:] - inter))

        # Gaussian decay: overlapping boxes are down-weighted, not removed.
        weight = torch.exp(-(ious * ious) / sigma)
        scores[pos:] = weight * scores[pos:]

    # select the boxes and keep the corresponding indexes
    valid_mask = scores > thresh
    keep = dets[:, 4][valid_mask].long()
    valid_scores = scores[valid_mask]

    return keep, valid_scores
|
|
71
|
+
|
|
72
|
+
def soft_nms(dets, box_scores, thresh=0.001, sigma=0.5):
    """Run Soft-NMS and return only the kept box indices.

    Thin wrapper around the internal implementation; the decayed scores
    it produces are discarded here (see soft_nmsv2 to get them as well).
    """
    kept_indices, _decayed_scores = __soft_nms(dets, box_scores, thresh=thresh, sigma=sigma)
    return kept_indices
|
|
75
|
+
|
|
76
|
+
def soft_nmsv2(dets, box_scores, thresh=0.001, sigma=0.5, cuda=0):
    """Run Soft-NMS and return both the kept indices and their decayed scores.

    NOTE(review): ``cuda`` is accepted for backward compatibility but is
    never read; the computation runs on whatever device ``dets`` lives on.
    """
    # The internal helper already returns the (keep, scores) pair.
    return __soft_nms(dets, box_scores, thresh=thresh, sigma=sigma)
|
|
80
|
+
|
|
81
|
+
def group_nms(bboxes,scores,ids,nms_threshold:float=0.7,max_value:float=20000.0):
    '''
    Group-aware NMS: boxes carrying different ``ids`` never suppress each other.

    boxes (Tensor[N, 4])): boxes to perform NMS on. They
    are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and
    ``0 <= y1 < y2``.
    max_value: per-group coordinate offset; must exceed every box coordinate.
        When None it is derived from the data as max(bboxes) + 1.
    '''
    if max_value is None:
        max_value = torch.max(bboxes) + 1.0
    bboxes = bboxes.float()
    # Shift each group's boxes into a disjoint coordinate range so one
    # torchvision NMS call never compares boxes from different groups.
    group_offsets = ids[:, None].to(bboxes.dtype) * max_value
    shifted_boxes = bboxes + group_offsets
    return torchvision.ops.nms(shifted_boxes, scores, nms_threshold)
|
|
93
|
+
|
|
94
|
+
def nms(bboxes:Tensor,scores:Tensor,labels:Tensor,nms_threshold:float=0.5,max_num:int=1000)->Tuple[Tensor,Tensor,Tensor]:
    """
    boxes (Tensor[N, 4])): boxes to perform NMS on. They
    are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and
    ``0 <= y1 < y2``.

    Returns:
        dets: [M, 5] kept boxes with their score appended as the last column
        labels of the kept boxes
        keep: int64 indices of the kept boxes (at most ``max_num`` when > 0)
    """
    keep = torchvision.ops.nms(bboxes, scores, iou_threshold=nms_threshold)
    # A non-positive max_num disables the cap on the number of detections.
    if max_num > 0:
        keep = keep[:max_num]

    kept_scores = scores[keep].unsqueeze(-1)
    dets = torch.cat([bboxes[keep], kept_scores], dim=-1)

    return dets, labels[keep], keep.to(torch.int64)
|