python-wml 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of python-wml might be problematic. Click here for more details.

Files changed (164) hide show
  1. python_wml-3.0.0.dist-info/LICENSE +23 -0
  2. python_wml-3.0.0.dist-info/METADATA +51 -0
  3. python_wml-3.0.0.dist-info/RECORD +164 -0
  4. python_wml-3.0.0.dist-info/WHEEL +5 -0
  5. python_wml-3.0.0.dist-info/top_level.txt +1 -0
  6. wml/__init__.py +0 -0
  7. wml/basic_data_def/__init__.py +2 -0
  8. wml/basic_data_def/detection_data_def.py +279 -0
  9. wml/basic_data_def/io_data_def.py +2 -0
  10. wml/basic_img_utils.py +816 -0
  11. wml/img_patch.py +92 -0
  12. wml/img_utils.py +571 -0
  13. wml/iotoolkit/__init__.py +17 -0
  14. wml/iotoolkit/aic_keypoint.py +115 -0
  15. wml/iotoolkit/baidu_mask_toolkit.py +244 -0
  16. wml/iotoolkit/base_dataset.py +210 -0
  17. wml/iotoolkit/bboxes_statistics.py +515 -0
  18. wml/iotoolkit/build.py +0 -0
  19. wml/iotoolkit/cityscapes_toolkit.py +183 -0
  20. wml/iotoolkit/classification_data_statistics.py +25 -0
  21. wml/iotoolkit/coco_data_fwd.py +225 -0
  22. wml/iotoolkit/coco_keypoints.py +118 -0
  23. wml/iotoolkit/coco_keypoints_fmt2.py +103 -0
  24. wml/iotoolkit/coco_toolkit.py +397 -0
  25. wml/iotoolkit/coco_wholebody.py +269 -0
  26. wml/iotoolkit/common.py +108 -0
  27. wml/iotoolkit/crowd_pose.py +146 -0
  28. wml/iotoolkit/fast_labelme.py +110 -0
  29. wml/iotoolkit/image_folder.py +95 -0
  30. wml/iotoolkit/imgs_cache.py +58 -0
  31. wml/iotoolkit/imgs_reader_mt.py +73 -0
  32. wml/iotoolkit/labelme_base.py +102 -0
  33. wml/iotoolkit/labelme_json_to_img.py +49 -0
  34. wml/iotoolkit/labelme_toolkit.py +117 -0
  35. wml/iotoolkit/labelme_toolkit_fwd.py +733 -0
  36. wml/iotoolkit/labelmemckeypoints_dataset.py +169 -0
  37. wml/iotoolkit/lspet.py +48 -0
  38. wml/iotoolkit/mapillary_vistas_toolkit.py +269 -0
  39. wml/iotoolkit/mat_data.py +90 -0
  40. wml/iotoolkit/mckeypoints_statistics.py +28 -0
  41. wml/iotoolkit/mot_datasets.py +62 -0
  42. wml/iotoolkit/mpii.py +108 -0
  43. wml/iotoolkit/npmckeypoints_dataset.py +164 -0
  44. wml/iotoolkit/o365_to_coco.py +136 -0
  45. wml/iotoolkit/object365_toolkit.py +156 -0
  46. wml/iotoolkit/object365v2_toolkit.py +71 -0
  47. wml/iotoolkit/pascal_voc_data.py +51 -0
  48. wml/iotoolkit/pascal_voc_toolkit.py +194 -0
  49. wml/iotoolkit/pascal_voc_toolkit_fwd.py +473 -0
  50. wml/iotoolkit/penn_action.py +57 -0
  51. wml/iotoolkit/rawframe_dataset.py +129 -0
  52. wml/iotoolkit/rewrite_pascal_voc.py +28 -0
  53. wml/iotoolkit/semantic_data.py +49 -0
  54. wml/iotoolkit/split_file_by_type.py +29 -0
  55. wml/iotoolkit/sports_mot_datasets.py +78 -0
  56. wml/iotoolkit/vis_objectdetection_dataset.py +70 -0
  57. wml/iotoolkit/vis_torch_data.py +39 -0
  58. wml/iotoolkit/yolo_toolkit.py +38 -0
  59. wml/object_detection2/__init__.py +4 -0
  60. wml/object_detection2/basic_visualization.py +37 -0
  61. wml/object_detection2/bboxes.py +812 -0
  62. wml/object_detection2/data_process_toolkit.py +146 -0
  63. wml/object_detection2/keypoints.py +292 -0
  64. wml/object_detection2/mask.py +120 -0
  65. wml/object_detection2/metrics/__init__.py +3 -0
  66. wml/object_detection2/metrics/build.py +15 -0
  67. wml/object_detection2/metrics/classifier_toolkit.py +440 -0
  68. wml/object_detection2/metrics/common.py +71 -0
  69. wml/object_detection2/metrics/mckps_toolkit.py +338 -0
  70. wml/object_detection2/metrics/toolkit.py +1953 -0
  71. wml/object_detection2/npod_toolkit.py +361 -0
  72. wml/object_detection2/odtools.py +243 -0
  73. wml/object_detection2/standard_names.py +75 -0
  74. wml/object_detection2/visualization.py +956 -0
  75. wml/object_detection2/wmath.py +34 -0
  76. wml/semantic/__init__.py +0 -0
  77. wml/semantic/basic_toolkit.py +65 -0
  78. wml/semantic/mask_utils.py +156 -0
  79. wml/semantic/semantic_test.py +21 -0
  80. wml/semantic/structures.py +1 -0
  81. wml/semantic/toolkit.py +105 -0
  82. wml/semantic/visualization_utils.py +658 -0
  83. wml/threadtoolkit.py +50 -0
  84. wml/walgorithm.py +228 -0
  85. wml/wcollections.py +212 -0
  86. wml/wfilesystem.py +487 -0
  87. wml/wml_utils.py +657 -0
  88. wml/wstructures/__init__.py +4 -0
  89. wml/wstructures/common.py +9 -0
  90. wml/wstructures/keypoints_train_toolkit.py +149 -0
  91. wml/wstructures/kps_structures.py +579 -0
  92. wml/wstructures/mask_structures.py +1161 -0
  93. wml/wtorch/__init__.py +8 -0
  94. wml/wtorch/bboxes.py +104 -0
  95. wml/wtorch/classes_suppression.py +24 -0
  96. wml/wtorch/conv_module.py +181 -0
  97. wml/wtorch/conv_ws.py +144 -0
  98. wml/wtorch/data/__init__.py +16 -0
  99. wml/wtorch/data/_utils/__init__.py +45 -0
  100. wml/wtorch/data/_utils/collate.py +183 -0
  101. wml/wtorch/data/_utils/fetch.py +47 -0
  102. wml/wtorch/data/_utils/pin_memory.py +121 -0
  103. wml/wtorch/data/_utils/signal_handling.py +72 -0
  104. wml/wtorch/data/_utils/worker.py +227 -0
  105. wml/wtorch/data/base_data_loader_iter.py +93 -0
  106. wml/wtorch/data/dataloader.py +501 -0
  107. wml/wtorch/data/datapipes/__init__.py +1 -0
  108. wml/wtorch/data/datapipes/iter/__init__.py +12 -0
  109. wml/wtorch/data/datapipes/iter/batch.py +126 -0
  110. wml/wtorch/data/datapipes/iter/callable.py +92 -0
  111. wml/wtorch/data/datapipes/iter/listdirfiles.py +37 -0
  112. wml/wtorch/data/datapipes/iter/loadfilesfromdisk.py +30 -0
  113. wml/wtorch/data/datapipes/iter/readfilesfromtar.py +60 -0
  114. wml/wtorch/data/datapipes/iter/readfilesfromzip.py +63 -0
  115. wml/wtorch/data/datapipes/iter/sampler.py +94 -0
  116. wml/wtorch/data/datapipes/utils/__init__.py +0 -0
  117. wml/wtorch/data/datapipes/utils/common.py +65 -0
  118. wml/wtorch/data/dataset.py +354 -0
  119. wml/wtorch/data/datasets/__init__.py +4 -0
  120. wml/wtorch/data/datasets/common.py +53 -0
  121. wml/wtorch/data/datasets/listdirfilesdataset.py +36 -0
  122. wml/wtorch/data/datasets/loadfilesfromdiskdataset.py +30 -0
  123. wml/wtorch/data/distributed.py +135 -0
  124. wml/wtorch/data/multi_processing_data_loader_iter.py +866 -0
  125. wml/wtorch/data/sampler.py +267 -0
  126. wml/wtorch/data/single_process_data_loader_iter.py +24 -0
  127. wml/wtorch/data/test_data_loader.py +26 -0
  128. wml/wtorch/dataset_toolkit.py +67 -0
  129. wml/wtorch/depthwise_separable_conv_module.py +98 -0
  130. wml/wtorch/dist.py +591 -0
  131. wml/wtorch/dropblock/__init__.py +6 -0
  132. wml/wtorch/dropblock/dropblock.py +228 -0
  133. wml/wtorch/dropblock/dropout.py +40 -0
  134. wml/wtorch/dropblock/scheduler.py +48 -0
  135. wml/wtorch/ema.py +61 -0
  136. wml/wtorch/fc_module.py +73 -0
  137. wml/wtorch/functional.py +34 -0
  138. wml/wtorch/iter_dataset.py +26 -0
  139. wml/wtorch/loss.py +69 -0
  140. wml/wtorch/nets/__init__.py +0 -0
  141. wml/wtorch/nets/ckpt_toolkit.py +219 -0
  142. wml/wtorch/nets/fpn.py +276 -0
  143. wml/wtorch/nets/hrnet/__init__.py +0 -0
  144. wml/wtorch/nets/hrnet/config.py +2 -0
  145. wml/wtorch/nets/hrnet/hrnet.py +494 -0
  146. wml/wtorch/nets/misc.py +249 -0
  147. wml/wtorch/nets/resnet/__init__.py +0 -0
  148. wml/wtorch/nets/resnet/layers/__init__.py +17 -0
  149. wml/wtorch/nets/resnet/layers/aspp.py +144 -0
  150. wml/wtorch/nets/resnet/layers/batch_norm.py +231 -0
  151. wml/wtorch/nets/resnet/layers/blocks.py +111 -0
  152. wml/wtorch/nets/resnet/layers/wrappers.py +110 -0
  153. wml/wtorch/nets/resnet/r50_config.py +38 -0
  154. wml/wtorch/nets/resnet/resnet.py +691 -0
  155. wml/wtorch/nets/shape_spec.py +20 -0
  156. wml/wtorch/nets/simple_fpn.py +101 -0
  157. wml/wtorch/nms.py +109 -0
  158. wml/wtorch/nn.py +896 -0
  159. wml/wtorch/ocr_block.py +193 -0
  160. wml/wtorch/summary.py +331 -0
  161. wml/wtorch/train_toolkit.py +603 -0
  162. wml/wtorch/transformer_blocks.py +266 -0
  163. wml/wtorch/utils.py +719 -0
  164. wml/wtorch/wlr_scheduler.py +100 -0
@@ -0,0 +1,111 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ import fvcore.nn.weight_init as weight_init
5
+ from torch import nn
6
+
7
+ from .batch_norm import FrozenBatchNorm2d, get_norm
8
+ from .wrappers import Conv2d
9
+
10
+
11
+ """
12
+ CNN building blocks.
13
+ """
14
+
15
+
16
class CNNBlockBase(nn.Module):
    """
    Base class for CNN blocks that expose fixed channel counts and a stride.

    The input and output of the `forward()` method must be NCHW tensors.
    The method can perform arbitrary computation but must match the given
    channels and stride specification.

    Attribute:
        in_channels (int):
        out_channels (int):
        stride (int):
    """

    def __init__(self, in_channels, out_channels, stride):
        """
        The `__init__` method of any subclass should also contain these arguments.

        Args:
            in_channels (int): number of channels of the block's input
            out_channels (int): number of channels of the block's output
            stride (int): overall stride of the block
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride

    def freeze(self):
        """
        Make this block not trainable.

        Disables gradients on every parameter and converts all BatchNorm
        layers inside the block to FrozenBatchNorm.

        Returns:
            the block itself
        """
        for param in self.parameters():
            param.requires_grad = False
        FrozenBatchNorm2d.convert_frozen_batchnorm(self)
        return self
56
+
57
+
58
class DepthwiseSeparableConv2d(nn.Module):
    """
    A kxk depthwise convolution + a 1x1 convolution.

    In :paper:`xception`, norm & activation are applied on the second conv.
    :paper:`mobilenet` uses norm & activation on both convs.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        padding=1,
        dilation=1,
        *,
        norm1=None,
        activation1=None,
        norm2=None,
        activation2=None,
    ):
        """
        Args:
            norm1, norm2 (str or callable): normalization for the two conv layers.
            activation1, activation2 (callable(Tensor) -> Tensor): activation
                function for the two conv layers.
        """
        super().__init__()

        # Depthwise stage: one filter per input channel (groups == in_channels).
        # A conv followed by a norm layer does not need its own bias.
        depthwise_kwargs = dict(
            kernel_size=kernel_size,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=not norm1,
            norm=get_norm(norm1, in_channels),
            activation=activation1,
        )
        self.depthwise = Conv2d(in_channels, in_channels, **depthwise_kwargs)

        # Pointwise stage: 1x1 conv that mixes channels.
        pointwise_kwargs = dict(
            kernel_size=1,
            bias=not norm2,
            norm=get_norm(norm2, out_channels),
            activation=activation2,
        )
        self.pointwise = Conv2d(in_channels, out_channels, **pointwise_kwargs)

        # default initialization (Caffe2-style MSRA fill)
        weight_init.c2_msra_fill(self.depthwise)
        weight_init.c2_msra_fill(self.pointwise)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))
@@ -0,0 +1,110 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ """
3
+ Wrappers around on some nn functions, mainly to support empty tensors.
4
+
5
+ Ideally, add support directly in PyTorch to empty tensors in those functions.
6
+
7
+ These can be removed once https://github.com/pytorch/pytorch/issues/12013
8
+ is implemented
9
+ """
10
+
11
+ from typing import List
12
+ import torch
13
+ from torch.nn import functional as F
14
+
15
+
16
def cat(tensors: List[torch.Tensor], dim: int = 0):
    """
    Efficient version of torch.cat that avoids a copy if there is only a single element in a list
    """
    assert isinstance(tensors, (list, tuple))
    # With exactly one tensor there is nothing to concatenate — hand it back untouched.
    return tensors[0] if len(tensors) == 1 else torch.cat(tensors, dim)
24
+
25
+
26
def cross_entropy(input, target, *, reduction="mean", **kwargs):
    """
    Same as `torch.nn.functional.cross_entropy`, but returns 0 (instead of nan)
    for empty inputs.

    Args:
        input (Tensor): unnormalized scores, as accepted by `F.cross_entropy`.
        target (Tensor): ground-truth class indices.
        reduction (str): "mean" (default), "sum" or "none"; forwarded to
            `F.cross_entropy`.
        **kwargs: any other keyword arguments of `F.cross_entropy`.
    """
    if target.numel() == 0 and reduction == "mean":
        # "mean" over an empty batch would be nan; return a zero that is
        # still connected to the autograd graph.
        return input.sum() * 0.0  # connect the gradient
    # BUGFIX: `reduction` was captured by the signature but never forwarded,
    # so F.cross_entropy silently fell back to its default "mean" regardless
    # of what the caller asked for. Forward it explicitly.
    return F.cross_entropy(input, target, reduction=reduction, **kwargs)
34
+
35
+
36
+ class _NewEmptyTensorOp(torch.autograd.Function):
37
+ @staticmethod
38
+ def forward(ctx, x, new_shape):
39
+ ctx.shape = x.shape
40
+ return x.new_empty(new_shape)
41
+
42
+ @staticmethod
43
+ def backward(ctx, grad):
44
+ shape = ctx.shape
45
+ return _NewEmptyTensorOp.apply(grad, shape), None
46
+
47
+
48
class Conv2d(torch.nn.Conv2d):
    """
    A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
    """

    def __init__(self, *args, **kwargs):
        """
        Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:

        Args:
            norm (nn.Module, optional): a normalization layer
            activation (callable(Tensor) -> Tensor): a callable activation function

        It assumes that norm layer is used before activation.
        """
        # Pull out our extensions before torch.nn.Conv2d sees the kwargs.
        norm = kwargs.pop("norm", None)
        activation = kwargs.pop("activation", None)
        super().__init__(*args, **kwargs)
        self.norm = norm
        self.activation = activation

    def forward(self, x):
        # torchscript does not support SyncBatchNorm yet
        # (https://github.com/pytorch/pytorch/issues/40507), and this guard is
        # skipped when scripting because:
        # 1. torchscript is currently only supported in evaluation mode
        # 2. the PyTorch versions (>= 1.6) whose features are needed for
        #    scripting already support empty inputs in `Conv2d` natively.
        if not torch.jit.is_scripting():
            if x.numel() == 0 and self.training:
                # https://github.com/pytorch/pytorch/issues/12013
                assert not isinstance(
                    self.norm, torch.nn.SyncBatchNorm
                ), "SyncBatchNorm does not support empty inputs!"

        out = F.conv2d(
            x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups
        )
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
92
+
93
+
94
# Plain re-exports of torch building blocks, so callers can import every
# layer they need from this module alongside the wrapped `Conv2d` above.
ConvTranspose2d = torch.nn.ConvTranspose2d
BatchNorm2d = torch.nn.BatchNorm2d
interpolate = F.interpolate
Linear = torch.nn.Linear
98
+
99
+
100
def nonzero_tuple(x):
    """
    A 'as_tuple=True' version of torch.nonzero to support torchscript.
    because of https://github.com/pytorch/pytorch/issues/38718
    """
    # Eager mode: the native as_tuple path works fine.
    if not torch.jit.is_scripting():
        return x.nonzero(as_tuple=True)
    # Scripted mode: emulate as_tuple=True. Promote 0-dim inputs to 1-dim
    # first so that nonzero()/unbind() are well defined.
    if x.dim() == 0:
        x = x.unsqueeze(0)
    return x.nonzero().unbind(1)
@@ -0,0 +1,38 @@
1
from fvcore.common.config import CfgNode as CN

# Default configuration tree for a ResNet-50 backbone (detectron2-style
# CfgNode). Consumers read these values under _C.MODEL.RESNETS.
_C = CN()
_C.MODEL = CN()
_C.MODEL.RESNETS = CN()

# Network depth; presumably one of {18, 34, 50, 101, 152} — TODO confirm
# against the resnet builder that consumes this config.
_C.MODEL.RESNETS.DEPTH = 50
_C.MODEL.RESNETS.OUT_FEATURES = ["res4"]  # res4 for C4 backbone, res2..5 for FPN backbone

# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1

# Options: FrozenBN, GN, "SyncBN", "BN"
_C.MODEL.RESNETS.NORM = "BN"

# Baseline width of each group.
# Scaling this parameters will scale the width of all bottleneck layers.
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64

# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True

# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1

# Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet
# For R18 and R34, this needs to be set to 64
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64

# Apply Deformable Convolution in stages
# Specify if apply deform_conv on Res2, Res3, Res4, Res5
_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168);
# Use False for DeformableV1.
_C.MODEL.RESNETS.DEFORM_MODULATED = False
# Number of groups in deformable conv.
_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1