ultralytics-thop 0.2.7__tar.gz → 0.2.9__tar.gz
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/PKG-INFO +7 -3
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/README.md +6 -2
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/tests/test_matmul.py +1 -1
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/tests/test_relu.py +1 -2
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/__init__.py +1 -1
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/fx_profile.py +5 -8
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/vision/basic_hooks.py +1 -10
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/ultralytics_thop.egg-info/PKG-INFO +7 -3
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/LICENSE +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/pyproject.toml +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/setup.cfg +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/tests/test_conv2d.py +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/tests/test_utils.py +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/profile.py +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/rnn_hooks.py +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/utils.py +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/vision/__init__.py +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/vision/calc_func.py +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/ultralytics_thop.egg-info/SOURCES.txt +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/ultralytics_thop.egg-info/dependency_links.txt +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/ultralytics_thop.egg-info/requires.txt +0 -0
 - {ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/ultralytics_thop.egg-info/top_level.txt +0 -0
 
{ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics-thop
-Version: 0.2.7
+Version: 0.2.9
 Summary: Ultralytics THOP package for fast computation of PyTorch model FLOPs and parameters.
 Author-email: Ligeng Zhu <ligeng.zhu+github@gmail.com>
 Maintainer: Glenn Jocher

@@ -74,7 +74,7 @@ import torch

 model = resnet50()
 input = torch.randn(1, 3, 224, 224)
-macs, params = profile(model, inputs=(input, ))
+macs, params = profile(model, inputs=(input,))
 ```

 ### Define Custom Rules for Third-Party Modules

@@ -84,16 +84,19 @@ You can define custom rules for unsupported modules:
 ```python
 import torch.nn as nn

+
 class YourModule(nn.Module):
     # your definition
     pass

+
 def count_your_model(model, x, y):
     # your rule here
     pass

+
 input = torch.randn(1, 3, 224, 224)
-macs, params = profile(model, inputs=(input, ), custom_ops={YourModule: count_your_model})
+macs, params = profile(model, inputs=(input,), custom_ops={YourModule: count_your_model})
 ```

 ### Improve Output Readability

@@ -102,6 +105,7 @@ Use `thop.clever_format` for a more readable output:

 ```python
 from thop import clever_format
+
 macs, params = clever_format([macs, params], "%.3f")
 ```

{ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/README.md

@@ -40,7 +40,7 @@ import torch

 model = resnet50()
 input = torch.randn(1, 3, 224, 224)
-macs, params = profile(model, inputs=(input, ))
+macs, params = profile(model, inputs=(input,))
 ```

 ### Define Custom Rules for Third-Party Modules

@@ -50,16 +50,19 @@ You can define custom rules for unsupported modules:
 ```python
 import torch.nn as nn

+
 class YourModule(nn.Module):
     # your definition
     pass

+
 def count_your_model(model, x, y):
     # your rule here
     pass

+
 input = torch.randn(1, 3, 224, 224)
-macs, params = profile(model, inputs=(input, ), custom_ops={YourModule: count_your_model})
+macs, params = profile(model, inputs=(input,), custom_ops={YourModule: count_your_model})
 ```

 ### Improve Output Readability

@@ -68,6 +71,7 @@ Use `thop.clever_format` for a more readable output:

 ```python
 from thop import clever_format
+
 macs, params = clever_format([macs, params], "%.3f")
 ```

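The README hunks above update the documented profiling flow: tuple-style `inputs`, blank-line formatting, and a combined `custom_ops` call. A minimal end-to-end sketch of that flow, assuming torchvision provides `resnet50`; the custom module and its counting rule are hypothetical placeholders, and the counter mirrors the `m.total_ops += torch.DoubleTensor([...])` pattern used by THOP's built-in hooks:

```python
import torch
import torch.nn as nn
from torchvision.models import resnet50  # assumes torchvision is installed

from thop import clever_format, profile


class YourModule(nn.Module):
    """Hypothetical third-party module THOP has no built-in rule for."""

    def forward(self, x):
        return x * 2.0


def count_your_model(m, x, y):
    # Hypothetical rule: one op per output element, accumulated the same way
    # as the built-in hooks (m.total_ops is a buffer registered by profile).
    m.total_ops += torch.DoubleTensor([y.numel()])


# Basic profiling, matching the updated README line.
model = resnet50()
input = torch.randn(1, 3, 224, 224)
macs, params = profile(model, inputs=(input,))

# Profiling with a custom rule for the unsupported module.
macs_custom, _ = profile(YourModule(), inputs=(input,), custom_ops={YourModule: count_your_model})

# Readable formatting of the raw counts.
macs, params = clever_format([macs, params], "%.3f")
print(macs, params)
```

`clever_format` turns the raw counts into strings such as "4.145G" and "25.557M" for ResNet-50; exact figures depend on the model and THOP version.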
{ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/tests/test_matmul.py

@@ -15,7 +15,7 @@ class TestUtils:

     def test_matmul_case2(self):
         """Tests matrix multiplication to assert FLOPs and parameters of nn.Linear layer using random dimensions."""
-        for i in range(10):
+        for _ in range(10):
             n, in_c, out_c = torch.randint(1, 500, (3,)).tolist()
             net = nn.Linear(in_c, out_c)
             flops, params = profile(net, inputs=(torch.randn(n, in_c),))
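For context, the loop this hunk touches profiles randomly sized `nn.Linear` layers. A standalone sketch of one iteration of that check, assuming `profile` reports dense-layer MACs as batch × in_features × out_features:

```python
import torch
import torch.nn as nn

from thop import profile

# One iteration of the test loop: random sizes, then compare against the
# expected MAC count for a dense layer (assumed here to be n * in_c * out_c).
n, in_c, out_c = torch.randint(1, 500, (3,)).tolist()
net = nn.Linear(in_c, out_c)
flops, params = profile(net, inputs=(torch.randn(n, in_c),))
assert flops == n * in_c * out_c, f"expected {n * in_c * out_c}, got {flops}"
```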
{ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/tests/test_relu.py

@@ -9,8 +9,7 @@ class TestUtils:
         """Tests the ReLU activation function to ensure it has zero FLOPs and checks parameter count using THOP
         profiling.
         """
-        n, in_c, out_c = 1, 100, 200
-        data = torch.randn(n, in_c)
+        n, in_c, _out_c = 1, 100, 200
         net = nn.ReLU()
         flops, params = profile(net, inputs=(torch.randn(n, in_c),))
         print(flops, params)
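The tidied test still exercises the behaviour named in its docstring: an element-wise activation contributes no multiply-accumulates and holds no parameters. A minimal standalone version of that check:

```python
import torch
import torch.nn as nn

from thop import profile

n, in_c = 1, 100
net = nn.ReLU()
flops, params = profile(net, inputs=(torch.randn(n, in_c),))
print(flops, params)
assert flops == 0 and params == 0  # ReLU adds no MACs and has no parameters
```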
{ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/fx_profile.py

@@ -33,10 +33,10 @@ def count_matmul(input_shapes, output_shapes):

 def count_fn_linear(input_shapes, output_shapes, *args, **kwargs):
     """Calculates total operations (FLOPs) for a linear layer given input and output shapes."""
-    …
+    flops = count_matmul(input_shapes, output_shapes)
     if "bias" in kwargs:
-        …
-    return …
+        flops += output_shapes[0].numel()
+    return flops


 from .vision.calc_func import calculate_conv

@@ -131,7 +131,6 @@ def fx_profile(mod: nn.Module, input: th.Tensor, verbose=False):
     information if verbose.
     """
     gm: torch.fx.GraphModule = symbolic_trace(mod)
-    g = gm.graph
     ShapeProp(gm).propagate(input)

     fprint = null_print

@@ -148,7 +147,6 @@ def fx_profile(mod: nn.Module, input: th.Tensor, verbose=False):
         node_flops = None

         input_shapes = []
-        output_shapes = []
         fprint("input_shape:", end="\t")
         for arg in node.args:
             if str(arg) not in v_maps:

@@ -157,8 +155,7 @@ def fx_profile(mod: nn.Module, input: th.Tensor, verbose=False):
             input_shapes.append(v_maps[str(arg)])
         fprint()
         fprint(f"output_shape:\t{node.meta['tensor_meta'].shape}")
-        output_shapes…
-        …
+        output_shapes = [node.meta["tensor_meta"].shape]
         if node.op in ["output", "placeholder"]:
             node_flops = 0
         elif node.op == "call_function":

@@ -194,7 +191,7 @@ def fx_profile(mod: nn.Module, input: th.Tensor, verbose=False):
                 print("weight_shape: None")
             else:
                 print(type(m))
-                print(f"weight_shape: {mod.state_dict()[node.target…
+                print(f"weight_shape: {mod.state_dict()[f'{node.target}.weight'].shape}")

         v_maps[str(node.name)] = node.meta["tensor_meta"].shape
         if node_flops is not None:
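All of the fx_profile changes above operate on shape metadata recorded by torch.fx: the module is symbolically traced, `ShapeProp` attaches a `tensor_meta` entry to every node, and counters such as `count_matmul` work purely from those input/output shapes. A short sketch of that pipeline on a toy model (the shape-based MAC arithmetic at the end is illustrative, not a call into THOP):

```python
import torch
import torch.nn as nn
from torch.fx import symbolic_trace
from torch.fx.passes.shape_prop import ShapeProp

model = nn.Sequential(nn.Linear(100, 200), nn.ReLU())
gm = symbolic_trace(model)                    # build an FX GraphModule
ShapeProp(gm).propagate(torch.randn(8, 100))  # record tensor_meta on every node

for node in gm.graph.nodes:
    # The same metadata fx_profile reads: the output shape lives in node.meta["tensor_meta"].
    if "tensor_meta" in node.meta:
        print(node.op, node.name, tuple(node.meta["tensor_meta"].shape))

# Counting MACs from shapes alone, in the spirit of count_matmul: a dense map
# from (n, in) to (n, out) costs n * in * out multiply-accumulates.
in_shape, out_shape = (8, 100), (8, 200)
print("MACs:", in_shape[0] * in_shape[1] * out_shape[1])  # 160000
```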
{ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/thop/vision/basic_hooks.py

@@ -10,9 +10,6 @@ multiply_adds = 1

 def count_parameters(m, x, y):
     """Calculate and update the total number of parameters in a given PyTorch model."""
-    total_params = 0
-    for p in m.parameters():
-        total_params += torch.DoubleTensor([p.numel()])
     m.total_params[0] = calculate_parameters(m.parameters())


@@ -24,10 +21,7 @@ def zero_ops(m, x, y):
 def count_convNd(m: _ConvNd, x, y: torch.Tensor):
     """Calculate and add the number of convolutional operations (FLOPs) to the model's total operations count."""
     x = x[0]
-
-    kernel_ops = torch.zeros(m.weight.size()[2:]).numel()  # Kw x Kh
-    bias_ops = 1 if m.bias is not None else 0
-
+
     m.total_ops += calculate_conv2d_flops(
         input_size=list(x.shape),
         output_size=list(y.shape),

@@ -97,9 +91,6 @@ def count_prelu(m, x, y):
 def count_relu(m, x, y):
     """Calculate and update the total operation counts for a ReLU layer."""
     x = x[0]
-
-    nelements = x.numel()
-
     m.total_ops += calculate_relu_flops(list(x.shape))

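`count_convNd` now delegates entirely to `calculate_conv2d_flops`, which works from the input/output sizes rather than the locals removed above. A hedged sketch of the standard formula that kind of helper is based on (output elements × kernel window × input channels per group), checked against a concrete layer; the function below is an illustration, not THOP's implementation, and bias terms are ignored:

```python
import torch
import torch.nn as nn


def conv2d_macs(input_size, output_size, kernel_size, groups):
    """Approximate MACs for a 2D convolution: every output element needs
    (in_channels / groups) * kh * kw multiply-accumulates (bias ignored)."""
    in_channels = input_size[1]
    out_elements = 1
    for d in output_size:
        out_elements *= d
    return out_elements * (in_channels // groups) * kernel_size[0] * kernel_size[1]


conv = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
x = torch.randn(1, 3, 32, 32)
y = conv(x)
print(conv2d_macs(list(x.shape), list(y.shape), conv.kernel_size, conv.groups))
# 1 * 16 * 32 * 32 * (3 * 3 * 3) = 442368
```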
{ultralytics_thop-0.2.7 → ultralytics_thop-0.2.9}/ultralytics_thop.egg-info/PKG-INFO

Same changes as PKG-INFO above: the version is bumped from 0.2.7 to 0.2.9 and the embedded README usage examples receive the same formatting updates.