ultralytics-thop 2.0.0__py3-none-any.whl → 2.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
thop/__init__.py CHANGED
@@ -1,4 +1,4 @@
-__version__ = "2.0.0"
+__version__ = "2.0.2"
 
 import torch
 
thop/fx_profile.py CHANGED
@@ -13,7 +13,7 @@ if LooseVersion(torch.__version__) < LooseVersion("1.8.0"):
 
 
 def count_clamp(input_shapes, output_shapes):
-    """Ensures proper array sizes for tensors by clamping input and output shapes."""
+    """Ensures tensor array sizes are appropriate by clamping specified input and output shapes."""
     return 0
 
 
@@ -23,7 +23,7 @@ def count_mul(input_shapes, output_shapes):
 
 
 def count_matmul(input_shapes, output_shapes):
-    """Calculates the total number of operations for a matrix multiplication given input and output shapes."""
+    """Calculates matrix multiplication ops based on input and output tensor shapes for performance profiling."""
     in_shape = input_shapes[0]
     out_shape = output_shapes[0]
     in_features = in_shape[-1]
@@ -32,7 +32,7 @@ def count_matmul(input_shapes, output_shapes):
 
 
 def count_fn_linear(input_shapes, output_shapes, *args, **kwargs):
-    """Calculates total operations (FLOPs) for a linear layer given input and output shapes."""
+    """Calculates the total FLOPs for a linear layer, including bias operations if specified."""
     flops = count_matmul(input_shapes, output_shapes)
     if "bias" in kwargs:
         flops += output_shapes[0].numel()
@@ -43,7 +43,9 @@ from .vision.calc_func import calculate_conv
 
 
 def count_fn_conv2d(input_shapes, output_shapes, *args, **kwargs):
-    """Calculates total operations (FLOPs) for a 2D convolutional layer given input and output shapes."""
+    """Calculates total operations (FLOPs) for a 2D conv layer based on input and output shapes using
+    `calculate_conv`.
+    """
     inputs, weight, bias, stride, padding, dilation, groups = args
     if len(input_shapes) == 2:
         x_shape, k_shape = input_shapes
@@ -65,12 +67,12 @@ def count_nn_linear(module: nn.Module, input_shapes, output_shapes):
 
 
 def count_zero_ops(module: nn.Module, input_shapes, output_shapes, *args, **kwargs):
-    """Returns 0 for the given neural network module, input shapes, and output shapes."""
+    """Returns 0 for a neural network module, input shapes, and output shapes in PyTorch."""
     return 0
 
 
 def count_nn_conv2d(module: nn.Conv2d, input_shapes, output_shapes):
-    """Calculates total operations for a 2D convolutional neural network layer in a given neural network module."""
+    """Calculates FLOPs for a 2D Conv2D layer in an nn.Module using input and output shapes."""
     bias_op = 1 if module.bias is not None else 0
     out_shape = output_shapes[0]
 
@@ -82,7 +84,7 @@ def count_nn_conv2d(module: nn.Conv2d, input_shapes, output_shapes):
 
 
 def count_nn_bn2d(module: nn.BatchNorm2d, input_shapes, output_shapes):
-    """Calculate the total operations for a given nn.BatchNorm2d module based on its output shape."""
+    """Calculate FLOPs for an nn.BatchNorm2d layer based on the given output shape."""
     assert len(output_shapes) == 1, "nn.BatchNorm2d should only have one output"
     y = output_shapes[0]
     return 2 * y.numel()
@@ -127,9 +129,7 @@ def null_print(*args, **kwargs):
 
 
 def fx_profile(mod: nn.Module, input: th.Tensor, verbose=False):
-    """Profiles the given torch.nn Module to calculate total FLOPs for each operation and prints detailed node
-    information if verbose.
-    """
+    """Profiles nn.Module for total FLOPs per operation and prints detailed nodes if verbose."""
     gm: torch.fx.GraphModule = symbolic_trace(mod)
     ShapeProp(gm).propagate(input)
 
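The `fx_profile` entry point above symbolically traces the module with `torch.fx`, propagates shapes for the given input, and sums the per-node counters defined in this file. A minimal usage sketch, not part of the diff, assuming torch >= 1.8 and a module that `symbolic_trace` can handle:

```python
# Hedged sketch of calling thop.fx_profile.fx_profile (signature taken from the hunk above).
import torch
import torch.nn as nn

from thop.fx_profile import fx_profile

model = nn.Sequential(nn.Linear(64, 128), nn.Linear(128, 10))
dummy = torch.randn(1, 64)

# Traces the model, propagates shapes for the dummy input, and reports per-op FLOPs when verbose.
total_flops = fx_profile(model, dummy, verbose=True)
print(total_flops)
```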
thop/profile.py CHANGED
@@ -55,9 +55,7 @@ register_hooks = {
 
 
 def profile_origin(model, inputs, custom_ops=None, verbose=True, report_missing=False):
-    """Profiles a PyTorch model's operations and parameters by applying custom or default hooks and returns total
-    operations and parameters.
-    """
+    """Profiles a PyTorch model's operations and parameters, applying either custom or default hooks."""
     handler_collection = []
     types_collection = set()
     if custom_ops is None:
@@ -145,6 +143,7 @@ def profile(
     ret_layer_info=False,
     report_missing=False,
 ):
+    """Profiles a PyTorch model, returning total operations, parameters, and optionally layer-wise details."""
     handler_collection = {}
     types_collection = set()
     if custom_ops is None:
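The new `profile` docstring describes the package's main entry point. A hedged usage sketch using the keyword arguments visible in this hunk (`ret_layer_info`, `report_missing`); the illustrative model and values are not part of the diff:

```python
# Hedged sketch: counting MACs and parameters with thop.profile.
import torch
import torch.nn as nn

from thop import profile

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(16, 10)
)
inputs = (torch.randn(1, 3, 224, 224),)

# report_missing=True warns about layer types without a registered counting hook;
# with ret_layer_info=True, layer-wise details are also returned (per the docstring above).
macs, params = profile(model, inputs=inputs, report_missing=True)
print(macs, params)
```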
thop/rnn_hooks.py CHANGED
@@ -4,7 +4,7 @@ from torch.nn.utils.rnn import PackedSequence
 
 
 def _count_rnn_cell(input_size, hidden_size, bias=True):
-    """Calculate the total operations for an RNN cell based on input size, hidden size, and bias configuration."""
+    """Calculate the total operations for an RNN cell given input size, hidden size, and optional bias."""
     total_ops = hidden_size * (input_size + hidden_size) + hidden_size
     if bias:
         total_ops += hidden_size * 2
@@ -13,7 +13,7 @@ def _count_rnn_cell(input_size, hidden_size, bias=True):
 
 
 def count_rnn_cell(m: nn.RNNCell, x: torch.Tensor, y: torch.Tensor):
-    """Counts RNN cell operations based on input, hidden size, bias, and batch size."""
+    """Counts the total RNN cell operations based on input tensor, hidden size, bias, and batch size."""
     total_ops = _count_rnn_cell(m.input_size, m.hidden_size, m.bias)
 
     batch_size = x[0].size(0)
@@ -23,7 +23,7 @@ def count_rnn_cell(m: nn.RNNCell, x: torch.Tensor, y: torch.Tensor):
 
 
 def _count_gru_cell(input_size, hidden_size, bias=True):
-    """Counts the total operations for a GRU cell based on input size, hidden size, and bias."""
+    """Counts the total operations for a GRU cell based on input size, hidden size, and bias configuration."""
     total_ops = 0
     # r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\
     # z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\
@@ -57,9 +57,7 @@ def count_gru_cell(m: nn.GRUCell, x: torch.Tensor, y: torch.Tensor):
 
 
 def _count_lstm_cell(input_size, hidden_size, bias=True):
-    """Calculates the total operations for an LSTM cell during inference given input size, hidden size, and optional
-    bias.
-    """
+    """Counts LSTM cell operations during inference based on input size, hidden size, and bias configuration."""
     total_ops = 0
 
     # i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\
@@ -82,9 +80,7 @@ def _count_lstm_cell(input_size, hidden_size, bias=True):
 
 
 def count_lstm_cell(m: nn.LSTMCell, x: torch.Tensor, y: torch.Tensor):
-    """Count the number of operations for a single LSTM cell in a given batch, updating the model's total operations
-    count.
-    """
+    """Counts and updates the total operations for an LSTM cell in a mini-batch during inference."""
     total_ops = _count_lstm_cell(m.input_size, m.hidden_size, m.bias)
 
     batch_size = x[0].size(0)
@@ -94,7 +90,7 @@ def count_lstm_cell(m: nn.LSTMCell, x: torch.Tensor, y: torch.Tensor):
 
 
 def count_rnn(m: nn.RNN, x, y):
-    """Calculate and update the total number of operations for a single RNN cell in a given batch."""
+    """Calculate and update the total number of operations for each RNN cell in a given batch."""
     bias = m.bias
     input_size = m.input_size
     hidden_size = m.hidden_size
@@ -131,7 +127,7 @@ def count_rnn(m: nn.RNN, x, y):
 
 
 def count_gru(m: nn.GRU, x, y):
-    """Calculate the total number of operations for a GRU layer in a neural network model."""
+    """Calculates total operations for a GRU layer, updating the model's operation count based on batch size."""
     bias = m.bias
     input_size = m.input_size
     hidden_size = m.hidden_size
@@ -168,9 +164,7 @@ def count_gru(m: nn.GRU, x, y):
 
 
 def count_lstm(m: nn.LSTM, x, y):
-    """Calculate the total operations for LSTM layers in a network, accounting for input size, hidden size, bias, and
-    bidirectionality.
-    """
+    """Calculate total operations for LSTM layers, including bidirectional, updating model's total operations."""
     bias = m.bias
     input_size = m.input_size
     hidden_size = m.hidden_size
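As a quick sanity check on `_count_rnn_cell`, whose body appears in the first hunk above, the per-timestep count for a single sample can be worked out by hand; this is an illustrative calculation, not code from the package:

```python
# Worked example of the RNN-cell formula shown above:
# total_ops = hidden_size * (input_size + hidden_size) + hidden_size, plus 2 * hidden_size when bias is used.
input_size, hidden_size, bias = 10, 20, True

total_ops = hidden_size * (input_size + hidden_size) + hidden_size  # 20 * 30 + 20 = 620
if bias:
    total_ops += hidden_size * 2  # 620 + 40 = 660

print(total_ops)  # 660 operations per timestep for one sample
```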
thop/utils.py CHANGED
@@ -22,7 +22,7 @@ prYellow = colorful_print(print, color=COLOR_YELLOW)
 
 
 def clever_format(nums, format="%.2f"):
-    """Formats numerical values into a more readable string with units (K, M, G, T) based on their magnitude."""
+    """Formats numbers into human-readable strings with units (K for thousand, M for million, etc.)."""
     if not isinstance(nums, Iterable):
         nums = [nums]
     clever_nums = []
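`clever_format` is the helper typically used to pretty-print the raw counts returned by `profile`. A short hedged sketch; the input values are illustrative:

```python
# Hedged sketch: formatting raw MAC/parameter counts with thop.clever_format.
from thop import clever_format

macs, params = 4139975680.0, 25557032.0  # illustrative raw counts, e.g. from profile(...)
macs, params = clever_format([macs, params], "%.3f")
print(macs, params)  # approximately "4.140G 25.557M"
```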
thop/vision/basic_hooks.py CHANGED
@@ -9,19 +9,19 @@ multiply_adds = 1
 
 
 def count_parameters(m, x, y):
-    """Calculate and update the total number of parameters in a given PyTorch model."""
+    """Calculate and return the total number of learnable parameters in a given PyTorch model."""
     m.total_params[0] = calculate_parameters(m.parameters())
 
 
 def zero_ops(m, x, y):
-    """Incrementally add the number of zero operations to the model's total operations count."""
+    """Incrementally add zero operations to the model's total operations count."""
     m.total_ops += calculate_zero_ops()
 
 
 def count_convNd(m: _ConvNd, x, y: torch.Tensor):
-    """Calculate and add the number of convolutional operations (FLOPs) to the model's total operations count."""
+    """Calculate and add the number of convolutional operations (FLOPs) for a ConvNd layer to the model's total ops."""
     x = x[0]
-
+
     m.total_ops += calculate_conv2d_flops(
         input_size=list(x.shape),
         output_size=list(y.shape),
@@ -40,7 +40,7 @@ def count_convNd(m: _ConvNd, x, y: torch.Tensor):
 
 
 def count_convNd_ver2(m: _ConvNd, x, y: torch.Tensor):
-    """Calculates the total operations for a convolutional layer and updates the layer's total_ops attribute."""
+    """Calculates and updates total operations (FLOPs) for a convolutional layer in a PyTorch model."""
     x = x[0]
 
     # N x H x W (exclude Cout)
@@ -56,9 +56,7 @@ def count_convNd_ver2(m: _ConvNd, x, y: torch.Tensor):
 
 
 def count_normalization(m: nn.modules.batchnorm._BatchNorm, x, y):
-    """Calculate and add the FLOPs for a batch normalization layer, considering elementwise operations and possible
-    affine parameters.
-    """
+    """Calculate and add the FLOPs for a batch normalization layer, including elementwise and affine operations."""
     # https://github.com/Lyken17/pytorch-OpCounter/issues/124
     # y = (x - mean) / sqrt(eps + var) * weight + bias
     x = x[0]
@@ -80,7 +78,7 @@ def count_normalization(m: nn.modules.batchnorm._BatchNorm, x, y):
 
 
 def count_prelu(m, x, y):
-    """Calculate and update the total operation counts for a PReLU layer."""
+    """Calculate and update the total operation counts for a PReLU layer using input element number."""
     x = x[0]
 
     nelements = x.numel()
@@ -95,7 +93,7 @@ def count_relu(m, x, y):
 
 
 def count_softmax(m, x, y):
-    """Calculate and update the total operation counts for a Softmax layer."""
+    """Calculate and update the total operation counts for a Softmax layer in a PyTorch model."""
     x = x[0]
     nfeatures = x.size()[m.dim]
     batch_size = x.numel() // nfeatures
@@ -104,7 +102,7 @@ def count_softmax(m, x, y):
 
 
 def count_avgpool(m, x, y):
-    """Calculate and update the total operation counts for an AvgPool layer."""
+    """Calculate and update the total number of operations (FLOPs) for an AvgPool layer based on the output elements."""
     # total_div = 1
     # kernel_ops = total_add + total_div
     num_elements = y.numel()
@@ -112,7 +110,7 @@ def count_avgpool(m, x, y):
 
 
 def count_adap_avgpool(m, x, y):
-    """Calculate and update the total operation counts for an AdaptiveAvgPool layer."""
+    """Calculate and update the total operation counts for an AdaptiveAvgPool layer using kernel and element counts."""
     kernel = torch.div(torch.DoubleTensor([*(x[0].shape[2:])]), torch.DoubleTensor([*(y.shape[2:])]))
     total_add = torch.prod(kernel)
     num_elements = y.numel()
@@ -121,7 +119,7 @@ def count_adap_avgpool(m, x, y):
 
 # TODO: verify the accuracy
 def count_upsample(m, x, y):
-    """Update the total operations counter in the given module for supported upsampling modes."""
+    """Update total operations counter for upsampling layers based on the mode used."""
     if m.mode not in (
         "nearest",
         "linear",
@@ -137,9 +135,7 @@ def count_upsample(m, x, y):
 
 # nn.Linear
 def count_linear(m, x, y):
-    """Counts total operations for nn.Linear layers by calculating multiplications and additions based on input and
-    output elements.
-    """
+    """Counts total operations for nn.Linear layers using input and output element dimensions."""
     total_mul = m.in_features
     # total_add = m.in_features - 1
     # total_add += 1 if m.bias is not None else 0
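Tying `count_linear` to `calculate_linear` (whose full body appears in the calc_func.py hunks below), the count for a fully connected layer reduces to in_features multiplied by the number of output elements. An illustrative check, not part of the package:

```python
# Illustrative check of the nn.Linear count: ops = in_features * output_elements,
# matching calculate_linear(in_feature, num_elements) in thop/vision/calc_func.py.
import torch
import torch.nn as nn

layer = nn.Linear(64, 10)
x = torch.randn(8, 64)  # batch of 8 samples
y = layer(x)            # output shape (8, 10) -> 80 elements

expected_ops = layer.in_features * y.numel()  # 64 * 80 = 5120
print(expected_ops)
```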
thop/vision/calc_func.py CHANGED
@@ -5,7 +5,7 @@ import torch
 
 
 def l_prod(in_list):
-    """Calculate the product of all elements in a list."""
+    """Compute the product of all elements in the input list."""
     res = 1
     for _ in in_list:
         res *= _
@@ -13,22 +13,22 @@ def l_prod(in_list):
 
 
 def l_sum(in_list):
-    """Calculate the sum of all elements in a list."""
+    """Calculate the sum of all numerical elements in a list."""
     return sum(in_list)
 
 
 def calculate_parameters(param_list):
-    """Calculate the total number of parameters in a list of tensors."""
+    """Calculate the total number of parameters in a list of tensors using the product of their shapes."""
     return sum(torch.DoubleTensor([p.nelement()]) for p in param_list)
 
 
 def calculate_zero_ops():
-    """Return a tensor initialized to zero."""
+    """Initializes and returns a tensor with all elements set to zero."""
     return torch.DoubleTensor([0])
 
 
 def calculate_conv2d_flops(input_size: list, output_size: list, kernel_size: list, groups: int, bias: bool = False):
-    """Calculate FLOPs for a Conv2D layer given input/output sizes, kernel size, groups, and bias flag."""
+    """Calculate FLOPs for a Conv2D layer using input/output sizes, kernel size, groups, and the bias flag."""
     # n, in_c, ih, iw = input_size
     # out_c, in_c, kh, kw = kernel_size
     in_c = input_size[1]
@@ -37,31 +37,29 @@ def calculate_conv2d_flops(input_size: list, output_size: list, kernel_size: lis
 
 
 def calculate_conv(bias, kernel_size, output_size, in_channel, group):
+    """Calculate FLOPs for convolutional layers given bias, kernel size, output size, in_channels, and groups."""
     warnings.warn("This API is being deprecated.")
-    """Inputs are all numbers!"""
     return torch.DoubleTensor([output_size * (in_channel / group * kernel_size + bias)])
 
 
 def calculate_norm(input_size):
-    """Input is a number not a array or tensor."""
+    """Compute the L2 norm of a tensor or array based on its input size."""
     return torch.DoubleTensor([2 * input_size])
 
 
 def calculate_relu_flops(input_size):
-    """Calculates the FLOPs for a ReLU activation function based on the input size."""
+    """Calculates the FLOPs for a ReLU activation function based on the input tensor's dimensions."""
     return 0
 
 
 def calculate_relu(input_size: torch.Tensor):
-    """Convert an input tensor to a DoubleTensor with the same value."""
+    """Convert an input tensor to a DoubleTensor with the same value (deprecated)."""
     warnings.warn("This API is being deprecated")
     return torch.DoubleTensor([int(input_size)])
 
 
 def calculate_softmax(batch_size, nfeatures):
-    """Calculate the number of FLOPs required for a softmax activation function based on batch size and number of
-    features.
-    """
+    """Compute FLOPs for a softmax activation given batch size and feature count."""
     total_exp = nfeatures
     total_add = nfeatures - 1
     total_div = nfeatures
@@ -70,19 +68,19 @@ def calculate_softmax(batch_size, nfeatures):
 
 
 def calculate_avgpool(input_size):
-    """Calculate the average pooling size given the input size."""
+    """Calculate the average pooling size for a given input tensor."""
     return torch.DoubleTensor([int(input_size)])
 
 
 def calculate_adaptive_avg(kernel_size, output_size):
-    """Calculate the number of operations for adaptive average pooling given kernel and output sizes."""
+    """Calculate FLOPs for adaptive average pooling given kernel size and output size."""
     total_div = 1
     kernel_op = kernel_size + total_div
     return torch.DoubleTensor([int(kernel_op * output_size)])
 
 
 def calculate_upsample(mode: str, output_size):
-    """Calculate the number of operations for upsample methods given the mode and output size."""
+    """Calculate the operations required for various upsample methods based on mode and output size."""
     total_ops = output_size
     if mode == "bicubic":
         total_ops *= 224 + 35
@@ -96,29 +94,29 @@ def calculate_upsample(mode: str, output_size):
 
 
 def calculate_linear(in_feature, num_elements):
-    """Calculate the linear operation count for an input feature and number of elements."""
+    """Calculate the linear operation count for given input feature and number of elements."""
     return torch.DoubleTensor([int(in_feature * num_elements)])
 
 
 def counter_matmul(input_size, output_size):
-    """Calculate the total number of operations for a matrix multiplication given input and output sizes."""
+    """Calculate the total number of operations for matrix multiplication given input and output sizes."""
     input_size = np.array(input_size)
     output_size = np.array(output_size)
     return np.prod(input_size) * output_size[-1]
 
 
 def counter_mul(input_size):
-    """Calculate the total number of operations for a matrix multiplication given input and output sizes."""
+    """Calculate the total number of operations for element-wise multiplication given the input size."""
     return input_size
 
 
 def counter_pow(input_size):
-    """Calculate the total number of scalar multiplications for a power operation given an input size."""
+    """Computes the total scalar multiplications required for power operations based on input size."""
     return input_size
 
 
 def counter_sqrt(input_size):
-    """Calculate the total number of scalar operations for a square root operation given an input size."""
+    """Calculate the total number of scalar operations required for a square root operation given an input size."""
     return input_size
 
 
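`counter_matmul`, shown in full above, multiplies the total number of input elements by the last output dimension. A small worked example, purely illustrative:

```python
# Worked example of counter_matmul: ops = prod(input_size) * output_size[-1].
import numpy as np

input_size = (2, 3, 4)   # e.g. a (2, 3, 4) tensor multiplied by a (4, 5) matrix
output_size = (2, 3, 5)

ops = np.prod(np.array(input_size)) * np.array(output_size)[-1]
print(ops)  # 2 * 3 * 4 * 5 = 120
```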
ultralytics_thop-2.0.0.dist-info/METADATA → ultralytics_thop-2.0.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics-thop
-Version: 2.0.0
+Version: 2.0.2
 Summary: Ultralytics THOP package for fast computation of PyTorch model FLOPs and parameters.
 Author-email: Ligeng Zhu <ligeng.zhu+github@gmail.com>
 Maintainer: Glenn Jocher
@@ -13,7 +13,7 @@ Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Education
 Classifier: Intended Audience :: Science/Research
-Classifier: License :: OSI Approved :: MIT License
+Classifier: License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
@@ -23,6 +23,7 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Software Development
 Classifier: Topic :: Scientific/Engineering
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Scientific/Engineering :: Image Recognition
 Classifier: Operating System :: POSIX :: Linux
 Classifier: Operating System :: MacOS
 Classifier: Operating System :: Microsoft :: Windows
@@ -33,13 +34,13 @@ Requires-Dist: numpy
 Requires-Dist: torch
 
 <br>
-<img src="https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg" width="320">
+<a href="https://ultralytics.com" target="_blank"><img src="https://raw.githubusercontent.com/ultralytics/assets/main/logo/Ultralytics_Logotype_Original.svg" width="320" alt="Ultralytics logo"></a>
 
 # 🚀 THOP: PyTorch-OpCounter
 
 Welcome to the [THOP](https://github.com/ultralytics/thop) repository, your comprehensive solution for profiling PyTorch models by computing the number of Multiply-Accumulate Operations (MACs) and parameters. This tool is essential for deep learning practitioners to evaluate model efficiency and performance.
 
-[![GitHub Actions](https://github.com/ultralytics/thop/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/thop/actions/workflows/main.yml) <a href="https://ultralytics.com/discord"><img alt="Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
+[![GitHub Actions](https://github.com/ultralytics/thop/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/thop/actions/workflows/main.yml) <a href="https://ultralytics.com/discord"><img alt="Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a> <a href="https://community.ultralytics.com"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
 
 ## 📄 Description
 
@@ -68,9 +69,10 @@ pip install --upgrade git+https://github.com/ultralytics/thop.git
 To profile a model, you can use the following example:
 
 ```python
+import torch
 from torchvision.models import resnet50
+
 from thop import profile
-import torch
 
 model = resnet50()
 input = torch.randn(1, 3, 224, 224)
@@ -170,7 +172,7 @@ We welcome community contributions to enhance THOP. Please check our [Contributi
 
 ## 📄 License
 
-THOP is licensed under the AGPL-3.0 License. For more information, see the [LICENSE](https://github.com/ultralytics/thop/blob/master/LICENSE) file.
+THOP is licensed under the AGPL-3.0 License. For more information, see the [LICENSE](https://github.com/ultralytics/thop/blob/main/LICENSE) file.
 
 ## 📮 Contact
 
@@ -188,7 +190,7 @@ For bugs or feature requests, please open an issue on [GitHub Issues](https://gi
 <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
 <a href="https://www.tiktok.com/@ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-tiktok.png" width="3%" alt="Ultralytics TikTok"></a>
 <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
-<a href="https://www.instagram.com/ultralytics/"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="3%" alt="Ultralytics Instagram"></a>
+<a href="https://ultralytics.com/bilibili"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-bilibili.png" width="3%" alt="Ultralytics BiliBili"></a>
 <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="3%" alt="space">
 <a href="https://ultralytics.com/discord"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-discord.png" width="3%" alt="Ultralytics Discord"></a>
 </div>
ultralytics_thop-2.0.2.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+thop/__init__.py,sha256=jcR-yZnxf2TgpJ9VTv52KMkgWkpS1rCPQFOZWCUOS6g,146
+thop/fx_profile.py,sha256=u1eNvsRBTl9u0jJ6ymgrnrJP1VSLpAZkaEvm4D2mrI4,8172
+thop/profile.py,sha256=7ZMuGia9jIz_aeuUWXD_wb5WgunuaPx7nfaUQVf8CzY,7911
+thop/rnn_hooks.py,sha256=IwxLvVvbeo5skThdg3BKXm08_0GsM8FI2QM_alQcCB8,6432
+thop/utils.py,sha256=SP5QWanMqVRieDoYGQN-7lftZmYXohn2E_m3cP17tS4,1413
+thop/vision/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+thop/vision/basic_hooks.py,sha256=tKclhcddDByXrIXiI9u224nIKTbqWfp2J3hqIUYK6Hc,4639
+thop/vision/calc_func.py,sha256=5YbWp48XRbJYzSRkAFMr5xlM4o5BbjTcU6rHSKPkD6I,4291
+ultralytics_thop-2.0.2.dist-info/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
+ultralytics_thop-2.0.2.dist-info/METADATA,sha256=-9izM8Samq6PGBEdy3H0VuDM-lgZPu56qkqsLpPYUUI,8912
+ultralytics_thop-2.0.2.dist-info/WHEEL,sha256=HiCZjzuy6Dw0hdX5R3LCFPDmFS4BWl8H-8W39XfmgX4,91
+ultralytics_thop-2.0.2.dist-info/top_level.txt,sha256=HQ7D0gSvDJ31CNR-f0EuXNVve05RYBmwyIkHQKiEhU8,5
+ultralytics_thop-2.0.2.dist-info/RECORD,,
ultralytics_thop-2.0.0.dist-info/WHEEL → ultralytics_thop-2.0.2.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.43.0)
+Generator: setuptools (72.2.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
ultralytics_thop-2.0.0.dist-info/RECORD DELETED
@@ -1,13 +0,0 @@
-thop/__init__.py,sha256=atkwStCUTA2zIEim2pGqbVkiIS0T_lSE1d7ajoxa2fA,146
-thop/fx_profile.py,sha256=NDObo07yrBf7B6ENltPMR9Cr1IolEumRkeTI6pXhpVs,8210
-thop/profile.py,sha256=z89mX1zVr_42axKbhgz9k2MJbgBLaBPt30lYT_PcWuA,7848
-thop/rnn_hooks.py,sha256=GYuKaNPEdZzTuqNCrJpTBjeQHsWs65UXCCaViS2giik,6485
-thop/utils.py,sha256=V_Pj_qC6RjqMyuiSX05eeUujT07hZQV-xobCIabjDds,1422
-thop/vision/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-thop/vision/basic_hooks.py,sha256=oOuHk5iPjRJjUuijY8Zppqr1s1abKV0y08N4DuqZGVg,4619
-thop/vision/calc_func.py,sha256=RE-qQWGjZIlRx8CNKjF-ZY7aS0WShPkvnOAYvQA8z8I,4130
-ultralytics_thop-2.0.0.dist-info/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
-ultralytics_thop-2.0.0.dist-info/METADATA,sha256=JdsrGLxGdfqT1xnZsmaz5ejScJG5ALlTVjE4jNqwxQs,8525
-ultralytics_thop-2.0.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-ultralytics_thop-2.0.0.dist-info/top_level.txt,sha256=HQ7D0gSvDJ31CNR-f0EuXNVve05RYBmwyIkHQKiEhU8,5
-ultralytics_thop-2.0.0.dist-info/RECORD,,