gtsam-develop 4.3a0.dev202510101133-cp311-cp311-macosx_11_0_arm64.whl → 4.3a0.dev202510101309-cp311-cp311-macosx_11_0_arm64.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

This version of gtsam-develop has been flagged as potentially problematic.

gtsam/gtsam/gtsfm.pyi CHANGED
@@ -2,15 +2,20 @@
 gtsfm submodule
 """
 from __future__ import annotations
+import collections.abc
 import gtsam.gtsam
 import numpy
+import numpy.typing
 import typing
 __all__: list[str] = ['Keypoints', 'tracksFromPairwiseMatches']
-M = typing.TypeVar("M", bound=int)
-N = typing.TypeVar("N", bound=int)
 class Keypoints:
-    coordinates: numpy.ndarray[tuple[M, typing.Literal[2]], numpy.dtype[numpy.float64]]
-    def __init__(self, coordinates: numpy.ndarray[tuple[M, typing.Literal[2]], numpy.dtype[numpy.float64]]) -> None:
+    def __init__(self, coordinates: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 2]"]) -> None:
         ...
-def tracksFromPairwiseMatches(matches_dict: dict[gtsam.gtsam.IndexPair, numpy.ndarray[tuple[M, typing.Literal[2]], numpy.dtype[numpy.int32]]], keypoints_list: list[Keypoints], verbose: bool = False) -> list[gtsam.gtsam.SfmTrack2d]:
+    @property
+    def coordinates(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, 2]"]:
+        ...
+    @coordinates.setter
+    def coordinates(self, arg0: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 2]"]) -> None:
+        ...
+def tracksFromPairwiseMatches(matches_dict: collections.abc.Mapping[gtsam.gtsam.IndexPair, typing.Annotated[numpy.typing.ArrayLike, numpy.int32, "[m, 2]"]], keypoints_list: collections.abc.Sequence[Keypoints], verbose: bool = False) -> list[gtsam.gtsam.SfmTrack2d]:
     ...
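For context, a minimal usage sketch (illustrative only, not part of the diff, and assuming the wheel's usual re-export of the gtsam.gtsam submodules as gtsam.gtsfm): the relaxed ArrayLike annotation means any (m, 2) float array-like is accepted by the constructor, while the coordinates property still reads back as a concrete NDArray.

import numpy as np
import gtsam

# Any (m, 2) float64 array-like satisfies the new ArrayLike "[m, 2]" annotation.
kp = gtsam.gtsfm.Keypoints(np.array([[10.0, 20.0], [30.0, 40.0]]))
coords = kp.coordinates  # read back as an (m, 2) float64 NDArray per the new property stub
print(coords.shape)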
gtsam/gtsam/imuBias.pyi CHANGED
@@ -3,9 +3,9 @@ imuBias submodule
 """
 from __future__ import annotations
 import numpy
+import numpy.typing
 import typing
 __all__: list[str] = ['ConstantBias']
-M = typing.TypeVar("M", bound=int)
 class ConstantBias:
     @staticmethod
     def Identity() -> ConstantBias:
@@ -20,7 +20,7 @@ class ConstantBias:
     def __init__(self) -> None:
         ...
     @typing.overload
-    def __init__(self, biasAcc: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]], biasGyro: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> None:
+    def __init__(self, biasAcc: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"], biasGyro: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> None:
         ...
     def __neg__(self) -> ConstantBias:
         ...
@@ -30,41 +30,41 @@ class ConstantBias:
         ...
     def __sub__(self, arg0: ConstantBias) -> ConstantBias:
         ...
-    def accelerometer(self) -> numpy.ndarray[tuple[typing.Literal[3], typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def accelerometer(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[3, 1]"]:
         """
         get accelerometer bias
         """
-    def correctAccelerometer(self, measurement: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> numpy.ndarray[tuple[typing.Literal[3], typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def correctAccelerometer(self, measurement: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[3, 1]"]:
         """
         Correct an accelerometer measurement using this bias model, and optionally compute Jacobians.
         """
-    def correctGyroscope(self, measurement: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> numpy.ndarray[tuple[typing.Literal[3], typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def correctGyroscope(self, measurement: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[3, 1]"]:
         """
         Correct a gyroscope measurement using this bias model, and optionally compute Jacobians.
         """
     def deserialize(self, serialized: str) -> None:
         ...
-    def equals(self, expected: ConstantBias, tol: float) -> bool:
+    def equals(self, expected: ConstantBias, tol: typing.SupportsFloat) -> bool:
         """
         equality up to tolerance
         """
-    def gyroscope(self) -> numpy.ndarray[tuple[typing.Literal[3], typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def gyroscope(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[3, 1]"]:
         """
         get gyroscope bias
         """
-    def localCoordinates(self, b: ConstantBias) -> numpy.ndarray[tuple[typing.Literal[6], typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def localCoordinates(self, b: ConstantBias) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[6, 1]"]:
         ...
     def print(self, s: str = '') -> None:
         """
         print with optional string
         """
-    def retract(self, v: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> ConstantBias:
+    def retract(self, v: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> ConstantBias:
         """
         The retract function.
         """
     def serialize(self) -> str:
         ...
-    def vector(self) -> numpy.ndarray[tuple[typing.Literal[6], typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def vector(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[6, 1]"]:
         """
         return the accelerometer and gyro biases in a single vector
         """
@@ -3,6 +3,7 @@ noiseModel submodule
 """
 from __future__ import annotations
 import numpy
+import numpy.typing
 import typing
 from . import mEstimator
 __all__: list[str] = ['Base', 'Constrained', 'Diagonal', 'Gaussian', 'Isotropic', 'Robust', 'Unit', 'mEstimator']
@@ -14,47 +15,47 @@ class Base:
 class Constrained(Diagonal):
     @staticmethod
     @typing.overload
-    def All(dim: int) -> Constrained:
+    def All(dim: typing.SupportsInt) -> Constrained:
         """
         Fully constrained variations.
         """
     @staticmethod
     @typing.overload
-    def All(dim: int, mu: float) -> Constrained:
+    def All(dim: typing.SupportsInt, mu: typing.SupportsFloat) -> Constrained:
         """
         Fully constrained variations.
         """
     @staticmethod
     @typing.overload
-    def MixedPrecisions(mu: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]], precisions: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> Constrained:
+    def MixedPrecisions(mu: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"], precisions: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> Constrained:
         """
         A diagonal noise model created by specifying a Vector of precisions, some of which might be inf.
         """
     @staticmethod
     @typing.overload
-    def MixedPrecisions(precisions: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> Constrained:
+    def MixedPrecisions(precisions: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> Constrained:
         ...
     @staticmethod
     @typing.overload
-    def MixedSigmas(mu: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]], sigmas: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> Constrained:
+    def MixedSigmas(mu: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"], sigmas: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> Constrained:
         """
         A diagonal noise model created by specifying a Vector of standard devations, some of which might be zero.
         """
     @staticmethod
     @typing.overload
-    def MixedSigmas(m: float, sigmas: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> Constrained:
+    def MixedSigmas(m: typing.SupportsFloat, sigmas: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> Constrained:
         """
         A diagonal noise model created by specifying a Vector of standard devations, some of which might be zero.
         """
     @staticmethod
     @typing.overload
-    def MixedVariances(mu: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]], variances: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> Constrained:
+    def MixedVariances(mu: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"], variances: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> Constrained:
         """
         A diagonal noise model created by specifying a Vector of standard devations, some of which might be zero.
         """
     @staticmethod
     @typing.overload
-    def MixedVariances(variances: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> Constrained:
+    def MixedVariances(variances: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> Constrained:
         ...
     def __getstate__(self) -> tuple:
         ...
@@ -70,21 +71,21 @@ class Constrained(Diagonal):
         """
 class Diagonal(Gaussian):
     @staticmethod
-    def Precisions(precisions: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]], smart: bool = True) -> Diagonal:
+    def Precisions(precisions: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"], smart: bool = True) -> Diagonal:
         """
         A diagonal noise model created by specifying a Vector of precisions, i.e.

         i.e. the diagonal of the information matrix, i.e., weights
         """
     @staticmethod
-    def Sigmas(sigmas: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]], smart: bool = True) -> Diagonal:
+    def Sigmas(sigmas: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"], smart: bool = True) -> Diagonal:
         """
         A diagonal noise model created by specifying a Vector of sigmas, i.e.

         standard deviations, the diagonal of the square root covariance matrix.
         """
     @staticmethod
-    def Variances(variances: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]], smart: bool = True) -> Diagonal:
+    def Variances(variances: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"], smart: bool = True) -> Diagonal:
         """
         A diagonal noise model created by specifying a Vector of variances, i.e.

@@ -92,7 +93,7 @@ class Diagonal(Gaussian):
         variances: A vector containing the variances of this noise model
         smart: check if can be simplified to derived class
         """
-    def R(self) -> numpy.ndarray[tuple[M, N], numpy.dtype[numpy.float64]]:
+    def R(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, n]"]:
         """
         Return R itself, but note that Whiten(H) is cheaper than R*H.
         """
@@ -102,29 +103,29 @@ class Diagonal(Gaussian):
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def invsigmas(self) -> numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def invsigmas(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, 1]"]:
         """
         Return sqrt precisions.
         """
-    def precisions(self) -> numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def precisions(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, 1]"]:
         """
         Return precisions.
         """
     def serialize(self) -> str:
         ...
-    def sigmas(self) -> numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def sigmas(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, 1]"]:
         """
         Calculate standard deviations.
         """
 class Gaussian(Base):
     @staticmethod
-    def Covariance(R: numpy.ndarray[tuple[M, N], numpy.dtype[numpy.float64]], smart: bool = True) -> Gaussian:
+    def Covariance(R: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, n]"], smart: bool = True) -> Gaussian:
         ...
     @staticmethod
-    def Information(R: numpy.ndarray[tuple[M, N], numpy.dtype[numpy.float64]], smart: bool = True) -> Gaussian:
+    def Information(R: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, n]"], smart: bool = True) -> Gaussian:
         ...
     @staticmethod
-    def SqrtInformation(R: numpy.ndarray[tuple[M, N], numpy.dtype[numpy.float64]], smart: bool = True) -> Gaussian:
+    def SqrtInformation(R: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, n]"], smart: bool = True) -> Gaussian:
         """
         AGaussiannoise model created by specifying a square root information matrix.

@@ -132,11 +133,11 @@ class Gaussian(Base):
         R: The (upper-triangular) square root information matrix
         smart: check if can be simplified to derived class
         """
-    def R(self) -> numpy.ndarray[tuple[M, N], numpy.dtype[numpy.float64]]:
+    def R(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, n]"]:
         """
         Return R itself, but note that Whiten(H) is cheaper than R*H.
         """
-    def Whiten(self, H: numpy.ndarray[tuple[M, N], numpy.dtype[numpy.float64]]) -> numpy.ndarray[tuple[M, N], numpy.dtype[numpy.float64]]:
+    def Whiten(self, H: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, n]"]) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, n]"]:
         """
         Multiply a derivative with R (derivative of whiten) Equivalent to whitening each column of the input matrix.
         """
@@ -144,15 +145,15 @@ class Gaussian(Base):
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
-    def covariance(self) -> numpy.ndarray[tuple[M, N], numpy.dtype[numpy.float64]]:
+    def covariance(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, n]"]:
         """
         Compute covariance matrix.
         """
     def deserialize(self, serialized: str) -> None:
         ...
-    def equals(self, expected: Base, tol: float) -> bool:
+    def equals(self, expected: Base, tol: typing.SupportsFloat) -> bool:
         ...
-    def information(self) -> numpy.ndarray[tuple[M, N], numpy.dtype[numpy.float64]]:
+    def information(self) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, n]"]:
         """
         Compute information matrix.
         """
@@ -164,27 +165,27 @@ class Gaussian(Base):
         """
     def serialize(self) -> str:
         ...
-    def unwhiten(self, v: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def unwhiten(self, v: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, 1]"]:
         """
         Unwhiten an error vector.
         """
-    def whiten(self, v: numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]) -> numpy.ndarray[tuple[M, typing.Literal[1]], numpy.dtype[numpy.float64]]:
+    def whiten(self, v: typing.Annotated[numpy.typing.ArrayLike, numpy.float64, "[m, 1]"]) -> typing.Annotated[numpy.typing.NDArray[numpy.float64], "[m, 1]"]:
         """
         Whiten an error vector.
         """
 class Isotropic(Diagonal):
     @staticmethod
-    def Precision(dim: int, precision: float, smart: bool = True) -> Isotropic:
+    def Precision(dim: typing.SupportsInt, precision: typing.SupportsFloat, smart: bool = True) -> Isotropic:
         """
         An isotropic noise model created by specifying a precision.
         """
     @staticmethod
-    def Sigma(dim: int, sigma: float, smart: bool = True) -> Isotropic:
+    def Sigma(dim: typing.SupportsInt, sigma: typing.SupportsFloat, smart: bool = True) -> Isotropic:
         """
         An isotropic noise model created by specifying a standard devation sigma.
         """
     @staticmethod
-    def Variance(dim: int, varianace: float, smart: bool = True) -> Isotropic:
+    def Variance(dim: typing.SupportsInt, varianace: typing.SupportsFloat, smart: bool = True) -> Isotropic:
         ...
     def __getstate__(self) -> tuple:
         ...
@@ -214,7 +215,7 @@ class Robust(Base):
         ...
 class Unit(Isotropic):
     @staticmethod
-    def Create(dim: int) -> Unit:
+    def Create(dim: typing.SupportsInt) -> Unit:
         """
         Create a unit covariance noise model.
         """
@@ -2,55 +2,54 @@
 mEstimator submodule
 """
 from __future__ import annotations
+import collections.abc
 import typing
 __all__: list[str] = ['AsymmetricCauchy', 'AsymmetricTukey', 'Base', 'Cauchy', 'Custom', 'DCS', 'Fair', 'GemanMcClure', 'Huber', 'L2WithDeadZone', 'Null', 'Tukey', 'Welsch']
-M = typing.TypeVar("M", bound=int)
-N = typing.TypeVar("N", bound=int)
 class AsymmetricCauchy(Base):
     @staticmethod
-    def Create(k: float) -> AsymmetricCauchy:
+    def Create(k: typing.SupportsFloat) -> AsymmetricCauchy:
         ...
     def __getstate__(self) -> tuple:
         ...
     @typing.overload
-    def __init__(self, k: float) -> None:
+    def __init__(self, k: typing.SupportsFloat) -> None:
         ...
     @typing.overload
-    def __init__(self, k: float, reweight: Base.ReweightScheme) -> None:
+    def __init__(self, k: typing.SupportsFloat, reweight: Base.ReweightScheme) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
 class AsymmetricTukey(Base):
     @staticmethod
-    def Create(k: float) -> AsymmetricTukey:
+    def Create(k: typing.SupportsFloat) -> AsymmetricTukey:
         ...
     def __getstate__(self) -> tuple:
         ...
     @typing.overload
-    def __init__(self, k: float) -> None:
+    def __init__(self, k: typing.SupportsFloat) -> None:
         ...
     @typing.overload
-    def __init__(self, k: float, reweight: Base.ReweightScheme) -> None:
+    def __init__(self, k: typing.SupportsFloat, reweight: Base.ReweightScheme) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
@@ -80,7 +79,7 @@ class Base:
             ...
         def __index__(self) -> int:
             ...
-        def __init__(self, value: int) -> None:
+        def __init__(self, value: typing.SupportsInt) -> None:
            ...
         def __int__(self) -> int:
             ...
@@ -102,7 +101,7 @@ class Base:
             ...
         def __rxor__(self, other: typing.Any) -> typing.Any:
             ...
-        def __setstate__(self, state: int) -> None:
+        def __setstate__(self, state: typing.SupportsInt) -> None:
             ...
         def __str__(self) -> str:
             ...
@@ -120,157 +119,157 @@ class Base:
         ...
 class Cauchy(Base):
     @staticmethod
-    def Create(k: float) -> Cauchy:
+    def Create(k: typing.SupportsFloat) -> Cauchy:
         ...
     def __getstate__(self) -> tuple:
         ...
     @typing.overload
-    def __init__(self, k: float) -> None:
+    def __init__(self, k: typing.SupportsFloat) -> None:
         ...
     @typing.overload
-    def __init__(self, k: float, reweight: Base.ReweightScheme) -> None:
+    def __init__(self, k: typing.SupportsFloat, reweight: Base.ReweightScheme) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
 class Custom(Base):
     @staticmethod
-    def Create(weight: typing.Callable[[float], float], loss: typing.Callable[[float], float], reweight: Base.ReweightScheme, name: str) -> Custom:
+    def Create(weight: collections.abc.Callable[[typing.SupportsFloat], float], loss: collections.abc.Callable[[typing.SupportsFloat], float], reweight: Base.ReweightScheme, name: str) -> Custom:
         ...
     def __getstate__(self) -> tuple:
         ...
-    def __init__(self, weight: typing.Callable[[float], float], loss: typing.Callable[[float], float], reweight: Base.ReweightScheme, name: str) -> None:
+    def __init__(self, weight: collections.abc.Callable[[typing.SupportsFloat], float], loss: collections.abc.Callable[[typing.SupportsFloat], float], reweight: Base.ReweightScheme, name: str) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
 class DCS(Base):
     @staticmethod
-    def Create(c: float) -> DCS:
+    def Create(c: typing.SupportsFloat) -> DCS:
         ...
     def __getstate__(self) -> tuple:
         ...
     @typing.overload
-    def __init__(self, c: float) -> None:
+    def __init__(self, c: typing.SupportsFloat) -> None:
         ...
     @typing.overload
-    def __init__(self, c: float, reweight: Base.ReweightScheme) -> None:
+    def __init__(self, c: typing.SupportsFloat, reweight: Base.ReweightScheme) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
 class Fair(Base):
     @staticmethod
-    def Create(c: float) -> Fair:
+    def Create(c: typing.SupportsFloat) -> Fair:
         ...
     def __getstate__(self) -> tuple:
         ...
-    def __init__(self, c: float) -> None:
+    def __init__(self, c: typing.SupportsFloat) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
 class GemanMcClure(Base):
     @staticmethod
-    def Create(c: float) -> GemanMcClure:
+    def Create(c: typing.SupportsFloat) -> GemanMcClure:
         ...
     def __getstate__(self) -> tuple:
         ...
     @typing.overload
-    def __init__(self, c: float) -> None:
+    def __init__(self, c: typing.SupportsFloat) -> None:
         ...
     @typing.overload
-    def __init__(self, c: float, reweight: Base.ReweightScheme) -> None:
+    def __init__(self, c: typing.SupportsFloat, reweight: Base.ReweightScheme) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
 class Huber(Base):
     @staticmethod
-    def Create(k: float) -> Huber:
+    def Create(k: typing.SupportsFloat) -> Huber:
         ...
     def __getstate__(self) -> tuple:
         ...
-    def __init__(self, k: float) -> None:
+    def __init__(self, k: typing.SupportsFloat) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
 class L2WithDeadZone(Base):
     @staticmethod
-    def Create(k: float) -> L2WithDeadZone:
+    def Create(k: typing.SupportsFloat) -> L2WithDeadZone:
         ...
     def __getstate__(self) -> tuple:
         ...
     @typing.overload
-    def __init__(self, k: float) -> None:
+    def __init__(self, k: typing.SupportsFloat) -> None:
         ...
     @typing.overload
-    def __init__(self, k: float, reweight: Base.ReweightScheme) -> None:
+    def __init__(self, k: typing.SupportsFloat, reweight: Base.ReweightScheme) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
@@ -286,59 +285,59 @@ class Null(Base):
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
 class Tukey(Base):
     @staticmethod
-    def Create(k: float) -> Tukey:
+    def Create(k: typing.SupportsFloat) -> Tukey:
         ...
     def __getstate__(self) -> tuple:
         ...
     @typing.overload
-    def __init__(self, k: float) -> None:
+    def __init__(self, k: typing.SupportsFloat) -> None:
         ...
     @typing.overload
-    def __init__(self, k: float, reweight: Base.ReweightScheme) -> None:
+    def __init__(self, k: typing.SupportsFloat, reweight: Base.ReweightScheme) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """
 class Welsch(Base):
     @staticmethod
-    def Create(k: float) -> Welsch:
+    def Create(k: typing.SupportsFloat) -> Welsch:
         ...
     def __getstate__(self) -> tuple:
         ...
     @typing.overload
-    def __init__(self, k: float) -> None:
+    def __init__(self, k: typing.SupportsFloat) -> None:
         ...
     @typing.overload
-    def __init__(self, k: float, reweight: Base.ReweightScheme) -> None:
+    def __init__(self, k: typing.SupportsFloat, reweight: Base.ReweightScheme) -> None:
         ...
     def __setstate__(self, arg0: tuple) -> None:
         ...
     def deserialize(self, serialized: str) -> None:
         ...
-    def loss(self, error: float) -> float:
+    def loss(self, error: typing.SupportsFloat) -> float:
         ...
     def serialize(self) -> str:
         ...
-    def weight(self, error: float) -> float:
+    def weight(self, error: typing.SupportsFloat) -> float:
         """
         produce a weight vector according to an error vector and the implemented robust function
         """