copulas 0.12.4.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
copulas/__init__.py ADDED
@@ -0,0 +1,91 @@
+ """Top-level package for Copulas."""
+
+ __author__ = 'DataCebo, Inc.'
+ __email__ = 'info@sdv.dev'
+ __version__ = '0.12.4.dev3'
+
+ import sys
+ import warnings
+ from copy import deepcopy
+ from importlib.metadata import entry_points
+ from operator import attrgetter
+ from types import ModuleType
+
+
+ def _get_addon_target(addon_path_name):
+     """Find the target object for the add-on.
+
+     Args:
+         addon_path_name (str):
+             The add-on's name. The add-on's name should be the full path of valid Python
+             identifiers (i.e. importable.module:object.attr).
+
+     Returns:
+         tuple:
+             * object:
+                 The base module or object the add-on should be added to.
+             * str:
+                 The name the add-on should be added to under the module or object.
+     """
+     module_path, _, object_path = addon_path_name.partition(':')
+     module_path = module_path.split('.')
+
+     if module_path[0] != __name__:
+         msg = f"expected base module to be '{__name__}', found '{module_path[0]}'"
+         raise AttributeError(msg)
+
+     target_base = sys.modules[__name__]
+     for submodule in module_path[1:-1]:
+         target_base = getattr(target_base, submodule)
+
+     addon_name = module_path[-1]
+     if object_path:
+         if len(module_path) > 1 and not hasattr(target_base, module_path[-1]):
+             msg = f"cannot add '{object_path}' to unknown submodule '{'.'.join(module_path)}'"
+             raise AttributeError(msg)
+
+         if len(module_path) > 1:
+             target_base = getattr(target_base, module_path[-1])
+
+         split_object = object_path.split('.')
+         addon_name = split_object[-1]
+
+         if len(split_object) > 1:
+             target_base = attrgetter('.'.join(split_object[:-1]))(target_base)
+
+     return target_base, addon_name
+
+
+ def _find_addons():
+     """Find and load all copulas add-ons."""
+     group = 'copulas_modules'
+     try:
+         eps = entry_points(group=group)
+     except TypeError:
+         # Load-time selection requires Python >= 3.10 or importlib_metadata >= 3.6
+         eps = entry_points().get(group, [])
+
+     for entry_point in eps:
+         try:
+             addon = entry_point.load()
+         except Exception as e:  # pylint: disable=broad-exception-caught
+             msg = f'Failed to load "{entry_point.name}" from "{entry_point.value}" with error:\n{e}'
+             warnings.warn(msg)
+             continue
+
+         try:
+             addon_target, addon_name = _get_addon_target(entry_point.name)
+         except AttributeError as error:
+             msg = f"Failed to set '{entry_point.name}': {error}."
+             warnings.warn(msg)
+             continue
+
+         if isinstance(addon, ModuleType):
+             addon_module_name = f'{addon_target.__name__}.{addon_name}'
+             if addon_module_name not in sys.modules:
+                 sys.modules[addon_module_name] = addon
+
+         setattr(addon_target, addon_name, addon)
+
+
+ _find_addons()
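
For context, the loader above resolves entry points registered under the 'copulas_modules' group and attaches each loaded object onto the copulas namespace. The sketch below only illustrates the name resolution performed by `_get_addon_target`; the add-on name is invented for illustration, and the private helper is called here purely for demonstration.

# Hypothetical illustration of the add-on mechanism above. A third-party
# distribution would register an entry point in the 'copulas_modules' group
# whose *name* encodes where the add-on is attached (e.g.
# 'copulas.bivariate:MyCopula') and whose value points at its own object.
import copulas
import copulas.bivariate  # import the submodule so the target can be resolved

# _get_addon_target maps the entry-point name to (target object, attribute name).
target, attr_name = copulas._get_addon_target('copulas.bivariate:MyCopula')
print(target.__name__, attr_name)  # 'copulas.bivariate', 'MyCopula'

# _find_addons() then loads the entry point and does the equivalent of
#     setattr(target, attr_name, addon)
# so the add-on becomes available as copulas.bivariate.MyCopula.
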
copulas/bivariate/__init__.py ADDED
@@ -0,0 +1,175 @@
+ """Bivariate copulas."""
+
+ import numpy as np
+ import pandas as pd
+
+ from copulas.utils import EPSILON
+ from copulas.bivariate.base import Bivariate, CopulaTypes
+ from copulas.bivariate.clayton import Clayton
+ from copulas.bivariate.frank import Frank
+ from copulas.bivariate.gumbel import Gumbel
+ from copulas.bivariate.utils import split_matrix
+
+ __all__ = (
+     'Bivariate',
+     'Clayton',
+     'CopulaTypes',
+     'Frank',
+     'Gumbel',
+ )
+
+
+ COMPUTE_EMPIRICAL_STEPS = 50
+
+
+ def _compute_empirical(X):
+     """Compute the empirical distribution.
+
+     Args:
+         X(numpy.array): Shape (n,2); Datapoints to compute the empirical (frequentist) copula.
+
+     Return:
+         tuple(list): Lower-tail points ``z_left`` with values ``L`` and upper-tail points ``z_right`` with values ``R``.
+
+     """
+     z_left = []
+     z_right = []
+     L = []
+     R = []
+
+     U, V = split_matrix(X)
+     N = len(U)
+     base = np.linspace(EPSILON, 1.0 - EPSILON, COMPUTE_EMPIRICAL_STEPS)
+     # See https://github.com/sdv-dev/Copulas/issues/45
+
+     for k in range(COMPUTE_EMPIRICAL_STEPS):
+         left = sum(np.logical_and(U <= base[k], V <= base[k])) / N
+         right = sum(np.logical_and(U >= base[k], V >= base[k])) / N
+
+         if left > 0:
+             z_left.append(base[k])
+             L.append(left / base[k] ** 2)
+
+         if right > 0:
+             z_right.append(base[k])
+             R.append(right / (1 - z_right[k]) ** 2)
+
+     return z_left, L, z_right, R
+
+
+ def _compute_tail(c, z):
+     r"""Compute the upper tail concentration function.
+
+     The upper tail concentration function is defined by:
+
+     .. math:: R(z) = \frac{[1 - 2z + C(z, z)]}{(1 - z)^{2}}
+
+     Args:
+         c(Iterable): Values of :math:`C(z,z)`.
+         z(Iterable): Values for the empirical copula.
+
+     Returns:
+         numpy.ndarray
+
+     """
+     return (1.0 - 2 * np.asarray(z) + c) / (np.power(1.0 - np.asarray(z), 2))
+
+
+ def _compute_candidates(copulas, left_tail, right_tail):
+     """Compute the tail concentrations of the candidate copulas.
+
+     Args:
+         copulas(list[Bivariate]): Fitted instances of bivariate copulas.
+         left_tail(list): Points at which to evaluate the lower tail concentration.
+         right_tail(list): Points at which to evaluate the upper tail concentration.
+
+     Returns:
+         tuple[list]: Lists of lower and upper tail concentration values for each candidate copula.
+
+
+     """
+     left = []
+     right = []
+
+     X_left = np.column_stack((left_tail, left_tail))
+     X_right = np.column_stack((right_tail, right_tail))
+
+     for copula in copulas:
+         left.append(copula.cumulative_distribution(X_left) / np.power(left_tail, 2))
+         right.append(_compute_tail(copula.cumulative_distribution(X_right), right_tail))
+
+     return left, right
+
+
+ def select_copula(X):
+     r"""Select the best copula function based on likelihood.
+
+     Given our candidate copulas, the procedure proposed for selecting the one
+     that best fits a dataset of pairs :math:`\{(u_j, v_j)\}, j=1,2,...,n`, is as follows:
+
+     1. Estimate the most likely parameter :math:`\theta` of each copula candidate for the given
+        dataset.
+
+     2. Construct :math:`R(z|\theta)`. Calculate the area under the tail for each of the copula
+        candidates.
+
+     3. Compare the area :math:`a_u` achieved using the empirical copula against the ones
+        achieved for the copula candidates. Score the outcome of the comparison from 3 (best)
+        down to 1 (worst).
+
+     4. Proceed as in steps 2-3 with the lower tail and the function :math:`L`.
+
+     5. Finally, the sum of the empirical upper and lower tail functions is compared against
+        :math:`R + L`. The scores of the three comparisons are summed and the candidate with the
+        highest value is selected.
+
+     Args:
+         X(np.ndarray): Matrix of shape (n,2).
+
+     Returns:
+         Bivariate: The candidate copula that best fits the data.
+
+     """
+     frank = Frank()
+     frank.fit(X)
+
+     if frank.tau <= 0:
+         return frank
+
+     copula_candidates = [frank]
+
+     # append the remaining copula families to the candidate list
+     for copula_class in [Clayton, Gumbel]:
+         try:
+             copula = copula_class()
+             copula.tau = frank.tau
+             copula._compute_theta()
+             copula_candidates.append(copula)
+         except ValueError:
+             pass
+
+     left_tail, empirical_left_aut, right_tail, empirical_right_aut = _compute_empirical(X)
+     candidate_left_auts, candidate_right_auts = _compute_candidates(
+         copula_candidates, left_tail, right_tail
+     )
+
+     empirical_aut = np.concatenate((empirical_left_aut, empirical_right_aut))
+     candidate_auts = [
+         np.concatenate((left, right))
+         for left, right in zip(candidate_left_auts, candidate_right_auts)
+     ]
+
+     # compute L2 distance from the empirical distribution
+     diff_left = [np.sum((empirical_left_aut - left) ** 2) for left in candidate_left_auts]
+     diff_right = [np.sum((empirical_right_aut - right) ** 2) for right in candidate_right_auts]
+     diff_both = [np.sum((empirical_aut - candidate) ** 2) for candidate in candidate_auts]
+
+     # calculate ranks
+     score_left = pd.Series(diff_left).rank(ascending=False)
+     score_right = pd.Series(diff_right).rank(ascending=False)
+     score_both = pd.Series(diff_both).rank(ascending=False)
+
+     score = score_left + score_right + score_both
+
+     selected_copula = np.argmax(score.to_numpy())
+     return copula_candidates[selected_copula]
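
To make the selection routine above concrete, here is a minimal, hypothetical usage sketch. The synthetic data (rank-transformed correlated normals, so the marginals are approximately uniform) is an illustrative assumption, not part of the package.

import numpy as np
from scipy.stats import rankdata

from copulas.bivariate import select_copula

# Hypothetical example data: positively dependent pairs mapped into (0, 1)
# through their ranks, so both marginals are approximately uniform.
rng = np.random.default_rng(0)
x = rng.normal(size=1000)
y = x + rng.normal(scale=0.5, size=1000)
u = rankdata(x) / (len(x) + 1)
v = rankdata(y) / (len(y) + 1)

best = select_copula(np.column_stack([u, v]))
print(type(best).__name__, best.theta)  # the winning family (e.g. 'Gumbel') and its fitted theta
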
copulas/bivariate/base.py ADDED
@@ -0,0 +1,448 @@
+ """This module contains a base class for bivariate copulas."""
+
+ import json
+ import warnings
+ from enum import Enum
+
+ import numpy as np
+ from scipy import stats
+ from scipy.optimize import brentq
+
+ from copulas.bivariate.utils import split_matrix
+ from copulas.errors import NotFittedError
+ from copulas.utils import EPSILON, random_state, validate_random_state
+
+
+ class CopulaTypes(Enum):
+     """Available copula families."""
+
+     CLAYTON = 0
+     FRANK = 1
+     GUMBEL = 2
+     INDEPENDENCE = 3
+
+
+ class Bivariate(object):
+     """Base class for bivariate copulas.
+
+     This class allows instantiating all of its subclasses and serves as a single entry point for
+     the bivariate copula classes.
+
+     >>> Bivariate(copula_type=CopulaTypes.FRANK).__class__
+     copulas.bivariate.frank.Frank
+
+     >>> Bivariate(copula_type='frank').__class__
+     copulas.bivariate.frank.Frank
+
+
+     Args:
+         copula_type (Union[CopulaTypes, str]): Subtype of the copula.
+         random_state (Union[int, np.random.RandomState, None]): Seed or RandomState
+             for the random generator.
+
+     Attributes:
+         copula_type(CopulaTypes): Family of the copula a subclass belongs to.
+         _subclasses(list[type]): List of declared subclasses.
+         theta_interval(list[float]): Interval of valid thetas for the given copula family.
+         invalid_thetas(list[float]): Values that, even though they belong to
+             :attr:`theta_interval`, shouldn't be considered valid.
+         tau (float): Kendall's tau for the data given at :meth:`fit`.
+         theta(float): Parameter for the copula.
+
+     """
+
+     copula_type = None
+     _subclasses = []
+     theta_interval = []
+     invalid_thetas = []
+     theta = None
+     tau = None
+
+     @classmethod
+     def _get_subclasses(cls):
+         """Recursively find the subclasses of the current class object.
+
+         Returns:
+             list[Bivariate]: List of subclass objects.
+
+         """
+         subclasses = []
+         for subclass in cls.__subclasses__():
+             subclasses.append(subclass)
+             subclasses.extend(subclass._get_subclasses())
+
+         return subclasses
+
+     @classmethod
+     def subclasses(cls):
+         """Return a list of subclasses for the current class object.
+
+         Returns:
+             list[Bivariate]: Subclasses for the given class.
+
+         """
+         if not cls._subclasses:
+             cls._subclasses = cls._get_subclasses()
+
+         return cls._subclasses
+
+     def __new__(cls, *args, **kwargs):
+         """Create and return a new object.
+
+         Returns:
+             Bivariate: New object.
+         """
+         copula_type = kwargs.get('copula_type', None)
+         if copula_type is None:
+             return super(Bivariate, cls).__new__(cls)
+
+         if not isinstance(copula_type, CopulaTypes):
+             if isinstance(copula_type, str) and copula_type.upper() in CopulaTypes.__members__:
+                 copula_type = CopulaTypes[copula_type.upper()]
+             else:
+                 raise ValueError(f'Invalid copula type {copula_type}')
+
+         for subclass in cls.subclasses():
+             if subclass.copula_type is copula_type:
+                 return super(Bivariate, cls).__new__(subclass)
+
+     def __init__(self, copula_type=None, random_state=None):
+         """Initialize Bivariate object.
+
+         Args:
+             copula_type (CopulaTypes or str): Subtype of the copula.
+             random_state (int, np.random.RandomState, or None): Seed or RandomState
+                 for the random generator.
+         """
+         self.random_state = validate_random_state(random_state)
+
+     def check_theta(self):
+         """Validate the computed theta against the copula specification.
+
+         This method asserts that the computed theta is in the valid range for the copula.
+
+         Raises:
+             ValueError: If theta is not in :attr:`theta_interval` or is in :attr:`invalid_thetas`.
+
+         """
+         lower, upper = self.theta_interval
+         if (not lower <= self.theta <= upper) or (self.theta in self.invalid_thetas):
+             message = 'The computed theta value {} is out of limits for the given {} copula.'
+             raise ValueError(message.format(self.theta, self.copula_type.name))
+
+     def check_fit(self):
+         """Assert that the model is fitted and the computed `theta` is valid.
+
+         Raises:
+             NotFittedError: if the model is not fitted.
+             ValueError: if the computed theta is invalid.
+
+         """
+         if not self.theta:
+             raise NotFittedError('This model is not fitted.')
+
+         self.check_theta()
+
+     def check_marginal(self, u):
+         """Check that the marginals are uniformly distributed.
+
+         Args:
+             u(np.ndarray): Array of datapoints with shape (n,).
+
+         Raises:
+             ValueError: If any value lies outside the [0, 1] range.
+         """
+         if min(u) < 0.0 or max(u) > 1.0:
+             raise ValueError('Marginal value out of bounds.')
+
+         empirical_cdf = np.sort(u)
+         uniform_cdf = np.linspace(0.0, 1.0, num=len(u))
+         ks_statistic = max(np.abs(empirical_cdf - uniform_cdf))
+         if ks_statistic > 1.627 / np.sqrt(len(u)):
+             # KS test with significance level 0.01
+             warnings.warn('Data does not appear to be uniform.', category=RuntimeWarning)
+
+     def _compute_theta(self):
+         """Compute theta, validate it and assign it to self."""
+         self.theta = self.compute_theta()
+         self.check_theta()
+
+     def fit(self, X):
+         """Fit a model to the data, updating the parameters.
+
+         Args:
+             X(np.ndarray): Array of datapoints with shape (n,2).
+
+         Return:
+             None
+         """
+         U, V = split_matrix(X)
+         self.check_marginal(U)
+         self.check_marginal(V)
+         self.tau = stats.kendalltau(U, V)[0]
+         if np.isnan(self.tau):
+             if len(np.unique(U)) == 1 or len(np.unique(V)) == 1:
+                 raise ValueError('Constant column.')
+             raise ValueError('Unable to compute tau.')
+         self._compute_theta()
+
+     def to_dict(self):
+         """Return a `dict` with the parameters to replicate this object.
+
+         Returns:
+             dict: Parameters of the copula.
+
+         """
+         return {'copula_type': self.copula_type.name, 'theta': self.theta, 'tau': self.tau}
+
+     @classmethod
+     def from_dict(cls, copula_dict):
+         """Create a new instance from the given parameters.
+
+         Args:
+             copula_dict: `dict` with the parameters to replicate the copula,
+                 like the output of `Bivariate.to_dict`.
+
+         Returns:
+             Bivariate: Instance of the copula defined on the parameters.
+
+         """
+         instance = cls(copula_type=copula_dict['copula_type'])
+         instance.theta = copula_dict['theta']
+         instance.tau = copula_dict['tau']
+         return instance
+
+     def infer(self, X):
+         """Take in a subset of values and predict the rest."""
+         raise NotImplementedError
+
+     def generator(self, t):
+         r"""Compute the generator function for Archimedean copulas.
+
+         The generator is a function
+         :math:`\psi: [0,1]\times\Theta \rightarrow [0, \infty)` # noqa: JS101
+
+         that, given an Archimedean copula, fulfills:
+         .. math:: C(u,v) = \psi^{-1}(\psi(u) + \psi(v))
+
+
+         In a more generic way:
+
+         .. math:: C(u_1, u_2, ..., u_n;\theta) = \psi^{-1}(\sum_{i=1}^{n}{\psi(u_i;\theta)}; \theta)
+
+         """
+         raise NotImplementedError
+
+     def probability_density(self, X):
+         r"""Compute the probability density function for the given copula family.
+
+         The probability density (pdf) for a given copula is defined as:
+
+         .. math:: c(U,V) = \frac{\partial^2 C(u,v)}{\partial v \partial u}
+
+         Args:
+             X(np.ndarray): Shape (n, 2). Datapoints to compute the pdf for.
+
+         Returns:
+             np.array: Probability density for the input values.
+
+         """
+         raise NotImplementedError
+
+     def log_probability_density(self, X):
+         """Return the log probability density of the model.
+
+         The log probability should be overridden with numerically stable
+         variants whenever possible.
+
+         Arguments:
+             X: `np.ndarray` of shape (n, 2).
+
+         Returns:
+             np.ndarray
+
+         """
+         return np.log(self.probability_density(X))
+
+     def pdf(self, X):
+         """Shortcut to :meth:`probability_density`."""
+         return self.probability_density(X)
+
+     def cumulative_distribution(self, X):
+         """Compute the cumulative distribution function for the copula, :math:`C(u, v)`.
+
+         Args:
+             X(np.ndarray): Shape (n, 2). Datapoints to compute the cdf for.
+
+         Returns:
+             numpy.array: Cumulative probability.
+
+         """
+         raise NotImplementedError
+
+     def cdf(self, X):
+         """Shortcut to :meth:`cumulative_distribution`."""
+         return self.cumulative_distribution(X)
+
+     def percent_point(self, y, V):
+         """Compute the inverse of the conditional cumulative distribution, :math:`C(u|v)^{-1}`.
+
+         Args:
+             y: `np.ndarray` value of :math:`C(u|v)`.
+             V: `np.ndarray` given value of v.
+         """
+         self.check_fit()
+         result = []
+         for _y, _v in zip(y, V):
+
+             def f(u):
+                 return self.partial_derivative_scalar(u, _v) - _y
+
+             minimum = brentq(f, EPSILON, 1.0)
+             if isinstance(minimum, np.ndarray):
+                 minimum = minimum[0]
+
+             result.append(minimum)
+
+         return np.array(result)
+
+     def ppf(self, y, V):
+         """Shortcut to :meth:`percent_point`."""
+         return self.percent_point(y, V)
+
+     def partial_derivative(self, X):
+         r"""Compute the partial derivative of the cumulative distribution.
+
+         The partial derivative of the copula (CDF) is the conditional CDF.
+
+         .. math:: C(u|v) = \frac{\partial C(u,v)}{\partial v}
+
+         The base class provides a finite difference approximation of the
+         partial derivative of the CDF with respect to v.
+
+         Args:
+             X(np.ndarray): Shape (n, 2). Datapoints at which to evaluate
+                 the conditional CDF.
+
+         Returns:
+             np.ndarray
+
+         """
+         delta = -2 * (X[:, 1] > 0.5) + 1
+         delta = 0.0001 * delta
+         X_prime = X.copy()
+         X_prime[:, 1] += delta
+         f = self.cumulative_distribution(X)
+         f_prime = self.cumulative_distribution(X_prime)
+         return (f_prime - f) / delta
+
+     def partial_derivative_scalar(self, U, V):
+         """Compute the partial derivative :math:`C(u|v)` of the cumulative distribution for single values."""
+         self.check_fit()
+
+         X = np.column_stack((U, V))
+         return self.partial_derivative(X).item()
+
+     def set_random_state(self, random_state):
+         """Set the random state.
+
+         Args:
+             random_state (int, np.random.RandomState, or None): Seed or RandomState
+                 for the random generator.
+         """
+         self.random_state = validate_random_state(random_state)
+
+     @random_state
+     def sample(self, n_samples):
+         """Generate `n_samples` new data points from the model.
+
+         The samples are generated using the inverse transform method: ``v ~ U[0, 1]``, ``c ~ U[0, 1]``, ``u = C^-1(c|v)``.
+
+         Args:
+             n_samples (int): Number of samples to create.
+
+         Returns:
+             np.ndarray: Array of shape (n_samples, 2) with data generated from the model.
+
+         """
+         if self.tau > 1 or self.tau < -1:
+             raise ValueError('The range for correlation measure is [-1,1].')
+
+         v = np.random.uniform(0, 1, n_samples)
+         c = np.random.uniform(0, 1, n_samples)
+
+         u = self.percent_point(c, v)
+         return np.column_stack((u, v))
+
+     def compute_theta(self):
+         """Compute the theta parameter using Kendall's tau."""
+         raise NotImplementedError
+
+     @classmethod
+     def select_copula(cls, X):
+         r"""Select the best copula function based on likelihood.
+
+         Given our candidate copulas, the procedure proposed for selecting the one
+         that best fits a dataset of pairs :math:`\{(u_j, v_j)\}, j=1,2,...,n`, is as follows:
+
+         1. Estimate the most likely parameter :math:`\theta` of each copula candidate for the given
+            dataset.
+
+         2. Construct :math:`R(z|\theta)`. Calculate the area under the tail for each of the copula
+            candidates.
+
+         3. Compare the area :math:`a_u` achieved using the empirical copula against the ones
+            achieved for the copula candidates. Score the outcome of the comparison from 3 (best)
+            down to 1 (worst).
+
+         4. Proceed as in steps 2-3 with the lower tail and the function :math:`L`.
+
+         5. Finally, the sum of the empirical upper and lower tail functions is compared against
+            :math:`R + L`. The scores of the three comparisons are summed and the candidate with the
+            highest value is selected.
+
+         Args:
+             X(np.ndarray): Matrix of shape (n,2).
+
+         Returns:
+             Bivariate: The candidate copula that best fits the data.
+
+         """
+         from copulas.bivariate import select_copula  # noqa
+
+         warnings.warn(
+             '`Bivariate.select_copula` has been deprecated and will be removed in a later '
+             'release. Please use `copulas.bivariate.select_copula` instead',
+             DeprecationWarning,
+         )
+         return select_copula(X)
+
+     def save(self, filename):
+         """Save the internal state of a copula in the specified filename.
+
+         Args:
+             filename(str): Path to save.
+
+         Returns:
+             None
+
+         """
+         content = self.to_dict()
+         with open(filename, 'w') as f:
+             json.dump(content, f)
+
+     @classmethod
+     def load(cls, copula_path):
+         """Create a new instance from a file.
+
+         Args:
+             copula_path(str): Path to the file with the serialized copula.
+
+         Returns:
+             Bivariate: Instance with the parameters stored in the file.
+
+         """
+         with open(copula_path) as f:
+             copula_dict = json.load(f)
+
+         return cls.from_dict(copula_dict)
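
To close, here is a short, hypothetical end-to-end sketch of the `Bivariate` API defined in this module: construct a copula by family name, fit it, sample from it, and round-trip it through `save`/`load`. The synthetic data and the file name are illustrative assumptions only.

import numpy as np
from scipy.stats import rankdata

from copulas.bivariate import Bivariate

# Hypothetical positively dependent data mapped into (0, 1) through ranks,
# so both marginals are approximately uniform, as fit() expects.
rng = np.random.default_rng(42)
x = rng.normal(size=500)
y = x + rng.normal(scale=0.8, size=500)
data = np.column_stack([rankdata(x) / 501, rankdata(y) / 501])

copula = Bivariate(copula_type='clayton')   # __new__ dispatches to the Clayton subclass
copula.fit(data)                            # estimates Kendall's tau, then theta

samples = copula.sample(10)                 # shape (10, 2), values in [0, 1]
params = copula.to_dict()                   # {'copula_type': 'CLAYTON', 'theta': ..., 'tau': ...}

copula.save('clayton.json')                 # JSON serialization via to_dict()
restored = Bivariate.load('clayton.json')   # rebuilt via from_dict()
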