upgini 1.2.67__tar.gz → 1.2.68a3818.dev1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of upgini might be problematic.
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/PKG-INFO +1 -1
- upgini-1.2.68a3818.dev1/src/upgini/__about__.py +1 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/date.py +15 -21
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/feature.py +5 -1
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/timeseries/cross.py +15 -7
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/timeseries/roll.py +2 -7
- upgini-1.2.68a3818.dev1/src/upgini/autofe/utils.py +83 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/features_enricher.py +141 -145
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/resource_bundle/strings.properties +1 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/search_task.py +7 -1
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/mstats.py +1 -1
- upgini-1.2.67/src/upgini/__about__.py +0 -1
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/.gitignore +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/LICENSE +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/README.md +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/pyproject.toml +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/ads.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/ads_management/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/ads_management/ads_manager.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/all_operators.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/binary.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/groupby.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/operator.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/timeseries/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/timeseries/base.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/timeseries/delta.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/timeseries/lag.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/timeseries/trend.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/timeseries/volatility.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/unary.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/autofe/vector.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/data_source/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/data_source/data_source_publisher.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/dataset.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/errors.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/http.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/lazy_import.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/mdc/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/mdc/context.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/metadata.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/metrics.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/normalizer/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/normalizer/normalize_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/resource_bundle/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/resource_bundle/exceptions.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/resource_bundle/strings_widget.properties +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/sampler/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/sampler/base.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/sampler/random_under_sampler.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/sampler/utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/spinner.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/Roboto-Regular.ttf +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/__init__.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/base_search_key_detector.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/blocked_time_series.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/country_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/custom_loss_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/cv_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/datetime_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/deduplicate_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/display_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/email_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/fallback_progress_bar.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/feature_info.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/features_validator.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/format.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/ip_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/phone_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/postal_code_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/progress_bar.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/sklearn_ext.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/sort.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/target_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/track_info.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/ts_utils.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/utils/warning_counter.py +0 -0
- {upgini-1.2.67 → upgini-1.2.68a3818.dev1}/src/upgini/version_validator.py +0 -0
src/upgini/__about__.py (added)
@@ -0,0 +1 @@
+__version__ = "1.2.68a3818.dev1"
src/upgini/autofe/date.py
@@ -8,6 +8,7 @@ from pandas.core.arrays.timedeltas import TimedeltaArray
 from pydantic import BaseModel, __version__ as pydantic_version
 
 from upgini.autofe.operator import PandasOperator, ParametrizedOperator
+from upgini.autofe.utils import pydantic_validator
 
 
 def get_pydantic_version():
@@ -209,6 +210,14 @@ class DateListDiffBounded(DateListDiff, ParametrizedOperator):
 
         return cls(diff_unit=diff_unit, lower_bound=lower_bound, upper_bound=upper_bound, aggregation=aggregation)
 
+    def get_params(self) -> Dict[str, Optional[str]]:
+        res = super().get_params()
+        if self.lower_bound is not None:
+            res["lower_bound"] = str(self.lower_bound)
+        if self.upper_bound is not None:
+            res["upper_bound"] = str(self.upper_bound)
+        return res
+
     def _agg(self, x):
         x = x[
             (x >= (self.lower_bound if self.lower_bound is not None else -np.inf))
@@ -269,32 +278,17 @@ class DatePercentile(DatePercentileBase):
             {
                 "zero_month": self.zero_month,
                 "zero_year": self.zero_year,
-                "zero_bounds": self.zero_bounds,
+                "zero_bounds": json.dumps(self.zero_bounds),
                 "step": self.step,
             }
         )
         return res
 
-
-
-
-
-
-        @field_validator("zero_bounds", mode="before")
-        def parse_zero_bounds(cls, value):
-            if isinstance(value, str):
-                return json.loads(value)
-            return value
-
-    else:
-        # Use @validator for Pydantic 1.x
-        from pydantic import validator
-
-        @validator("zero_bounds", pre=True)
-        def parse_zero_bounds(cls, value):
-            if isinstance(value, str):
-                return json.loads(value)
-            return value
+    @pydantic_validator("zero_bounds", mode="before")
+    def parse_zero_bounds(cls, value):
+        if isinstance(value, str):
+            return json.loads(value)
+        return value
 
     def _get_bounds(self, date_col: pd.Series) -> pd.Series:
         months = date_col.dt.month
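Note: the DatePercentile change above pairs json.dumps in get_params with a "before" validator, so the list-valued zero_bounds parameter survives a round-trip through string metadata. A minimal sketch of the pattern (a stand-in model, not upgini's class; shown directly with Pydantic 2.x instead of the version-dispatching helper):

```python
# Stand-in model illustrating the serialize/parse round-trip; not upgini code.
import json
from typing import List

from pydantic import BaseModel, field_validator  # assumes Pydantic 2.x


class PercentileLike(BaseModel):
    zero_bounds: List[float] = []

    @field_validator("zero_bounds", mode="before")
    def parse_zero_bounds(cls, value):
        # Accept the JSON string produced by get_params() as well as a plain list.
        if isinstance(value, str):
            return json.loads(value)
        return value


params = {"zero_bounds": json.dumps([0.1, 0.5, 0.9])}  # what get_params() would emit
assert PercentileLike(**params).zero_bounds == [0.1, 0.5, 0.9]
```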
src/upgini/autofe/feature.py
@@ -112,7 +112,11 @@ class Feature:
 
     def get_hash(self) -> str:
         return hashlib.sha256(
-            "_".join(
+            "_".join(
+                [self.op.get_hash_component()]
+                + [ch.op.get_hash_component() for ch in self.children if isinstance(ch, Feature)]
+                + [ch.get_display_name() for ch in self.children]
+            ).encode("utf-8")
         ).hexdigest()[:8]
 
     def set_alias(self, alias: str) -> "Feature":
src/upgini/autofe/timeseries/cross.py
@@ -1,16 +1,13 @@
+import json
 from typing import Dict, List, Optional
 
 import numpy as np
 import pandas as pd
 
-try:
-    from pydantic import field_validator as validator  # V2
-except ImportError:
-    from pydantic import validator  # V1
-
 from upgini.autofe.all_operators import find_op
 from upgini.autofe.operator import PandasOperator, ParametrizedOperator
 from upgini.autofe.timeseries.base import TimeSeriesBase
+from upgini.autofe.utils import pydantic_validator
 
 
 class CrossSeriesInteraction(TimeSeriesBase, ParametrizedOperator):
@@ -20,13 +17,24 @@ class CrossSeriesInteraction(TimeSeriesBase, ParametrizedOperator):
     left_descriptor: List[str] = []
     right_descriptor: List[str] = []
 
-    @validator("descriptor_indices")
-    @classmethod
+    @pydantic_validator("descriptor_indices")
     def validate_descriptor_indices(cls, v):
         if not v:
             raise ValueError("descriptor_indices cannot be empty for CrossSeriesInteraction")
         return v
 
+    @pydantic_validator("left_descriptor", "right_descriptor", mode="before")
+    def parse_descriptors(cls, v):
+        if isinstance(v, str):
+            return json.loads(v)
+        return v
+
+    @pydantic_validator("interaction_op", mode="before")
+    def validate_interaction_op(cls, v):
+        if isinstance(v, str):
+            return find_op(v)
+        return v
+
     def __init__(self, **data):
         super().__init__(**data)
         indices = self.descriptor_indices
src/upgini/autofe/timeseries/roll.py
@@ -3,6 +3,7 @@ from typing import Dict, Optional
 
 from upgini.autofe.operator import ParametrizedOperator
 from upgini.autofe.timeseries.base import TimeSeriesBase
+from upgini.autofe.utils import pydantic_validator
 
 # Roll aggregation functions
 roll_aggregations = {
@@ -12,19 +13,13 @@ roll_aggregations = {
     "iqr": lambda x: x.quantile(0.75) - x.quantile(0.25),
 }
 
-try:
-    from pydantic import field_validator as validator  # V2
-except ImportError:
-    from pydantic import validator  # V1
-
 
 class Roll(TimeSeriesBase, ParametrizedOperator):
     aggregation: str
     window_size: int = 1
     window_unit: str = "D"
 
-    @validator("window_unit")
-    @classmethod
+    @pydantic_validator("window_unit")
     def validate_window_unit(cls, v: str) -> str:
         try:
             pd.tseries.frequencies.to_offset(v)
src/upgini/autofe/utils.py (added)
@@ -0,0 +1,83 @@
+"""
+Utility functions for autofe module.
+"""
+
+import functools
+from typing import Callable
+
+
+def get_pydantic_version():
+    """
+    Get the major version of pydantic.
+
+    Returns:
+        int: Major version number (1 or 2)
+    """
+    try:
+        from pydantic import __version__ as pydantic_version
+
+        major_version = int(pydantic_version.split(".")[0])
+        return major_version
+    except (ImportError, ValueError):
+        # Default to version 1 if unable to determine
+        return 1
+
+
+def pydantic_validator(field_name: str, *fields, mode: str = "before", **kwargs):
+    """
+    A decorator that applies the appropriate Pydantic validator based on the installed version.
+
+    This decorator handles the differences between Pydantic v1 and v2 validator syntax,
+    making it easier to write code that works with both versions.
+
+    Args:
+        field_name (str): The name of the field to validate
+        mode (str): The validation mode, either "before" or "after" (for Pydantic v2)
+        **kwargs: Additional arguments to pass to the validator
+
+    Returns:
+        Callable: A decorator that can be applied to validator methods
+
+    Example:
+        ```python
+        class MyModel(BaseModel):
+            items: List[int]
+
+            @pydantic_validator("items")
+            def parse_items(cls, value):
+                if isinstance(value, str):
+                    return [int(x) for x in value.split(",")]
+                return value
+        ```
+    """
+    pydantic_version = get_pydantic_version()
+
+    if pydantic_version >= 2:
+        # Use field_validator for Pydantic 2.x
+        from pydantic import field_validator
+
+        def decorator(func: Callable) -> Callable:
+            @field_validator(field_name, *fields, mode=mode, **kwargs)
+            @functools.wraps(func)
+            def wrapper(cls, value, **kw):
+                return func(cls, value)
+
+            return wrapper
+
+        return decorator
+    else:
+        # Use validator for Pydantic 1.x
+        from pydantic import validator
+
+        # Map mode to Pydantic v1 parameters
+        pre = True if mode == "before" else False
+
+        def decorator(func: Callable) -> Callable:
+            @validator(field_name, *fields, pre=pre, **kwargs)
+            @functools.wraps(func)
+            def wrapper(cls, value, **kw):
+                return func(cls, value)
+
+            return wrapper
+
+        return decorator
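Note: the docstring example above works unchanged on either Pydantic major version, because pydantic_validator dispatches to field_validator (v2) or validator(pre=...) (v1) at decoration time. A slightly fuller usage sketch (the model and field names are invented for illustration):

```python
# Illustrative only: WindowConfig and its field are made up; the decorator is
# the real upgini.autofe.utils.pydantic_validator from the hunk above.
from typing import List

from pydantic import BaseModel

from upgini.autofe.utils import pydantic_validator


class WindowConfig(BaseModel):
    sizes: List[int] = []

    @pydantic_validator("sizes", mode="before")
    def parse_sizes(cls, value):
        if isinstance(value, str):  # accept "1,7,30" as well as [1, 7, 30]
            return [int(x) for x in value.split(",")]
        return value


print(WindowConfig(sizes="1,7,30").sizes)  # [1, 7, 30] under Pydantic 1.x or 2.x
```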
src/upgini/features_enricher.py
@@ -702,6 +702,7 @@ class FeaturesEnricher(TransformerMixin):
     def transform(
         self,
         X: pd.DataFrame,
+        y: Optional[pd.Series] = None,
         *args,
         exclude_features_sources: Optional[List[str]] = None,
         keep_input: bool = True,
@@ -766,6 +767,7 @@ class FeaturesEnricher(TransformerMixin):
         result, _, _ = self.__inner_transform(
             trace_id,
             X,
+            y=y,
             exclude_features_sources=exclude_features_sources,
             importance_threshold=importance_threshold,
             max_features=max_features,
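Note: with the optional y added to transform and threaded into __inner_transform, the target can now be supplied at enrichment time, which the rest of this diff uses for features computed on the target. A hypothetical call sketch (dataset variables and the search key are placeholders):

```python
# Hypothetical usage; train_X/train_y/test_X/test_y and the key column are placeholders.
from upgini import FeaturesEnricher, SearchKey

enricher = FeaturesEnricher(search_keys={"order_date": SearchKey.DATE})
enricher.fit(train_X, train_y)

# New in this release: transform() also accepts the target.
enriched_test = enricher.transform(test_X, y=test_y)
```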
@@ -1682,7 +1684,6 @@ class FeaturesEnricher(TransformerMixin):
             validated_X,
             validated_y,
             eval_set,
-            is_demo_dataset,
             exclude_features_sources,
             trace_id,
             progress_bar,
@@ -1873,158 +1874,147 @@ class FeaturesEnricher(TransformerMixin):
         validated_X: pd.DataFrame,
         validated_y: pd.Series,
         eval_set: Optional[List[tuple]],
-        is_demo_dataset: bool,
         exclude_features_sources: Optional[List[str]],
         trace_id: str,
         progress_bar: Optional[ProgressBar],
         progress_callback: Optional[Callable[[SearchProgress], Any]],
     ) -> _SampledDataForMetrics:
-
-        if eval_set is not None:
-            self.logger.info("Transform with eval_set")
-            # concatenate X and eval_set with eval_set_index
-            df = validated_X.copy()
-            df[TARGET] = validated_y
-            df[EVAL_SET_INDEX] = 0
-            for idx, eval_pair in enumerate(eval_set):
-                eval_x, eval_y = self._validate_eval_set_pair(validated_X, eval_pair)
-                eval_df_with_index = eval_x.copy()
-                eval_df_with_index[TARGET] = eval_y
-                eval_df_with_index[EVAL_SET_INDEX] = idx + 1
-                df = pd.concat([df, eval_df_with_index])
-
-            df, _ = clean_full_duplicates(df, logger=self.logger, bundle=self.bundle)
-
-            # downsample if need to eval_set threshold
-            num_samples = _num_samples(df)
-            force_downsampling = (
-                not self.disable_force_downsampling
-                and self.columns_for_online_api is not None
-                and num_samples > Dataset.FORCE_SAMPLE_SIZE
-            )
-            # TODO: check that system_record_id was added before this step
-            if force_downsampling:
-                self.logger.info(f"Force downsampling from {num_samples} to {Dataset.FORCE_SAMPLE_SIZE}")
-                df = balance_undersample_forced(
-                    df=df,
-                    target_column=TARGET,
-                    id_columns=self.id_columns,
-                    date_column=self._get_date_column(self.search_keys),
-                    task_type=self.model_task_type,
-                    cv_type=self.cv,
-                    random_state=self.random_state,
-                    sample_size=Dataset.FORCE_SAMPLE_SIZE,
-                    logger=self.logger,
-                    bundle=self.bundle,
-                    warning_callback=self.__log_warning,
-                )
-            elif num_samples > Dataset.FIT_SAMPLE_WITH_EVAL_SET_THRESHOLD:
-                self.logger.info(f"Downsampling from {num_samples} to {Dataset.FIT_SAMPLE_WITH_EVAL_SET_ROWS}")
-                df = df.sample(n=Dataset.FIT_SAMPLE_WITH_EVAL_SET_ROWS, random_state=self.random_state)
-
-                trace_id,
-                df,
-                exclude_features_sources=exclude_features_sources,
-                silent_mode=True,
-                metrics_calculation=True,
-                progress_bar=progress_bar,
-                progress_callback=progress_callback,
-                add_fit_system_record_id=True,
-                target_name=tmp_target_name,
-            )
-            if enriched_df is None:
-                return None
-
-            enriched_X_columns = enriched_X.columns.tolist()
-
-                enriched_eval_x = enriched_eval_xy[enriched_X_columns].copy()
-                eval_set_sampled_dict[idx] = (eval_x_sampled, enriched_eval_x, eval_y_sampled)
-        else:
-            self.logger.info("Transform without eval_set")
-            df = validated_X.copy()
-
-            self.logger.info(f"Force downsampling from {num_samples} to {Dataset.FORCE_SAMPLE_SIZE}")
-            df = balance_undersample_forced(
-                df=df,
-                target_column=TARGET,
-                id_columns=self.id_columns,
-                date_column=self._get_date_column(self.search_keys),
-                task_type=self.model_task_type,
-                cv_type=self.cv,
-                random_state=self.random_state,
-                sample_size=Dataset.FORCE_SAMPLE_SIZE,
-                logger=self.logger,
-                bundle=self.bundle,
-                warning_callback=self.__log_warning,
-            )
-            elif num_samples > Dataset.FIT_SAMPLE_THRESHOLD:
-                self.logger.info(f"Downsampling from {num_samples} to {Dataset.FIT_SAMPLE_ROWS}")
-                df = df.sample(n=Dataset.FIT_SAMPLE_ROWS, random_state=self.random_state)
+        has_eval_set = eval_set is not None
+
+        self.logger.info(f"Transform {'with' if has_eval_set else 'without'} eval_set")
+
+        # Prepare
+        df = self.__combine_train_and_eval_sets(validated_X, validated_y, eval_set)
+        df, _ = clean_full_duplicates(df, logger=self.logger, bundle=self.bundle)
+        df = self.__downsample_for_metrics(df)
+
+        # Transform
+        enriched_df, _, _ = self.__inner_transform(
+            trace_id,
+            X=df.drop(columns=[TARGET]),
+            y=df[TARGET],
+            exclude_features_sources=exclude_features_sources,
+            silent_mode=True,
+            metrics_calculation=True,
+            progress_bar=progress_bar,
+            progress_callback=progress_callback,
+            add_fit_system_record_id=True,
+        )
+        if enriched_df is None:
+            return None
+
+        x_columns = [
+            c
+            for c in (validated_X.columns.tolist() + self.fit_generated_features + [SYSTEM_RECORD_ID])
+            if c in enriched_df.columns
+        ]
+
+        X_sampled, y_sampled, enriched_X = self.__extract_train_data(enriched_df, x_columns)
+        eval_set_sampled_dict = self.__extract_eval_data(
+            enriched_df, x_columns, enriched_X.columns.tolist(), len(eval_set) if has_eval_set else 0
+        )
+
+        # Cache and return results
+        return self.__cache_and_return_results(
+            validated_X, validated_y, eval_set, X_sampled, y_sampled, enriched_X, eval_set_sampled_dict
+        )
+
+    def __combine_train_and_eval_sets(
+        self, validated_X: pd.DataFrame, validated_y: pd.Series, eval_set: Optional[List[tuple]]
+    ) -> pd.DataFrame:
+        df = validated_X.copy()
+        df[TARGET] = validated_y
+        if eval_set is None:
+            return df
+
+        df[EVAL_SET_INDEX] = 0
+
+        for idx, eval_pair in enumerate(eval_set):
+            eval_x, eval_y = self._validate_eval_set_pair(validated_X, eval_pair)
+            eval_df_with_index = eval_x.copy()
+            eval_df_with_index[TARGET] = eval_y
+            eval_df_with_index[EVAL_SET_INDEX] = idx + 1
+            df = pd.concat([df, eval_df_with_index])
+
+        return df
+
+    def __downsample_for_metrics(self, df: pd.DataFrame) -> pd.DataFrame:
+        num_samples = _num_samples(df)
+        force_downsampling = (
+            not self.disable_force_downsampling
+            and self.columns_for_online_api is not None
+            and num_samples > Dataset.FORCE_SAMPLE_SIZE
+        )
+
+        if force_downsampling:
+            self.logger.info(f"Force downsampling from {num_samples} to {Dataset.FORCE_SAMPLE_SIZE}")
+            return balance_undersample_forced(
+                df=df,
+                target_column=TARGET,
+                id_columns=self.id_columns,
+                date_column=self._get_date_column(self.search_keys),
+                task_type=self.model_task_type,
+                cv_type=self.cv,
+                random_state=self.random_state,
+                sample_size=Dataset.FORCE_SAMPLE_SIZE,
+                logger=self.logger,
+                bundle=self.bundle,
+                warning_callback=self.__log_warning,
+            )
+        elif num_samples > Dataset.FIT_SAMPLE_THRESHOLD:
+            if EVAL_SET_INDEX in df.columns:
+                threshold = Dataset.FIT_SAMPLE_WITH_EVAL_SET_THRESHOLD
+                sample_size = Dataset.FIT_SAMPLE_WITH_EVAL_SET_ROWS
+            else:
+                threshold = Dataset.FIT_SAMPLE_THRESHOLD
+                sample_size = Dataset.FIT_SAMPLE_ROWS
+
+            if num_samples > threshold:
+                self.logger.info(f"Downsampling from {num_samples} to {sample_size}")
+                return df.sample(n=sample_size, random_state=self.random_state)
+
+        return df
+
+    def __extract_train_data(
+        self, enriched_df: pd.DataFrame, x_columns: List[str]
+    ) -> Tuple[pd.DataFrame, pd.Series, pd.DataFrame]:
+        if EVAL_SET_INDEX in enriched_df.columns:
+            enriched_Xy = enriched_df.query(f"{EVAL_SET_INDEX} == 0")
+        else:
+            enriched_Xy = enriched_df
+        X_sampled = enriched_Xy[x_columns].copy()
+        y_sampled = enriched_Xy[TARGET].copy()
+        enriched_X = enriched_Xy.drop(columns=[TARGET, EVAL_SET_INDEX], errors="ignore")
+        return X_sampled, y_sampled, enriched_X
+
+    def __extract_eval_data(
+        self, enriched_df: pd.DataFrame, x_columns: List[str], enriched_X_columns: List[str], eval_set_len: int
+    ) -> Dict[int, Tuple]:
+        eval_set_sampled_dict = {}
+
+        for idx in range(eval_set_len):
+            enriched_eval_xy = enriched_df.query(f"{EVAL_SET_INDEX} == {idx + 1}")
+            eval_x_sampled = enriched_eval_xy[x_columns].copy()
+            eval_y_sampled = enriched_eval_xy[TARGET].copy()
+            enriched_eval_x = enriched_eval_xy[enriched_X_columns].copy()
+            eval_set_sampled_dict[idx] = (eval_x_sampled, enriched_eval_x, eval_y_sampled)
+
+        return eval_set_sampled_dict
+
+    def __cache_and_return_results(
+        self,
+        validated_X: pd.DataFrame,
+        validated_y: pd.Series,
+        eval_set: Optional[List[tuple]],
+        X_sampled: pd.DataFrame,
+        y_sampled: pd.Series,
+        enriched_X: pd.DataFrame,
+        eval_set_sampled_dict: Dict[int, Tuple],
+    ) -> _SampledDataForMetrics:
         datasets_hash = hash_input(validated_X, validated_y, eval_set)
+        columns_renaming = getattr(self, "fit_columns_renaming", {})
+
         self.__cached_sampled_datasets[datasets_hash] = (
             X_sampled,
             y_sampled,
@@ -2161,6 +2151,7 @@ if response.status_code == 200:
         trace_id: str,
         X: pd.DataFrame,
         *,
+        y: Optional[pd.Series] = None,
        exclude_features_sources: Optional[List[str]] = None,
        importance_threshold: Optional[float] = None,
        max_features: Optional[int] = None,
@@ -2179,8 +2170,14 @@ if response.status_code == 200:
         self.logger.info("Start transform")
 
         validated_X = self._validate_X(X, is_transform=True)
+        if y is not None:
+            validated_y = self._validate_y(validated_X, y)
+            df = self.__combine_train_and_eval_sets(validated_X, validated_y, eval_set=None)
+        else:
+            validated_y = None
+            df = validated_X
 
-        self.__log_debug_information(validated_X, exclude_features_sources=exclude_features_sources)
+        self.__log_debug_information(validated_X, validated_y, exclude_features_sources=exclude_features_sources)
 
         self.__validate_search_keys(self.search_keys, self.search_id)
 
@@ -2223,29 +2220,27 @@ if response.status_code == 200:
             self.logger.info(msg)
             print(msg)
 
-        is_demo_dataset = hash_input(validated_X) in DEMO_DATASET_HASHES
+        is_demo_dataset = hash_input(df) in DEMO_DATASET_HASHES
 
         columns_to_drop = [
-            c for c in validated_X.columns if c in self.feature_names_ and c in self.dropped_client_feature_names_
+            c for c in df.columns if c in self.feature_names_ and c in self.dropped_client_feature_names_
         ]
         if len(columns_to_drop) > 0:
             msg = self.bundle.get("x_contains_enriching_columns").format(columns_to_drop)
             self.logger.warning(msg)
             print(msg)
-            validated_X = validated_X.drop(columns=columns_to_drop)
+            df = df.drop(columns=columns_to_drop)
 
         search_keys = self.search_keys.copy()
         if self.id_columns is not None and self.cv is not None and self.cv.is_time_series():
             search_keys.update(
                 {col: SearchKey.CUSTOM_KEY for col in self.id_columns if col not in self.search_keys}
             )
 
         search_keys = self.__prepare_search_keys(
-            validated_X, search_keys, is_demo_dataset, is_transform=True, silent_mode=silent_mode
+            df, search_keys, is_demo_dataset, is_transform=True, silent_mode=silent_mode
         )
 
-        df = validated_X.copy()
-
         df = self.__handle_index_search_keys(df, search_keys)
 
         if DEFAULT_INDEX in df.columns:
@@ -2284,8 +2279,11 @@ if response.status_code == 200:
         features_for_transform = self._search_task.get_features_for_transform() or []
         if len(features_for_transform) > 0:
             missing_features_for_transform = [
-                columns_renaming.get(f) for f in features_for_transform if f not in df.columns
+                columns_renaming.get(f) or f for f in features_for_transform if f not in df.columns
             ]
+            if TARGET in missing_features_for_transform:
+                raise ValidationError(self.bundle.get("missing_target_for_transform"))
+
             if len(missing_features_for_transform) > 0:
                 raise ValidationError(
                     self.bundle.get("missing_features_for_transform").format(missing_features_for_transform)
@@ -2341,11 +2339,10 @@ if response.status_code == 200:
             converter = PostalCodeSearchKeyConverter(postal_code)
             df = converter.convert(df)
 
-
+        meaning_types = {}
+        meaning_types.update({col: FileColumnMeaningType.FEATURE for col in features_for_transform})
+        meaning_types.update({col: key.value for col, key in search_keys.items()})
 
-        meaning_types = {col: key.value for col, key in search_keys.items()}
-        for col in features_for_transform:
-            meaning_types[col] = FileColumnMeaningType.FEATURE
         features_not_to_pass = [
             c
             for c in df.columns
@@ -2354,13 +2351,12 @@ if response.status_code == 200:
             and c not in [ENTITY_SYSTEM_RECORD_ID, SEARCH_KEY_UNNEST]
         ]
 
-        if add_fit_system_record_id
-            reversed_columns_renaming = {v: k for k, v in columns_renaming.items()}
+        if add_fit_system_record_id:
             df = self.__add_fit_system_record_id(
                 df,
                 search_keys,
                 SYSTEM_RECORD_ID,
-
+                TARGET,
                 columns_renaming,
                 silent=True,
             )
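Note: together, the hunks above make transform fail fast when the fitted search produced target-based features and no y is given: TARGET shows up in missing_features_for_transform, and the new missing_target_for_transform message (added to strings.properties below) is raised as a ValidationError. A sketch of the resulting contract (variables are placeholders; ValidationError comes from upgini.errors):

```python
# Placeholder variables; demonstrates the new error path only.
from upgini.errors import ValidationError

try:
    enriched = enricher.transform(test_X)  # fit used target-derived features
except ValidationError:
    enriched = enricher.transform(test_X, y=test_y)  # passing y resolves it
```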
src/upgini/resource_bundle/strings.properties
@@ -136,6 +136,7 @@ x_and_eval_x_diff_types=X and eval_set X has different types: {} and {}
 baseline_score_column_not_exists=baseline_score_column {} doesn't exist in input dataframe
 baseline_score_column_has_na=baseline_score_column contains NaN. Clear it and and retry
 missing_features_for_transform=Missing some features for transform that were presented on fit: {}
+missing_target_for_transform=Search contains features on target. Please add y to the call and try again
 missing_id_column=Id column {} not found in X
 # target validation
 empty_target=Target is empty in all rows
src/upgini/search_task.py
@@ -168,7 +168,13 @@ class SearchTask:
         for meta in self.provider_metadata_v2:
             if meta.features_used_for_embeddings is not None:
                 features_for_transform.update(meta.features_used_for_embeddings)
-
+            if meta.generated_features:
+                features_for_transform.update(
+                    c.original_name
+                    for f in meta.generated_features
+                    for c in f.base_columns
+                    if c.ads_definition_id is None
+                )
         return list(features_for_transform)
 
     def get_shuffle_kfold(self) -> Optional[bool]:
src/upgini/utils/mstats.py
@@ -118,7 +118,7 @@ def spearmanr(
     # - dof: degrees of freedom
     # - t_stat: t-statistic
    # - alternative: 'two-sided', 'greater', 'less'
-    def compute_t_pvalue(t_stat, dof, alternative=
+    def compute_t_pvalue(t_stat, dof, alternative="two-sided"):
         from scipy.stats import t
 
         if alternative == "two-sided":
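Note: after this fix compute_t_pvalue defaults to a two-sided test. For reference, the two-sided p-value from a t-statistic is twice the upper-tail survival function; a self-contained check (not the upgini function itself):

```python
# Standalone two-sided t-test p-value, matching alternative="two-sided".
from scipy.stats import t

t_stat, dof = 2.1, 28
p_two_sided = 2 * t.sf(abs(t_stat), dof)
print(round(p_two_sided, 4))  # approximately 0.045
```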
upgini-1.2.67/src/upgini/__about__.py (removed)
@@ -1 +0,0 @@
-__version__ = "1.2.67"