upgini-1.2.86.dev1-py3-none-any.whl → upgini-1.2.87.dev2-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of upgini might be problematic.

upgini/__about__.py CHANGED
@@ -1 +1 @@
-__version__ = "1.2.86.dev1"
+__version__ = "1.2.87.dev2"

upgini/features_enricher.py CHANGED
@@ -30,7 +30,7 @@ from pandas.api.types import (
 from scipy.stats import ks_2samp
 from sklearn.base import TransformerMixin
 from sklearn.exceptions import NotFittedError
-from sklearn.model_selection import BaseCrossValidator
+from sklearn.model_selection import BaseCrossValidator, TimeSeriesSplit
 
 from upgini.autofe.feature import Feature
 from upgini.autofe.timeseries import TimeSeriesBase
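
The newly imported sklearn TimeSeriesSplit is used further down to decide when row order matters (see the has_time logic in the @@ -1045 hunk). For reference, a minimal sketch of how this splitter behaves: folds are ordered, and each test fold starts strictly after its training rows.

    import numpy as np
    from sklearn.model_selection import TimeSeriesSplit

    X = np.arange(10).reshape(-1, 1)
    for train_idx, test_idx in TimeSeriesSplit(n_splits=3).split(X):
        # training indices always precede test indices; no shuffling
        print(train_idx, test_idx)
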
@@ -71,6 +71,7 @@ from upgini.resource_bundle import ResourceBundle, bundle, get_custom_bundle
 from upgini.search_task import SearchTask
 from upgini.spinner import Spinner
 from upgini.utils import combine_search_keys, find_numbers_with_decimal_comma
+from upgini.utils.blocked_time_series import BlockedTimeSeriesSplit
 from upgini.utils.country_utils import (
     CountrySearchKeyConverter,
     CountrySearchKeyDetector,
@@ -114,7 +115,9 @@ from upgini.utils.postal_code_utils import (
 try:
     from upgini.utils.progress_bar import CustomProgressBar as ProgressBar
 except Exception:
-    from upgini.utils.fallback_progress_bar import CustomFallbackProgressBar as ProgressBar
+    from upgini.utils.fallback_progress_bar import (
+        CustomFallbackProgressBar as ProgressBar,
+    )
 
 from upgini.utils.sort import sort_columns
 from upgini.utils.target_utils import (
@@ -239,6 +242,7 @@ class FeaturesEnricher(TransformerMixin):
         add_date_if_missing: bool = True,
         disable_force_downsampling: bool = False,
         id_columns: Optional[List[str]] = None,
+        generate_search_key_features: bool = True,
         **kwargs,
     ):
         self.bundle = get_custom_bundle(custom_bundle_config)
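
A minimal usage sketch of the new generate_search_key_features flag; the DataFrame and column names are illustrative, and the import path follows upgini's documented quick start:

    import pandas as pd
    from upgini import FeaturesEnricher, SearchKey

    train = pd.DataFrame(
        {
            "rep_date": pd.date_range("2024-01-01", periods=90, freq="D"),
            "target": range(90),
        }
    )
    # generate_search_key_features=False turns off client-side features derived
    # from search keys (cyclical date/time features, email domain features)
    enricher = FeaturesEnricher(
        search_keys={"rep_date": SearchKey.DATE},
        generate_search_key_features=False,
    )
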
@@ -365,6 +369,8 @@ class FeaturesEnricher(TransformerMixin):
         self.exclude_columns = exclude_columns
         self.baseline_score_column = baseline_score_column
         self.add_date_if_missing = add_date_if_missing
+        self.generate_search_key_features = generate_search_key_features
+
         self.features_info_display_handle = None
         self.data_sources_display_handle = None
         self.autofe_features_display_handle = None
@@ -1045,6 +1051,7 @@ class FeaturesEnricher(TransformerMixin):
         self._check_train_and_eval_target_distribution(y_sorted, fitting_eval_set_dict)
 
         has_date = self._get_date_column(search_keys) is not None
+        has_time = has_date and isinstance(_cv, TimeSeriesSplit) or isinstance(_cv, BlockedTimeSeriesSplit)
         model_task_type = self.model_task_type or define_task(y_sorted, has_date, self.logger, silent=True)
         cat_features = list(set(client_cat_features + cat_features_from_backend))
         baseline_cat_features = [f for f in cat_features if f in fitting_X.columns]
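
A note on the has_time expression added above: Python binds and tighter than or, so it evaluates as (has_date and isinstance(_cv, TimeSeriesSplit)) or isinstance(_cv, BlockedTimeSeriesSplit), meaning a BlockedTimeSeriesSplit enables has_time regardless of has_date. An explicitly parenthesized sketch, with names as in the diff and an illustrative CV object:

    from sklearn.model_selection import TimeSeriesSplit

    from upgini.utils.blocked_time_series import BlockedTimeSeriesSplit

    _cv = TimeSeriesSplit(n_splits=5)  # illustrative CV object
    has_date = True
    has_time = (has_date and isinstance(_cv, TimeSeriesSplit)) or isinstance(
        _cv, BlockedTimeSeriesSplit
    )
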
@@ -1077,7 +1084,7 @@ class FeaturesEnricher(TransformerMixin):
             add_params=custom_loss_add_params,
             groups=groups,
             text_features=text_features,
-            has_date=has_date,
+            has_time=has_time,
         )
         baseline_cv_result = baseline_estimator.cross_val_predict(
             fitting_X, y_sorted, baseline_score_column
@@ -1112,7 +1119,7 @@ class FeaturesEnricher(TransformerMixin):
             add_params=custom_loss_add_params,
             groups=groups,
             text_features=text_features,
-            has_date=has_date,
+            has_time=has_time,
         )
         enriched_cv_result = enriched_estimator.cross_val_predict(fitting_enriched_X, enriched_y_sorted)
         enriched_metric = enriched_cv_result.get_display_metric()
@@ -1773,7 +1780,13 @@ class FeaturesEnricher(TransformerMixin):
         date_column = self._get_date_column(search_keys)
         generated_features = []
         if date_column is not None:
-            converter = DateTimeSearchKeyConverter(date_column, self.date_format, self.logger, self.bundle)
+            converter = DateTimeSearchKeyConverter(
+                date_column,
+                self.date_format,
+                self.logger,
+                self.bundle,
+                generate_cyclical_features=self.generate_search_key_features,
+            )
             # Leave original date column values
             df_with_date_features = converter.convert(df, keep_time=True)
             df_with_date_features[date_column] = df[date_column]
@@ -1781,7 +1794,7 @@ class FeaturesEnricher(TransformerMixin):
             generated_features = converter.generated_features
 
         email_columns = SearchKey.find_all_keys(search_keys, SearchKey.EMAIL)
-        if email_columns:
+        if email_columns and self.generate_search_key_features:
             generator = EmailDomainGenerator(email_columns)
             df = generator.generate(df)
             generated_features.extend(generator.generated_features)
@@ -2204,10 +2217,12 @@ class FeaturesEnricher(TransformerMixin):
                     {"name": name, "value": key_example(sk_type)} for name in sk_meta.unnestKeyNames
                 ]
             else:
-                search_keys_with_values[sk_type.name] = [{
-                    "name": sk_meta.originalName,
-                    "value": key_example(sk_type),
-                }]
+                search_keys_with_values[sk_type.name] = [
+                    {
+                        "name": sk_meta.originalName,
+                        "value": key_example(sk_type),
+                    }
+                ]
 
         keys_section = json.dumps(search_keys_with_values)
         features_for_transform = self._search_task.get_features_for_transform()
@@ -2360,7 +2375,13 @@ if response.status_code == 200:
         generated_features = []
         date_column = self._get_date_column(search_keys)
         if date_column is not None:
-            converter = DateTimeSearchKeyConverter(date_column, self.date_format, self.logger, bundle=self.bundle)
+            converter = DateTimeSearchKeyConverter(
+                date_column,
+                self.date_format,
+                self.logger,
+                bundle=self.bundle,
+                generate_cyclical_features=self.generate_search_key_features,
+            )
             df = converter.convert(df, keep_time=True)
             self.logger.info(f"Date column after convertion: {df[date_column]}")
             generated_features.extend(converter.generated_features)
@@ -2370,7 +2391,7 @@ if response.status_code == 200:
         df = self._add_current_date_as_key(df, search_keys, self.logger, self.bundle)
 
         email_columns = SearchKey.find_all_keys(search_keys, SearchKey.EMAIL)
-        if email_columns:
+        if email_columns and self.generate_search_key_features:
             generator = EmailDomainGenerator(email_columns)
             df = generator.generate(df)
             generated_features.extend(generator.generated_features)
@@ -2860,6 +2881,7 @@ if response.status_code == 200:
             self.date_format,
             self.logger,
             bundle=self.bundle,
+            generate_cyclical_features=self.generate_search_key_features,
         )
         df = converter.convert(df, keep_time=True)
         if converter.has_old_dates:
@@ -2872,7 +2894,7 @@ if response.status_code == 200:
         df = self._add_current_date_as_key(df, self.fit_search_keys, self.logger, self.bundle)
 
         email_columns = SearchKey.find_all_keys(self.fit_search_keys, SearchKey.EMAIL)
-        if email_columns:
+        if email_columns and self.generate_search_key_features:
             generator = EmailDomainGenerator(email_columns)
             df = generator.generate(df)
             self.fit_generated_features.extend(generator.generated_features)
@@ -3564,7 +3586,9 @@ if response.status_code == 200:
         maybe_date_col = SearchKey.find_key(self.search_keys, [SearchKey.DATE, SearchKey.DATETIME])
         if X is not None and maybe_date_col is not None and maybe_date_col in X.columns:
             # TODO cast date column to single dtype
-            date_converter = DateTimeSearchKeyConverter(maybe_date_col, self.date_format)
+            date_converter = DateTimeSearchKeyConverter(
+                maybe_date_col, self.date_format, generate_cyclical_features=False
+            )
             converted_X = date_converter.convert(X)
             min_date = converted_X[maybe_date_col].min()
             max_date = converted_X[maybe_date_col].max()
@@ -3603,7 +3627,7 @@ if response.status_code == 200:
             self.__log_warning(bundle.get("current_date_added"))
             df[FeaturesEnricher.CURRENT_DATE] = datetime.date.today()
             search_keys[FeaturesEnricher.CURRENT_DATE] = SearchKey.DATE
-            converter = DateTimeSearchKeyConverter(FeaturesEnricher.CURRENT_DATE)
+            converter = DateTimeSearchKeyConverter(FeaturesEnricher.CURRENT_DATE, generate_cyclical_features=False)
             df = converter.convert(df)
         return df
 
upgini/metrics.py CHANGED
@@ -15,7 +15,7 @@ from catboost import CatBoostClassifier, CatBoostRegressor
 from category_encoders.cat_boost import CatBoostEncoder
 from lightgbm import LGBMClassifier, LGBMRegressor
 from numpy import log1p
-from pandas.api.types import is_numeric_dtype, is_integer_dtype, is_float_dtype
+from pandas.api.types import is_float_dtype, is_integer_dtype, is_numeric_dtype
 from sklearn.metrics import check_scoring, get_scorer, make_scorer, roc_auc_score
 
 from upgini.utils.blocked_time_series import BlockedTimeSeriesSplit
@@ -391,9 +391,7 @@ class EstimatorWrapper:
                 self.converted_to_int.append(c)
                 self.cat_features.remove(c)
             elif is_float_dtype(x[c]) or (x[c].dtype == "category" and is_float_dtype(x[c].cat.categories)):
-                self.logger.info(
-                    f"Convert float cat feature {c} to string"
-                )
+                self.logger.info(f"Convert float cat feature {c} to string")
                 x[c] = x[c].astype(str)
                 self.converted_to_str.append(c)
             elif x[c].dtype not in ["category", "int64"]:
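
Context for the cast above: CatBoost accepts only integer or string values in categorical features, so float-typed categories must be stringified first. A standalone sketch of the same conversion, with a hypothetical column name:

    import pandas as pd

    x = pd.DataFrame({"cat_f": [1.0, 2.5, float("nan")]})
    # mirror of the wrapper's conversion: float categorical -> string
    x["cat_f"] = x["cat_f"].astype(str)
    print(x["cat_f"].tolist())  # ['1.0', '2.5', 'nan']
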
@@ -551,7 +549,7 @@ class EstimatorWrapper:
         text_features: Optional[List[str]] = None,
         add_params: Optional[Dict[str, Any]] = None,
         groups: Optional[List[str]] = None,
-        has_date: Optional[bool] = None,
+        has_time: Optional[bool] = None,
     ) -> EstimatorWrapper:
         scorer, metric_name, multiplier = define_scorer(target_type, scoring)
         kwargs = {
@@ -568,7 +566,7 @@ class EstimatorWrapper:
         if estimator is None:
             if EstimatorWrapper.default_estimator == "catboost":
                 logger.info("Using CatBoost as default estimator")
-                params = {"has_time": has_date}
+                params = {"has_time": has_time}
                 if target_type == ModelTaskType.MULTICLASS:
                     params = _get_add_params(params, CATBOOST_MULTICLASS_PARAMS)
                 params = _get_add_params(params, add_params)
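
The has_time value ends up in CatBoost's own has_time parameter, which tells CatBoost to keep the given row order instead of using random permutations during ordered boosting, the appropriate setting for time-series CV. A minimal sketch with toy data:

    from catboost import CatBoostRegressor

    # has_time=True: objects are treated as ordered in time
    model = CatBoostRegressor(has_time=True, iterations=10, verbose=False)
    model.fit([[1.0], [2.0], [3.0], [4.0]], [1.0, 2.0, 3.0, 4.0])
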
@@ -610,8 +608,8 @@ class EstimatorWrapper:
             estimator_copy = deepcopy(estimator)
             kwargs["estimator"] = estimator_copy
             if is_catboost_estimator(estimator):
-                if has_date is not None:
-                    estimator_copy.set_params(has_time=has_date)
+                if has_time is not None:
+                    estimator_copy.set_params(has_time=has_time)
                 estimator = CatBoostWrapper(**kwargs)
             else:
                 if isinstance(estimator, (LGBMClassifier, LGBMRegressor)):

upgini/utils/datetime_utils.py CHANGED
@@ -41,6 +41,7 @@ class DateTimeSearchKeyConverter:
         date_format: Optional[str] = None,
         logger: Optional[logging.Logger] = None,
         bundle: Optional[ResourceBundle] = None,
+        generate_cyclical_features: bool = True,
     ):
         self.date_column = date_column
         self.date_format = date_format
@@ -51,6 +52,7 @@ class DateTimeSearchKeyConverter:
             self.logger.setLevel("FATAL")
         self.generated_features: List[str] = []
         self.bundle = bundle or get_custom_bundle()
+        self.generate_cyclical_features = generate_cyclical_features
         self.has_old_dates = False
 
     @staticmethod
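
A direct-use sketch of the converter with the new flag; the sample frame is illustrative, and which features get generated depends on the input data:

    import pandas as pd

    from upgini.utils.datetime_utils import DateTimeSearchKeyConverter

    df = pd.DataFrame({"date": ["2024-01-01 10:30:00", "2024-06-15 18:45:00"]})
    converter = DateTimeSearchKeyConverter("date", generate_cyclical_features=False)
    df = converter.convert(df)
    print(converter.generated_features)  # cyclical features suppressed by the flag
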
@@ -121,61 +123,63 @@ class DateTimeSearchKeyConverter:
             df[cos_feature] = np.cos(2 * np.pi * df[column] / period)
             self.generated_features.append(cos_feature)
 
-        # df["quarter"] = df[self.date_column].dt.quarter
+        if self.generate_cyclical_features:
 
-        # # Calculate the start date of the quarter for each timestamp
-        # df["quarter_start"] = df[self.date_column].dt.to_period("Q").dt.start_time
+            df["quarter"] = df[self.date_column].dt.quarter
 
-        # # Calculate the day in the quarter
-        # df["day_in_quarter"] = (df[self.date_column] - df["quarter_start"]).dt.days + 1
+            # Calculate the start date of the quarter for each timestamp
+            df["quarter_start"] = df[self.date_column].dt.to_period("Q").dt.start_time
 
-        # # Vectorized calculation of days_in_quarter
-        # quarter = df["quarter"]
-        # start = df["quarter_start"]
-        # year = start.dt.year
-        # month = start.dt.month
+            # Calculate the day in the quarter
+            df["day_in_quarter"] = (df[self.date_column] - df["quarter_start"]).dt.days + 1
 
-        # quarter_end_year = np.where(quarter == 4, year + 1, year)
-        # quarter_end_month = np.where(quarter == 4, 1, month + 3)
+            # Vectorized calculation of days_in_quarter
+            quarter = df["quarter"]
+            start = df["quarter_start"]
+            year = start.dt.year
+            month = start.dt.month
 
-        # end = pd.to_datetime({"year": quarter_end_year, "month": quarter_end_month, "day": 1})
-        # end.index = df.index
+            quarter_end_year = np.where(quarter == 4, year + 1, year)
+            quarter_end_month = np.where(quarter == 4, 1, month + 3)
 
-        # df["days_in_quarter"] = (end - start).dt.days
+            end = pd.to_datetime({"year": quarter_end_year, "month": quarter_end_month, "day": 1})
+            end.index = df.index
 
-        # add_cyclical_features(df, "day_in_quarter", df["days_in_quarter"])  # Days in the quarter
+            df["days_in_quarter"] = (end - start).dt.days
 
-        # df.drop(columns=["quarter", "quarter_start", "day_in_quarter", "days_in_quarter"], inplace=True)
+            add_cyclical_features(df, "day_in_quarter", df["days_in_quarter"])  # Days in the quarter
 
-        df[seconds] = (df[self.date_column] - df[self.date_column].dt.floor("D")).dt.seconds
+            df.drop(columns=["quarter", "quarter_start", "day_in_quarter", "days_in_quarter"], inplace=True)
 
-        seconds_without_na = df[seconds].dropna()
-        if (seconds_without_na != 0).any() and seconds_without_na.nunique() > 1:
-            self.logger.info("Time found in date search key. Add extra features based on time")
+            df[seconds] = (df[self.date_column] - df[self.date_column].dt.floor("D")).dt.seconds
 
-            # Extract basic components
-            df["second"] = df[self.date_column].dt.second
-            df["minute"] = df[self.date_column].dt.minute
-            df["hour"] = df[self.date_column].dt.hour
+            seconds_without_na = df[seconds].dropna()
+            if (seconds_without_na != 0).any() and seconds_without_na.nunique() > 1:
+                self.logger.info("Time found in date search key. Add extra features based on time")
 
-            # Apply cyclical transformations
-            add_cyclical_features(df, "second", 60)  # Seconds in a minute
-            add_cyclical_features(df, "minute", 60)  # Minutes in an hour
-            add_cyclical_features(df, "minute", 30)  # Minutes in half an hour
-            add_cyclical_features(df, "hour", 24)  # Hours in a day
+                # Extract basic components
+                df["second"] = df[self.date_column].dt.second
+                df["minute"] = df[self.date_column].dt.minute
+                df["hour"] = df[self.date_column].dt.hour
 
-            # Drop intermediate columns if not needed
-            df.drop(columns=["second", "minute", "hour"], inplace=True)
-        else:
-            keep_time = False
+                # Apply cyclical transformations
+                add_cyclical_features(df, "second", 60)  # Seconds in a minute
+                add_cyclical_features(df, "minute", 60)  # Minutes in an hour
+                add_cyclical_features(df, "minute", 30)  # Minutes in half an hour
+                add_cyclical_features(df, "hour", 24)  # Hours in a day
+
+                # Drop intermediate columns if not needed
+                df.drop(columns=["second", "minute", "hour"], inplace=True)
+            else:
+                keep_time = False
 
-        for generated_feature in self.generated_features[:]:
-            if df[generated_feature].dropna().nunique() <= 1:
-                self.logger.warning(f"Generated constant feature {generated_feature} will be dropped")
-                df.drop(columns=generated_feature, inplace=True)
-                self.generated_features.remove(generated_feature)
+            for generated_feature in self.generated_features[:]:
+                if df[generated_feature].dropna().nunique() <= 1:
+                    self.logger.warning(f"Generated constant feature {generated_feature} will be dropped")
+                    df.drop(columns=generated_feature, inplace=True)
+                    self.generated_features.remove(generated_feature)
 
-        df.drop(columns=seconds, inplace=True)
+            df.drop(columns=seconds, inplace=True)
 
         if keep_time:
             df[self.DATETIME_COL] = df[self.date_column].astype(np.int64) // 1_000_000
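
The sin/cos pairs produced by add_cyclical_features embed a periodic value on the unit circle, so values at the ends of the period stay close (hour 23 neighbors hour 0). A standalone sketch of the transform; column names are illustrative:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"hour": [0, 6, 12, 23]})
    period = 24
    df["hour_sin"] = np.sin(2 * np.pi * df["hour"] / period)
    df["hour_cos"] = np.cos(2 * np.pi * df["hour"] / period)
    # hour 23 and hour 0 now map to nearby points on the circle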

upgini/utils/deduplicate_utils.py CHANGED
@@ -104,9 +104,9 @@ def remove_fintech_duplicates(
    sub_df = pd.merge(sub_df, nonunique_target_rows, on=personal_cols)
 
    # Convert date columns for further checks
-    sub_df = DateTimeSearchKeyConverter(date_col, date_format=date_format, logger=logger, bundle=bundle).convert(
-        sub_df
-    )
+    sub_df = DateTimeSearchKeyConverter(
+        date_col, date_format=date_format, logger=logger, bundle=bundle, generate_cyclical_features=False
+    ).convert(sub_df)
    grouped_by_personal_cols = sub_df.groupby(personal_cols, group_keys=False)
    rows_with_diff_target = grouped_by_personal_cols.filter(has_diff_target_within_60_days)
 
upgini/utils/email_utils.py CHANGED
@@ -36,11 +36,11 @@ class EmailDomainGenerator:
         self.generated_features = []
 
     def generate(self, df: pd.DataFrame) -> pd.DataFrame:
-        # for email_col in self.email_columns:
-        #     domain_feature = email_col + self.DOMAIN_SUFFIX
-        #     if domain_feature not in df.columns:
-        #         df[domain_feature] = df[email_col].apply(self._email_to_domain).astype("string")
-        #         self.generated_features.append(domain_feature)
+        for email_col in self.email_columns:
+            domain_feature = email_col + self.DOMAIN_SUFFIX
+            if domain_feature not in df.columns:
+                df[domain_feature] = df[email_col].apply(self._email_to_domain).astype("string")
+                self.generated_features.append(domain_feature)
         return df
 
     @staticmethod
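
The re-enabled generate() loop delegates to the class's _email_to_domain helper, which is not shown in this diff. A hypothetical standalone equivalent of the domain extraction, for illustration only:

    import pandas as pd

    def email_to_domain(email):
        # hypothetical stand-in for EmailDomainGenerator._email_to_domain
        if isinstance(email, str) and "@" in email:
            return email.split("@")[-1].lower()
        return None

    emails = pd.Series(["alice@example.com", "not-an-email", None])
    print(emails.apply(email_to_domain).astype("string"))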

upgini-1.2.87.dev2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: upgini
-Version: 1.2.86.dev1
+Version: 1.2.87.dev2
 Summary: Intelligent data search & enrichment for Machine Learning
 Project-URL: Bug Reports, https://github.com/upgini/upgini/issues
 Project-URL: Homepage, https://upgini.com/

upgini-1.2.87.dev2.dist-info/RECORD CHANGED
@@ -1,12 +1,12 @@
-upgini/__about__.py,sha256=x1iyqkRuCxNu5kTIKv8yNfzxxa0JD4GnBFMpKHM2wRM,28
+upgini/__about__.py,sha256=lMGQTkNeO4UaNk9zHrLiiQXvmrSDuq_oo4cWNvv6Lho,28
 upgini/__init__.py,sha256=LXSfTNU0HnlOkE69VCxkgIKDhWP-JFo_eBQ71OxTr5Y,261
 upgini/ads.py,sha256=nvuRxRx5MHDMgPr9SiU-fsqRdFaBv8p4_v1oqiysKpc,2714
 upgini/dataset.py,sha256=fRtqSkXNONLnPe6cCL967GMt349FTIpXzy_u8LUKncw,35354
 upgini/errors.py,sha256=2b_Wbo0OYhLUbrZqdLIx5jBnAsiD1Mcenh-VjR4HCTw,950
-upgini/features_enricher.py,sha256=G0qbRPdlWe9p6cwYF3khP99-0kgAO8N0A2sfQxSLgmM,213446
+upgini/features_enricher.py,sha256=n8KBoBgJApLiRv4wXeSgfS-PfbB1D5aDOJfFnL0q6v8,214487
 upgini/http.py,sha256=6Qcepv0tDC72mBBJxYHnA2xqw6QwFaKrXN8o4vju8Es,44372
 upgini/metadata.py,sha256=zt_9k0iQbWXuiRZcel4ORNPdQKt6Ou69ucZD_E1Q46o,12341
-upgini/metrics.py,sha256=3cip0_L6-OFew74KsRwzxJDU6UFq05h2v7IsyHLcMRc,43164
+upgini/metrics.py,sha256=7j8wcU-5xh_jSzqGpY7SiWwo1FEAhcPelFVOVu0qwzs,43118
 upgini/search_task.py,sha256=Q5HjBpLIB3OCxAD1zNv5yQ3ZNJx696WCK_-H35_y7Rs,17912
 upgini/spinner.py,sha256=4iMd-eIe_BnkqFEMIliULTbj6rNI2HkN_VJ4qYe0cUc,1118
 upgini/version_validator.py,sha256=DvbaAvuYFoJqYt0fitpsk6Xcv-H1BYDJYHUMxaKSH_Y,1509
@@ -51,10 +51,10 @@ upgini/utils/blocked_time_series.py,sha256=Uqr3vp4YqNclj2-PzEYqVy763GSXHn86sbpIl
 upgini/utils/country_utils.py,sha256=lY-eXWwFVegdVENFttbvLcgGDjFO17Sex8hd2PyJaRk,6937
 upgini/utils/custom_loss_utils.py,sha256=kieNZYBYZm5ZGBltF1F_jOSF4ea6C29rYuCyiDcqVNY,3857
 upgini/utils/cv_utils.py,sha256=w6FQb9nO8BWDx88EF83NpjPLarK4eR4ia0Wg0kLBJC4,3525
-upgini/utils/datetime_utils.py,sha256=FKeCc5PQnhMSyLiw8nuiMccmMkrUCj4zCIgpZnffpbU,13569
-upgini/utils/deduplicate_utils.py,sha256=AcMLoObMjhOTQ_fMS1LWy0GKp6WXnZ-FNux_8V3nbZU,8914
+upgini/utils/datetime_utils.py,sha256=UpM2Ljri8rCqHBrSGhmjViGDheLiOYWARFcGaFvC7JE,13858
+upgini/utils/deduplicate_utils.py,sha256=jm9ARZ0fbJFF3aJqj-xm_T6lNh-WErM0H0h6B_L1xQc,8948
 upgini/utils/display_utils.py,sha256=hAeWEcJtPDg8fAVcMNrNB-azFD2WJp1nvbPAhR7SeP4,12071
-upgini/utils/email_utils.py,sha256=TZ_2UL0T7rzXG5WNu3dLUReY15qt6PozEGY_4cyuhdM,5287
+upgini/utils/email_utils.py,sha256=pZ2vCfNxLIPUhxr0-OlABNXm12jjU44isBk8kGmqQzA,5277
 upgini/utils/fallback_progress_bar.py,sha256=PDaKb8dYpVZaWMroNcOHsTc3pSjgi9mOm0--cOFTwJ0,1074
 upgini/utils/feature_info.py,sha256=Q9HN6A-fvfVD-irFWrmOqqZG9RsUSvh5MTY_k0xu-tE,7287
 upgini/utils/features_validator.py,sha256=lEfmk4DoxZ4ooOE1HC0ZXtUb_lFKRFHIrnFULZ4_rL8,3746
@@ -70,7 +70,7 @@ upgini/utils/target_utils.py,sha256=LRN840dzx78-wg7ftdxAkp2c1eu8-JDvkACiRThm4HE,
 upgini/utils/track_info.py,sha256=G5Lu1xxakg2_TQjKZk4b5SvrHsATTXNVV3NbvWtT8k8,5663
 upgini/utils/ts_utils.py,sha256=26vhC0pN7vLXK6R09EEkMK3Lwb9IVPH7LRdqFIQ3kPs,1383
 upgini/utils/warning_counter.py,sha256=-GRY8EUggEBKODPSuXAkHn9KnEQwAORC0mmz_tim-PM,254
-upgini-1.2.86.dev1.dist-info/METADATA,sha256=WbxVPEQbJJMxYSDRTiJAdevnfltYEQ8WjxyGgVv7vaE,49167
-upgini-1.2.86.dev1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
-upgini-1.2.86.dev1.dist-info/licenses/LICENSE,sha256=5RRzgvdJUu3BUDfv4bzVU6FqKgwHlIay63pPCSmSgzw,1514
-upgini-1.2.86.dev1.dist-info/RECORD,,
+upgini-1.2.87.dev2.dist-info/METADATA,sha256=7Mt_3MedPG9E1Ie9vtf8T67JlPCGd8PnISUSrN91ctU,49167
+upgini-1.2.87.dev2.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+upgini-1.2.87.dev2.dist-info/licenses/LICENSE,sha256=5RRzgvdJUu3BUDfv4bzVU6FqKgwHlIay63pPCSmSgzw,1514
+upgini-1.2.87.dev2.dist-info/RECORD,,