imsciences 0.6.0.2__py3-none-any.whl → 0.6.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -184,7 +184,17 @@ class dataprocessing:
184
184
  print("\n32. upgrade all packages")
185
185
  print(" - Description: Upgrades all packages.")
186
186
  print(" - Usage: upgrade_outdated_packages()")
187
- print(" - Example: upgrade_outdated_packages()")
187
+ print(" - Example: upgrade_outdated_packages()")
188
+
189
+ print("\n33. Convert Mixed Formats Dates")
190
+ print(" - Description: Convert a mix of US and UK dates to datetime.")
191
+ print(" - Usage: convert_mixed_formats_dates(df, datecol)")
192
+ print(" - Example: convert_mixed_formats_dates(df, 'OBS')")
193
+
194
+ print("\n34. Fill Weekly Missing Dates")
195
+ print(" - Description: Fill in any missing weeks with 0.")
196
+ print(" - Usage: fill_weekly_date_range(self, df, date_column, freq)")
197
+ print(" - Example: fill_weekly_date_range(df, 'OBS', 'W-MON')")
188
198
 
189
199
  def get_wd_levels(self, levels):
190
200
  """
@@ -602,10 +612,6 @@ class dataprocessing:
602
612
  )
603
613
  return df
604
614
 
605
- # Apply the fix to the specified column
606
- df[date_col] = df[date_col].apply(lambda x: fix_date(x) if not pd.isnull(x) else x)
607
- return df
608
-
609
615
  def combine_sheets(self, all_sheets):
610
616
  """
611
617
  Combines multiple DataFrames from a dictionary into a single DataFrame.
@@ -1229,7 +1235,55 @@ class dataprocessing:
1229
1235
  except Exception as e:
1230
1236
  print(f"An error occurred during the upgrade process: {e}")
1231
1237
 
1238
def convert_mixed_formats_dates(self, df, column_name):
    """
    Normalise a column containing a mix of US and UK style dates to datetime.

    The column is first parsed leniently (unparseable values become NaT),
    rendered back to strings, and then each 8-digit date is re-ordered:
    when the trailing day field is 12 or less it is swapped with the month
    field (treating the value as a US-parsed date), otherwise the
    year-month-day ordering is kept as-is.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame holding the date column; modified in place and returned.
    column_name : str
        Name of the column to convert.

    Returns
    -------
    pandas.DataFrame
        The same frame with ``column_name`` replaced by datetime values.

    Raises
    ------
    ValueError
        If the rebuilt date list somehow differs in length from ``df``.
    """

    def _reparse(raw):
        # Strip both separator styles so only the digit payload remains.
        digits = raw.replace('-', '').replace('/', '')
        if len(digits) != 8:
            # Not a plain yyyymmdd run (e.g. 'NaT') — let pandas decide.
            return pd.to_datetime(digits, errors='coerce')
        year, month, day = digits[:4], digits[4:6], digits[6:8]
        # Ambiguous day field (<= 12): assume US parsing put it last, swap.
        ordered = f"{year}-{day}-{month}" if int(day) <= 12 else f"{year}-{month}-{day}"
        return pd.to_datetime(ordered, errors='coerce')

    # Lenient first pass: invalid entries become NaT rather than raising.
    df[column_name] = pd.to_datetime(df[column_name], errors='coerce')
    df[column_name] = df[column_name].astype(str)

    rebuilt = [_reparse(value) for value in df[column_name]]

    # Sanity guard before writing the values back.
    if len(rebuilt) != len(df):
        raise ValueError("Length of corrected_dates does not match the original DataFrame")

    df[column_name] = rebuilt
    return df
1232
1269
 
1270
def fill_weekly_date_range(self, df, date_column, freq='W-MON'):
    """
    Expand ``df`` onto a complete weekly calendar, padding gaps with 0.

    Builds the full run of weekly dates between the earliest and latest
    value of ``date_column`` (at the given frequency), left-joins the
    original rows onto it, and fills every resulting NaN with 0.

    Parameters
    ----------
    df : pandas.DataFrame
        Input frame; its date column is coerced to datetime in place.
    date_column : str
        Name of the date column to expand on.
    freq : str, optional
        pandas offset alias for the weekly anchor (default ``'W-MON'``).

    Returns
    -------
    pandas.DataFrame
        A new frame covering every week in the range, missing values set to 0.
    """
    # Coerce to datetime so min/max and the merge key line up.
    df[date_column] = pd.to_datetime(df[date_column])

    first, last = df[date_column].min(), df[date_column].max()
    calendar = pd.DataFrame({date_column: pd.date_range(start=first, end=last, freq=freq)})

    # Left join keeps every calendar week; absent weeks surface as NaN.
    padded = calendar.merge(df, on=date_column, how='left')

    # Zero-fill the weeks that had no data in the original frame.
    padded = padded.fillna(0)

    return padded
1233
1287
 
1234
1288
 
1235
1289
 
@@ -0,0 +1,24 @@
1
+ Metadata-Version: 2.1
2
+ Name: imsciences
3
+ Version: 0.6.0.4
4
+ Summary: IMS Data Processing Package
5
+ Author: IMS
6
+ Author-email: cam@im-sciences.com
7
+ Keywords: python,data processing
8
+ Classifier: Development Status :: 3 - Alpha
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Operating System :: Unix
12
+ Classifier: Operating System :: MacOS :: MacOS X
13
+ Classifier: Operating System :: Microsoft :: Windows
14
+ Description-Content-Type: text/markdown
15
+ Requires-Dist: pandas
16
+
17
+ # IMS Package Documentation
18
+
19
+ The IMS package is a Python library for processing incoming data into a format that can be used for projects. IMS processing offers a variety of functions to manipulate and analyze data efficiently. Here are the functionalities provided by the package:
20
+
21
+ ## Data Processing
22
+
23
+ ## Data Pulling
24
+
@@ -2,13 +2,13 @@ dataprocessing/__init__.py,sha256=quSwsLs6IuLoA5Rzi0ZD40xZaQudwDteF7_ai9JfTPk,32
2
2
  dataprocessing/data-processing-functions.py,sha256=vE1vsZ8xOSbR9Bwlp9SWXwEHXQ0nFydwGkvzHXf2f1Y,41
3
3
  dataprocessing/datafunctions.py,sha256=vE1vsZ8xOSbR9Bwlp9SWXwEHXQ0nFydwGkvzHXf2f1Y,41
4
4
  imsciences/__init__.py,sha256=GIPbLmWc06sVcOySWwNvMNUr6XGOHqPLryFIWgtpHh8,78
5
- imsciences/datafunctions.py,sha256=vrv-6H8iccN23bdn5OqBHLsWfscrKOWvVyAtrlkgyd4,132385
5
+ imsciences/datafunctions.py,sha256=Fr87wDxHy7wVfBsYpj_s6r3OxxsVslnFiWRdoBxYplI,135006
6
6
  imsciences/datapull.py,sha256=TPY0LDgOkcKTBk8OekbD0Grg5x0SomAK2dZ7MuT6X1E,19000
7
7
  imsciencesdataprocessing/__init__.py,sha256=quSwsLs6IuLoA5Rzi0ZD40xZaQudwDteF7_ai9JfTPk,32
8
8
  imsciencesdataprocessing/datafunctions.py,sha256=vE1vsZ8xOSbR9Bwlp9SWXwEHXQ0nFydwGkvzHXf2f1Y,41
9
9
  imsdataprocessing/__init__.py,sha256=quSwsLs6IuLoA5Rzi0ZD40xZaQudwDteF7_ai9JfTPk,32
10
10
  imsdataprocessing/datafunctions.py,sha256=vE1vsZ8xOSbR9Bwlp9SWXwEHXQ0nFydwGkvzHXf2f1Y,41
11
- imsciences-0.6.0.2.dist-info/METADATA,sha256=jB4rv8_8NBrTNNUi1WWT7-WZ5R2u11IxJIieDQJ5hm8,11571
12
- imsciences-0.6.0.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
13
- imsciences-0.6.0.2.dist-info/top_level.txt,sha256=hsENS-AlDVRh8tQJ6-426iUQlla9bPcGc0-UlFF0_iU,11
14
- imsciences-0.6.0.2.dist-info/RECORD,,
11
+ imsciences-0.6.0.4.dist-info/METADATA,sha256=qnM9QQ1VR2rwX0lOMEVxL99CKDZdnbutZiO4h46zSKU,854
12
+ imsciences-0.6.0.4.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
13
+ imsciences-0.6.0.4.dist-info/top_level.txt,sha256=hsENS-AlDVRh8tQJ6-426iUQlla9bPcGc0-UlFF0_iU,11
14
+ imsciences-0.6.0.4.dist-info/RECORD,,
@@ -1,176 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: imsciences
3
- Version: 0.6.0.2
4
- Summary: IMS Data Processing Package
5
- Author: IMS
6
- Author-email: cam@im-sciences.com
7
- Keywords: python,data processing
8
- Classifier: Development Status :: 3 - Alpha
9
- Classifier: Intended Audience :: Developers
10
- Classifier: Programming Language :: Python :: 3
11
- Classifier: Operating System :: Unix
12
- Classifier: Operating System :: MacOS :: MacOS X
13
- Classifier: Operating System :: Microsoft :: Windows
14
- Description-Content-Type: text/markdown
15
- Requires-Dist: pandas
16
-
17
- # IMS Package Documentation
18
-
19
- The IMS package is a python library for processing incoming data into a format that can be used for projects. IMS processing offers a variety of functions to manipulate and analyze data efficiently. Here are the functionalities provided by the package:
20
-
21
- ## Data Processing
22
-
23
- ### 1. `get_wd_levels(levels)`
24
- - **Description**: Get the working directory with the option of moving up parents.
25
- - **Usage**: `get_wd_levels(levels)`
26
-
27
- ### 2. `remove_rows(data_frame, num_rows_to_remove)`
28
- - **Description**: Removes a specified number of rows from a pandas DataFrame.
29
- - **Usage**: `remove_rows(data_frame, num_rows_to_remove)`
30
-
31
- ### 3. `aggregate_daily_to_wc_long(df, date_column, group_columns, sum_columns, wc, aggregation='sum', include_totals=False)`
32
- - **Description**: Aggregates daily data into weekly data, grouping and summing specified columns, starting on a specified day of the week. In the long format.
33
- - **Usage**: `aggregate_daily_to_wc_long(df, date_column, group_columns, sum_columns, wc, aggregation='sum', include_totals=False)`
34
-
35
- ### 4. `convert_monthly_to_daily(df, date_column)`
36
- - **Description**: Converts monthly data in a DataFrame to daily data by expanding and dividing the numeric values.
37
- - **Usage**: `convert_monthly_to_daily(df, date_column)`
38
-
39
- ### 5. `plot_two(df1, col1, df2, col2, date_column, same_axis=True)`
40
- - **Description**: Plots specified columns from two different DataFrames using a shared date column. Useful for comparing data.
41
- - **Usage**: `plot_two(df1, col1, df2, col2, date_column, same_axis=True)`
42
-
43
- ### 6. `remove_nan_rows(df, col_to_remove_rows)`
44
- - **Description**: Removes rows from a DataFrame where the specified column has NaN values.
45
- - **Usage**: `remove_nan_rows(df, col_to_remove_rows)`
46
-
47
- ### 7. `filter_rows(df, col_to_filter, list_of_filters)`
48
- - **Description**: Filters the DataFrame based on whether the values in a specified column are in a provided list.
49
- - **Usage**: `filter_rows(df, col_to_filter, list_of_filters)`
50
-
51
- ### 8. `plot_one(df1, col1, date_column)`
52
- - **Description**: Plots a specified column from a DataFrame.
53
- - **Usage**: `plot_one(df1, col1, date_column)`
54
-
55
- ### 9. `week_of_year_mapping(df, week_col, start_day_str)`
56
- - **Description**: Converts a week column in 'yyyy-Www' or 'yyyy-ww' format to week commencing date.
57
- - **Usage**: `week_of_year_mapping(df, week_col, start_day_str)`
58
-
59
- ### 10. `exclude_rows(df, col_to_filter, list_of_filters)`
60
- - **Description**: Removes rows from a DataFrame based on whether the values in a specified column are not in a provided list.
61
- - **Usage**: `exclude_rows(df, col_to_filter, list_of_filters)`
62
-
63
- ### 11. `rename_cols(df, cols_to_rename)`
64
- - **Description**: Renames columns in a pandas DataFrame.
65
- - **Usage**: `rename_cols(df, cols_to_rename)`
66
-
67
- ### 12. `merge_new_and_old(old_df, old_col, new_df, new_col, cutoff_date, date_col_name='OBS')`
68
- - **Description**: Creates a new DataFrame with two columns: one for dates and one for merged numeric values.
69
- - **Usage**: `merge_new_and_old(old_df, old_col, new_df, new_col, cutoff_date, date_col_name='OBS')`
70
-
71
- ### 13. `merge_dataframes_on_date(dataframes, common_column='OBS', merge_how='outer')`
72
- - **Description**: Merge a list of DataFrames on a common column.
73
- - **Usage**: `merge_dataframes_on_date(dataframes, common_column='OBS', merge_how='outer')`
74
-
75
- ### 14. `merge_and_update_dfs(df1, df2, key_column)`
76
- - **Description**: Merges two dataframes on a key column, updates the first dataframe's columns with the second's where available, and returns a dataframe sorted by the key column.
77
- - **Usage**: `merge_and_update_dfs(df1, df2, key_column)`
78
-
79
- ### 15. `convert_us_to_uk_dates(df, date_col)`
80
- - **Description**: Convert a DataFrame column with mixed date formats to datetime.
81
- - **Usage**: `convert_us_to_uk_dates(df, date_col)`
82
-
83
- ### 16. `combine_sheets(all_sheets)`
84
- - **Description**: Combines multiple DataFrames from a dictionary into a single DataFrame.
85
- - **Usage**: `combine_sheets({'Sheet1': df1, 'Sheet2': df2})`
86
-
87
- ### 17. `pivot_table(df, filters_dict, index_col, columns, values_col, fill_value=0,aggfunc='sum',margins=False,margins_name='Total',datetime_trans_needed=True)`
88
- - **Description**: Dynamically pivots a DataFrame based on specified columns.
89
- - **Usage**: `pivot_table(df, {'Master Include':' == 1','OBS':' >= datetime(2019,9,9)','Metric Short Names':' == 'spd''}, 'OBS', 'Channel Short Names', 'Value', fill_value=0,aggfunc='sum',margins=False,margins_name='Total',datetime_trans_needed=True)`
90
-
91
- ### 18. `apply_lookup_table_for_columns(df, col_names, to_find_dict, if_not_in_country_dict='Other'), new_column_name='Mapping')`
92
- - **Description**: Equivalent of xlookup in excel. Allows you to map a dictionary of substrings within a column. If multiple columns are need for the LUT then a | seperator is needed.
93
- - **Usage**: `classify_within_column(df, ['campaign type','media type'], {'France Paid Social FB|paid social': 'facebook','France Paid Social TW|paid social': 'twitter'}, 'other','mapping')`
94
-
95
- ### 19. `aggregate_daily_to_wc_wide(df, date_column, group_columns, sum_columns, wc, aggregation='sum', include_totals=False)`
96
- - **Description**: Aggregates daily data into weekly data, grouping and summing specified columns, starting on a specified day of the week. In the wide format.
97
- - **Usage**: `aggregate_daily_to_wc_wide(df, date_column, group_columns, sum_columns, wc, aggregation='sum', include_totals=False)`
98
-
99
- ### 20. `merge_cols_with_seperator(self, df, col_names,seperator='_',output_column_name = 'Merged',starting_prefix_str=None,ending_prefix_str=None)`
100
- - **Description**: Merge multiple columns in a dataframe into 1 column with a seperator.Can be used if multiple columns are needed for a LUT.
101
- - **Usage**: `merge_cols_with_seperator(df, ['Campaign','Product'],seperator='|','Merged Columns',starting_prefix_str='start_',ending_prefix_str='_end')`
102
-
103
- ### 21. `check_sum_of_df_cols_are_equal(df_1,df_2,cols_1,cols_2)`
104
- - **Description**: Checks if the sum of two columns in two dataframes are the same, and provides the sums of each column and the difference between them.
105
- - **Usage**: `check_sum_of_df_cols_are_equal(df_1,df_2,'Media Cost','Spend')`
106
-
107
- ### 22. `convert_2_df_cols_to_dict(df, key_col, value_col)`
108
- - **Description**: Can be used to create an LUT. Creates a dictionary using two columns in a dataframe.
109
- - **Usage**: `convert_2_df_cols_to_dict(df, 'Campaign', 'Channel')`
110
-
111
- ### 23. `create_FY_and_H_columns(df, index_col, start_date, starting_FY,short_format='No',half_years='No',combined_FY_and_H='No')`
112
- - **Description**: Used to create a financial year, half year, and financial half year column.
113
- - **Usage**: `create_FY_and_H_columns(df, 'Week (M-S)', '2022-10-03', 'FY2023',short_format='Yes',half_years='Yes',combined_FY_and_H='Yes')`
114
-
115
- ### 24. `keyword_lookup_replacement(df, col, replacement_rows, cols_to_merge, replacement_lookup_dict,output_column_name='Updated Column')`
116
- - **Description**: Essentially provides an if statement with a xlookup if a value is something. Updates certain chosen values in a specified column of the DataFrame based on a lookup dictionary.
117
- - **Usage**: `keyword_lookup_replacement(df, 'channel', 'Paid Search Generic', ['channel','segment','product'], qlik_dict_for_channel,output_column_name='Channel New')`
118
-
119
- ### 25. `create_new_version_of_col_using_LUT(df, keys_col,value_col, dict_for_specific_changes, new_col_name='New Version of Old Col')`
120
- - **Description**: Creates a new column in a dataframe, which takes an old column and uses a lookup table to changes values in the new column to reflect the lookup table. The lookup is based on a column in the dataframe.
121
- - **Usage**: `keyword_lookup_replacement(df, '*Campaign Name','Campaign Type',search_campaign_name_retag_lut,'Campaign Name New')`
122
-
123
- ### 26. `convert_df_wide_2_long(df,value_cols,variable_col_name='Stacked',value_col_name='Value')`
124
- - **Description**: Changes a dataframe from wide to long format.
125
- - **Usage**: `keyword_lookup_replacement(df, ['Media Cost','Impressions','Clicks'],variable_col_name='Metric')`
126
-
127
- ### 27. `manually_edit_data(df, filters_dict, col_to_change, new_value, change_in_existing_df_col='No', new_col_to_change_name='New', manual_edit_col_name=None, add_notes='No', existing_note_col_name=None, note=None)`
128
- - **Description**: Allows the capability to manually update any cell in dataframe by applying filters and chosing a column to edit in dataframe.
129
- - **Usage**: `keyword_lookup_replacement(df, {'OBS':' <= datetime(2023,1,23)','File_Name':' == 'France media''},'Master Include',1,change_in_existing_df_col = 'Yes',new_col_to_change_name = 'Master Include',manual_edit_col_name = 'Manual Changes')`
130
-
131
- ### 28. `format_numbers_with_commas(df, decimal_length_chosen=2)`
132
- - **Description**: Converts data in numerical format into numbers with commas and a chosen decimal place length.
133
- - **Usage**: `format_numbers_with_commas(df,1)`
134
-
135
- ### 29. `filter_df_on_multiple_conditions(df, filters_dict)`
136
- - **Description**: Filters dataframe on multiple conditions, which come in the form of a dictionary.
137
- - **Usage**: `filter_df_on_multiple_conditions(df, {'OBS':' <= datetime(2023,1,23)','File_Name':' == 'France media''})`
138
-
139
- ### 30. `read_and_concatenate_files(folder_path, file_type='csv')`
140
- - **Description**: Read and Concatinate all files of one type in a folder.
141
- - **Usage**: `read_and_concatenate_files(folder_path, file_type='csv')`
142
-
143
- ### 31. `remove_zero_values(data_frame, column_to_filter)`
144
- - **Description**: Remove zero values in a specified column.
145
- - **Usage**: `remove_zero_values(self, data_frame, column_to_filter)`
146
-
147
- ## Data Pulling
148
-
149
- ### 1. `pull_fred_data(data_frame, column_to_filter)`
150
- - **Description**: Get data from FRED by using series id tokens.
151
- - **Usage**: `pull_fred_data(week_commencing, series_id_list)`
152
-
153
- ### 2. `pull_boe_data(week_commencing)`
154
- - **Description**: Fetch and process Bank of England interest rate data.
155
- - **Usage**: ` pull_boe_data('mon')`
156
-
157
- ### 3. `pull_ons_data(series_list, week_commencing)`
158
- - **Description**: Fetch and process time series data from the ONS API.
159
- - **Usage**: `pull_ons_data(series_list, week_commencing)`
160
-
161
- ### 4. `pull_macro(country='GBR', week_commencing='mon')`
162
- - **Description**: Fetch macroeconomic data from OECD and other sources for a specified country.
163
- - **Usage**: `pull_macro(country='GBR', week_commencing='mon')`
164
-
165
- ### 5. `get_google_mobility_data(country, wc)`
166
- - **Description**: Fetch Google Mobility data for the specified country.
167
- - **Usage**: `get_google_mobility_data(country, wc)`
168
-
169
- ### 6. `pull_combined_dummies(week_commencing)`
170
- - **Description**: Generate combined dummy variables for seasonality, trends, and COVID lockdowns.
171
- - **Usage**: `pull_combined_dummies(week_commencing)`
172
-
173
- ### 7. `pull_weather(week_commencing, country)`
174
- - **Description**: Fetch and process historical weather data for the specified country.
175
- - **Usage**: `pull_weather(week_commencing, country)`
176
-