csv-detective 0.7.5.dev1197__py3-none-any.whl → 0.7.5.dev1228__py3-none-any.whl

This diff compares two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (86)
  1. csv_detective/__init__.py +1 -1
  2. csv_detective/detect_fields/FR/geo/adresse/__init__.py +1 -1
  3. csv_detective/detect_fields/FR/other/code_csp_insee/__init__.py +1 -1
  4. csv_detective/detect_fields/FR/other/csp_insee/__init__.py +1 -1
  5. csv_detective/detect_fields/FR/other/insee_ape700/__init__.py +1 -1
  6. csv_detective/detect_fields/FR/other/sexe/__init__.py +1 -1
  7. csv_detective/detect_fields/other/float/__init__.py +1 -1
  8. csv_detective/detect_labels/FR/geo/adresse/__init__.py +1 -1
  9. csv_detective/detect_labels/FR/geo/code_commune_insee/__init__.py +1 -1
  10. csv_detective/detect_labels/FR/geo/code_departement/__init__.py +1 -1
  11. csv_detective/detect_labels/FR/geo/code_fantoir/__init__.py +1 -1
  12. csv_detective/detect_labels/FR/geo/code_postal/__init__.py +1 -1
  13. csv_detective/detect_labels/FR/geo/code_region/__init__.py +1 -1
  14. csv_detective/detect_labels/FR/geo/commune/__init__.py +1 -1
  15. csv_detective/detect_labels/FR/geo/departement/__init__.py +1 -1
  16. csv_detective/detect_labels/FR/geo/insee_canton/__init__.py +1 -1
  17. csv_detective/detect_labels/FR/geo/latitude_l93/__init__.py +1 -1
  18. csv_detective/detect_labels/FR/geo/latitude_wgs_fr_metropole/__init__.py +1 -1
  19. csv_detective/detect_labels/FR/geo/longitude_l93/__init__.py +1 -1
  20. csv_detective/detect_labels/FR/geo/longitude_wgs_fr_metropole/__init__.py +1 -1
  21. csv_detective/detect_labels/FR/geo/pays/__init__.py +1 -1
  22. csv_detective/detect_labels/FR/geo/region/__init__.py +1 -1
  23. csv_detective/detect_labels/FR/other/code_csp_insee/__init__.py +1 -1
  24. csv_detective/detect_labels/FR/other/code_rna/__init__.py +1 -1
  25. csv_detective/detect_labels/FR/other/code_waldec/__init__.py +1 -1
  26. csv_detective/detect_labels/FR/other/csp_insee/__init__.py +1 -1
  27. csv_detective/detect_labels/FR/other/date_fr/__init__.py +1 -1
  28. csv_detective/detect_labels/FR/other/insee_ape700/__init__.py +1 -1
  29. csv_detective/detect_labels/FR/other/sexe/__init__.py +1 -1
  30. csv_detective/detect_labels/FR/other/siren/__init__.py +1 -1
  31. csv_detective/detect_labels/FR/other/siret/__init__.py +1 -1
  32. csv_detective/detect_labels/FR/other/tel_fr/__init__.py +1 -1
  33. csv_detective/detect_labels/FR/other/uai/__init__.py +1 -1
  34. csv_detective/detect_labels/FR/temp/jour_de_la_semaine/__init__.py +1 -1
  35. csv_detective/detect_labels/FR/temp/mois_de_annee/__init__.py +1 -1
  36. csv_detective/detect_labels/geo/iso_country_code_alpha2/__init__.py +1 -1
  37. csv_detective/detect_labels/geo/iso_country_code_alpha3/__init__.py +1 -1
  38. csv_detective/detect_labels/geo/iso_country_code_numeric/__init__.py +1 -1
  39. csv_detective/detect_labels/geo/json_geojson/__init__.py +1 -1
  40. csv_detective/detect_labels/geo/latitude_wgs/__init__.py +1 -1
  41. csv_detective/detect_labels/geo/latlon_wgs/__init__.py +1 -1
  42. csv_detective/detect_labels/geo/longitude_wgs/__init__.py +1 -1
  43. csv_detective/detect_labels/other/booleen/__init__.py +1 -1
  44. csv_detective/detect_labels/other/email/__init__.py +1 -1
  45. csv_detective/detect_labels/other/float/__init__.py +1 -1
  46. csv_detective/detect_labels/other/int/__init__.py +1 -1
  47. csv_detective/detect_labels/other/mongo_object_id/__init__.py +1 -1
  48. csv_detective/detect_labels/other/twitter/__init__.py +1 -1
  49. csv_detective/detect_labels/other/url/__init__.py +1 -1
  50. csv_detective/detect_labels/other/uuid/__init__.py +1 -1
  51. csv_detective/detect_labels/temp/date/__init__.py +1 -1
  52. csv_detective/detect_labels/temp/datetime_iso/__init__.py +1 -1
  53. csv_detective/detect_labels/temp/datetime_rfc822/__init__.py +1 -1
  54. csv_detective/detect_labels/temp/year/__init__.py +1 -1
  55. csv_detective/detection/columns.py +89 -0
  56. csv_detective/detection/encoding.py +27 -0
  57. csv_detective/detection/engine.py +46 -0
  58. csv_detective/detection/headers.py +32 -0
  59. csv_detective/detection/rows.py +18 -0
  60. csv_detective/detection/separator.py +44 -0
  61. csv_detective/detection/variables.py +98 -0
  62. csv_detective/explore_csv.py +40 -124
  63. csv_detective/output/dataframe.py +55 -0
  64. csv_detective/{create_example.py → output/example.py} +10 -9
  65. csv_detective/output/profile.py +87 -0
  66. csv_detective/{schema_generation.py → output/schema.py} +344 -343
  67. csv_detective/output/utils.py +51 -0
  68. csv_detective/parsing/columns.py +141 -0
  69. csv_detective/parsing/compression.py +11 -0
  70. csv_detective/parsing/csv.py +55 -0
  71. csv_detective/parsing/excel.py +169 -0
  72. csv_detective/parsing/load.py +97 -0
  73. csv_detective/utils.py +10 -236
  74. {csv_detective-0.7.5.dev1197.data → csv_detective-0.7.5.dev1228.data}/data/share/csv_detective/CHANGELOG.md +3 -0
  75. {csv_detective-0.7.5.dev1197.dist-info → csv_detective-0.7.5.dev1228.dist-info}/METADATA +1 -1
  76. {csv_detective-0.7.5.dev1197.dist-info → csv_detective-0.7.5.dev1228.dist-info}/RECORD +85 -71
  77. tests/test_fields.py +8 -7
  78. tests/test_file.py +15 -14
  79. csv_detective/detection.py +0 -633
  80. /csv_detective/{process_text.py → parsing/text.py} +0 -0
  81. {csv_detective-0.7.5.dev1197.data → csv_detective-0.7.5.dev1228.data}/data/share/csv_detective/LICENSE.AGPL.txt +0 -0
  82. {csv_detective-0.7.5.dev1197.data → csv_detective-0.7.5.dev1228.data}/data/share/csv_detective/README.md +0 -0
  83. {csv_detective-0.7.5.dev1197.dist-info → csv_detective-0.7.5.dev1228.dist-info}/WHEEL +0 -0
  84. {csv_detective-0.7.5.dev1197.dist-info → csv_detective-0.7.5.dev1228.dist-info}/entry_points.txt +0 -0
  85. {csv_detective-0.7.5.dev1197.dist-info → csv_detective-0.7.5.dev1228.dist-info}/licenses/LICENSE.AGPL.txt +0 -0
  86. {csv_detective-0.7.5.dev1197.dist-info → csv_detective-0.7.5.dev1228.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,51 @@
+ import pandas as pd
+
+
+ def prepare_output_dict(return_table: pd.DataFrame, limited_output: bool):
+     return_dict_cols = return_table.to_dict("dict")
+     return_dict_cols_intermediary = {}
+     for column_name in return_dict_cols:
+         return_dict_cols_intermediary[column_name] = []
+         for detected_value_type in return_dict_cols[column_name]:
+             if return_dict_cols[column_name][detected_value_type] == 0:
+                 continue
+             dict_tmp = {}
+             dict_tmp["format"] = detected_value_type
+             dict_tmp["score"] = return_dict_cols[column_name][detected_value_type]
+             return_dict_cols_intermediary[column_name].append(dict_tmp)
+
+         # Clean dict using priorities
+         formats_detected = {
+             x["format"] for x in return_dict_cols_intermediary[column_name]
+         }
+         formats_to_remove = set()
+         # Deprioritise float and int detection vs others
+         if len(formats_detected - {"float", "int"}) > 0:
+             formats_to_remove = formats_to_remove.union({"float", "int"})
+         if "int" in formats_detected:
+             formats_to_remove.add("float")
+         if "latitude_wgs_fr_metropole" in formats_detected:
+             formats_to_remove.add("latitude_l93")
+             formats_to_remove.add("latitude_wgs")
+         if "longitude_wgs_fr_metropole" in formats_detected:
+             formats_to_remove.add("longitude_l93")
+             formats_to_remove.add("longitude_wgs")
+         if "longitude_wgs" in formats_detected:
+             formats_to_remove.add("longitude_l93")
+         if "code_region" in formats_detected:
+             formats_to_remove.add("code_departement")
+
+         formats_to_keep = formats_detected - formats_to_remove
+
+         detections = return_dict_cols_intermediary[column_name]
+         detections = [x for x in detections if x["format"] in formats_to_keep]
+         if not limited_output:
+             return_dict_cols_intermediary[column_name] = detections
+         else:
+             return_dict_cols_intermediary[column_name] = (
+                 max(detections, key=lambda x: x["score"])
+                 if len(detections) > 0
+                 else {"format": "string", "score": 1.0}
+             )
+
+     return return_dict_cols_intermediary
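
The hunk above adds csv_detective/output/utils.py (+51). A minimal usage sketch of prepare_output_dict, assuming a scores table shaped like the one test_col returns (one row per candidate format, one column per CSV column; the column names and scores below are made up):

import pandas as pd

from csv_detective.output.utils import prepare_output_dict

# Hypothetical scores table: rows are candidate formats, columns are CSV columns.
scores = pd.DataFrame(
    {"col_a": [1.0, 1.0, 0.0], "col_b": [0.0, 0.0, 0.95]},
    index=["int", "float", "siren"],
)
# limited_output=True keeps only the best-scoring surviving format per column;
# "float" is dropped for col_a because "int" was also detected.
print(prepare_output_dict(scores, limited_output=True))
# {'col_a': {'format': 'int', 'score': 1.0}, 'col_b': {'format': 'siren', 'score': 0.95}}
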
@@ -0,0 +1,141 @@
+ import logging
+ from time import time
+ from typing import Callable
+
+ import pandas as pd
+
+ from csv_detective.utils import display_logs_depending_process_time
+
+
+ def test_col_val(
+     serie: pd.Series,
+     test_func: Callable,
+     proportion: float = 0.9,
+     skipna: bool = True,
+     limited_output: bool = False,
+     verbose: bool = False,
+ ):
+     """Tests values of the serie using test_func.
+     - skipna : if True indicates that NaNs are not counted as False
+     - proportion : indicates the proportion of values that have to pass the test
+       for the serie to be detected as a certain format
+     """
+     if verbose:
+         start = time()
+
+     # TODO : change for a cleaner method and only test columns in modules labels
+     def apply_test_func(serie: pd.Series, test_func: Callable, _range: int):
+         return serie.sample(n=_range).apply(test_func)
+     try:
+         if skipna:
+             serie = serie[serie.notnull()]
+         ser_len = len(serie)
+         if ser_len == 0:
+             return 0.0
+         if not limited_output:
+             result = apply_test_func(serie, test_func, ser_len).sum() / ser_len
+             return result if result >= proportion else 0.0
+         else:
+             if proportion == 1:  # Then try first 1 value, then 5, then all
+                 for _range in [
+                     min(1, ser_len),
+                     min(5, ser_len),
+                     ser_len,
+                 ]:  # To avoid useless operations, start with 1 value,
+                     # then 5 values, then the whole serie
+                     if all(apply_test_func(serie, test_func, _range)):
+                         # print(serie.name, ': check OK')
+                         pass
+                     else:
+                         return 0.0
+                 return 1.0
+             else:
+                 # if we have a proportion, statistically it's OK to analyse up to 10k rows
+                 # (arbitrary number) and get a significant result
+                 to_analyse = min(ser_len, 10000)
+                 result = apply_test_func(serie, test_func, to_analyse).sum() / to_analyse
+                 return result if result >= proportion else 0.0
+     finally:
+         if verbose and time() - start > 3:
+             display_logs_depending_process_time(
+                 f"\t/!\\ Column '{serie.name}' took too long ({round(time() - start, 3)}s)",
+                 time() - start
+             )
+
+
+ def test_col_label(label: str, test_func: Callable, proportion: float = 1, limited_output: bool = False):
+     """Tests label (from header) using test_func.
+     - proportion : indicates the minimum score to pass the test for the serie
+       to be detected as a certain format
+     """
+     if not limited_output:
+         return test_func(label)
+     else:
+         result = test_func(label)
+         return result if result >= proportion else 0
+
+
+ def test_col(table: pd.DataFrame, all_tests: list, limited_output: bool, skipna: bool = True, verbose: bool = False):
+     # Initialising dict for tests
+     if verbose:
+         start = time()
+         logging.info("Testing columns to get types")
+     test_funcs = dict()
+     for test in all_tests:
+         name = test.__name__.split(".")[-1]
+         test_funcs[name] = {"func": test._is, "prop": test.PROPORTION}
+     return_table = pd.DataFrame(columns=table.columns)
+     for idx, (key, value) in enumerate(test_funcs.items()):
+         if verbose:
+             start_type = time()
+             logging.info(f"\t- Starting with type '{key}'")
+         # improvement lead : put the longest tests behind and make them only if previous tests not satisfactory
+         # => the following needs to change, "apply" means all columns are tested for one type at once
+         return_table.loc[key] = table.apply(
+             lambda serie: test_col_val(
+                 serie,
+                 value["func"],
+                 value["prop"],
+                 skipna=skipna,
+                 limited_output=limited_output,
+                 verbose=verbose,
+             )
+         )
+         if verbose:
+             display_logs_depending_process_time(
+                 f'\t> Done with type "{key}" in {round(time() - start_type, 3)}s ({idx+1}/{len(test_funcs)})',
+                 time() - start_type
+             )
+     if verbose:
+         display_logs_depending_process_time(f"Done testing columns in {round(time() - start, 3)}s", time() - start)
+     return return_table
+
+
+ def test_label(table: pd.DataFrame, all_tests: list, limited_output: bool, verbose: bool = False):
+     # Initialising dict for tests
+     if verbose:
+         start = time()
+         logging.info("Testing labels to get types")
+     test_funcs = dict()
+     for test in all_tests:
+         name = test.__name__.split(".")[-1]
+         test_funcs[name] = {"func": test._is, "prop": test.PROPORTION}
+
+     return_table = pd.DataFrame(columns=table.columns)
+     for idx, (key, value) in enumerate(test_funcs.items()):
+         if verbose:
+             start_type = time()
+         return_table.loc[key] = [
+             test_col_label(
+                 col_name, value["func"], value["prop"], limited_output=limited_output
+             )
+             for col_name in table.columns
+         ]
+         if verbose:
+             display_logs_depending_process_time(
+                 f'\t- Done with type "{key}" in {round(time() - start_type, 3)}s ({idx+1}/{len(test_funcs)})',
+                 time() - start_type
+             )
+     if verbose:
+         display_logs_depending_process_time(f"Done testing labels in {round(time() - start, 3)}s", time() - start)
+     return return_table
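
The hunk above adds csv_detective/parsing/columns.py (+141). A minimal sketch of test_col, assuming only what the loop itself reads from each detection module (a __name__, an _is callable and a PROPORTION threshold); the stand-in module below is hypothetical:

import types

import pandas as pd

from csv_detective.parsing.columns import test_col

# Hypothetical stand-in for a detection module.
int_module = types.ModuleType("detect_fields.other.int")
int_module._is = lambda val: str(val).lstrip("-").isdigit()
int_module.PROPORTION = 0.9

table = pd.DataFrame({"a": ["1", "2", "x"], "b": ["3", "4", "5"]})
scores = test_col(table, [int_module], limited_output=True)
# One row per format: "b" scores 1.0, "a" falls below the 0.9 threshold and gets 0.0.
print(scores)
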
@@ -0,0 +1,11 @@
+ import gzip
+ from io import BytesIO
+
+
+ def unzip(binary_file: BytesIO, engine: str) -> BytesIO:
+     if engine == "gzip":
+         with gzip.open(binary_file, mode="rb") as binary_file:
+             file_content = binary_file.read()
+     else:
+         raise NotImplementedError(f"{engine} is not yet supported")
+     return BytesIO(file_content)
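
The hunk above adds csv_detective/parsing/compression.py (+11). A round-trip sketch of unzip on an in-memory gzip buffer:

import gzip
from io import BytesIO

from csv_detective.parsing.compression import unzip

compressed = BytesIO(gzip.compress(b"a;b\n1;2\n"))
assert unzip(compressed, engine="gzip").read() == b"a;b\n1;2\n"
# Any other engine currently raises NotImplementedError.
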
@@ -0,0 +1,55 @@
+ import logging
+ from time import time
+ from typing import TextIO
+
+ import pandas as pd
+
+ from csv_detective.utils import display_logs_depending_process_time
+
+
+ def parse_csv(
+     the_file: TextIO,
+     encoding: str,
+     sep: str,
+     num_rows: int,
+     skiprows: int,
+     random_state: int = 42,
+     verbose: bool = False,
+ ) -> tuple[pd.DataFrame, int, int]:
+     if verbose:
+         start = time()
+         logging.info("Parsing table")
+     table = None
+
+     if not isinstance(the_file, str):
+         the_file.seek(0)
+
+     total_lines = None
+     for encoding in [encoding, "ISO-8859-1", "utf-8"]:
+         if encoding is None:
+             continue
+
+         if "ISO-8859" in encoding:
+             encoding = "ISO-8859-1"
+         try:
+             table = pd.read_csv(
+                 the_file, sep=sep, dtype="unicode", encoding=encoding, skiprows=skiprows
+             )
+             total_lines = len(table)
+             nb_duplicates = len(table.loc[table.duplicated()])
+             if num_rows > 0:
+                 num_rows = min(num_rows - 1, total_lines)
+                 table = table.sample(num_rows, random_state=random_state)
+             # else : table is unchanged
+             break
+         except TypeError:
+             print("Trying encoding : {encoding}".format(encoding=encoding))
+
+     if table is None:
+         raise ValueError("Could not load file")
+     if verbose:
+         display_logs_depending_process_time(
+             f'Table parsed successfully in {round(time() - start, 3)}s',
+             time() - start,
+         )
+     return table, total_lines, nb_duplicates
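
The hunk above adds csv_detective/parsing/csv.py (+55). A minimal sketch of parse_csv on an in-memory buffer (the data is made up; num_rows <= 0 keeps the whole table, a positive value samples rows):

from io import StringIO

from csv_detective.parsing.csv import parse_csv

buffer = StringIO("a;b\n1;2\n3;4\n")
table, total_lines, nb_duplicates = parse_csv(
    buffer, encoding="utf-8", sep=";", num_rows=-1, skiprows=0
)
assert total_lines == 2 and nb_duplicates == 0
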
@@ -0,0 +1,169 @@
+ from io import BytesIO
+ from time import time
+ from typing import Optional
+
+ import openpyxl
+ import pandas as pd
+ import requests
+ import xlrd
+
+ from csv_detective.detection.engine import engine_to_file
+ from csv_detective.detection.rows import remove_empty_first_rows
+ from csv_detective.utils import (
+     display_logs_depending_process_time,
+     is_url,
+ )
+
+ NEW_EXCEL_EXT = [".xlsx", ".xlsm", ".xltx", ".xltm"]
+ OLD_EXCEL_EXT = [".xls"]
+ OPEN_OFFICE_EXT = [".odf", ".ods", ".odt"]
+ XLS_LIKE_EXT = NEW_EXCEL_EXT + OLD_EXCEL_EXT + OPEN_OFFICE_EXT
+
+
+ def parse_excel(
+     file_path: str,
+     num_rows: int = -1,
+     engine: Optional[str] = None,
+     sheet_name: Optional[str] = None,
+     random_state: int = 42,
+     verbose: bool = False,
+ ) -> tuple[pd.DataFrame, int, int, str, str, int]:
+     """Excel-like parsing is really slow; speeding it up could be a good improvement for future development"""
+     if verbose:
+         start = time()
+     no_sheet_specified = sheet_name is None
+
+     if (
+         engine in ['openpyxl', 'xlrd'] or
+         any([file_path.endswith(k) for k in NEW_EXCEL_EXT + OLD_EXCEL_EXT])
+     ):
+         remote_content = None
+         if is_url(file_path):
+             r = requests.get(file_path)
+             r.raise_for_status()
+             remote_content = BytesIO(r.content)
+         if not engine:
+             if any([file_path.endswith(k) for k in NEW_EXCEL_EXT]):
+                 engine = "openpyxl"
+             else:
+                 engine = "xlrd"
+         if sheet_name is None:
+             if verbose:
+                 display_logs_depending_process_time(
+                     f'Detected {engine_to_file[engine]} file, no sheet specified, reading the largest one',
+                     time() - start,
+                 )
+             try:
+                 if engine == "openpyxl":
+                     # openpyxl doesn't want to open files that don't have a valid extension
+                     # see: https://foss.heptapod.net/openpyxl/openpyxl/-/issues/2157
+                     # if the file is remote, we have a remote content anyway so it's fine
+                     if not remote_content and '.' not in file_path.split('/')[-1]:
+                         with open(file_path, 'rb') as f:
+                             remote_content = BytesIO(f.read())
+                     # faster than loading all sheets
+                     wb = openpyxl.load_workbook(remote_content or file_path, read_only=True)
+                     try:
+                         sizes = {s.title: s.max_row * s.max_column for s in wb.worksheets}
+                     except TypeError:
+                         # sometimes read_only can't get the info, so we have to open the file for real
+                         # this takes more time but it's for a limited number of files
+                         # and it's this or nothing
+                         wb = openpyxl.load_workbook(remote_content or file_path)
+                         sizes = {s.title: s.max_row * s.max_column for s in wb.worksheets}
+                 else:
+                     if remote_content:
+                         wb = xlrd.open_workbook(file_contents=remote_content.read())
+                     else:
+                         wb = xlrd.open_workbook(file_path)
+                     sizes = {s.name: s.nrows * s.ncols for s in wb.sheets()}
+                 sheet_name = max(sizes, key=sizes.get)
+             except xlrd.biffh.XLRDError:
+                 # sometimes an xls file is recognized as ods
+                 if verbose:
+                     display_logs_depending_process_time(
+                         'Could not read file with classic xls reader, trying with ODS',
+                         time() - start,
+                     )
+                 engine = "odf"
+
+     if engine == "odf" or any([file_path.endswith(k) for k in OPEN_OFFICE_EXT]):
+         # for ODS files, no way to get sheets' sizes without
+         # loading the file one way or another (pandas or pure odfpy)
+         # so all in one
+         engine = "odf"
+         if sheet_name is None:
+             if verbose:
+                 display_logs_depending_process_time(
+                     f'Detected {engine_to_file[engine]} file, no sheet specified, reading the largest one',
+                     time() - start,
+                 )
+             tables = pd.read_excel(
+                 file_path,
+                 engine="odf",
+                 sheet_name=None,
+                 dtype="unicode",
+             )
+             sizes = {sheet_name: table.size for sheet_name, table in tables.items()}
+             sheet_name = max(sizes, key=sizes.get)
+             if verbose:
+                 display_logs_depending_process_time(
+                     f'Going forwards with sheet "{sheet_name}"',
+                     time() - start,
+                 )
+             table = tables[sheet_name]
+         else:
+             if verbose:
+                 display_logs_depending_process_time(
+                     f'Detected {engine_to_file[engine]} file, reading sheet "{sheet_name}"',
+                     time() - start,
+                 )
+             table = pd.read_excel(
+                 file_path,
+                 engine="odf",
+                 sheet_name=sheet_name,
+                 dtype="unicode",
+             )
+         table, header_row_idx = remove_empty_first_rows(table)
+         total_lines = len(table)
+         nb_duplicates = len(table.loc[table.duplicated()])
+         if num_rows > 0:
+             num_rows = min(num_rows - 1, total_lines)
+             table = table.sample(num_rows, random_state=random_state)
+         if verbose:
+             display_logs_depending_process_time(
+                 f'Table parsed successfully in {round(time() - start, 3)}s',
+                 time() - start,
+             )
+         return table, total_lines, nb_duplicates, sheet_name, engine, header_row_idx
+
+     # so here we end up with (old and new) excel files only
+     if verbose:
+         if no_sheet_specified:
+             display_logs_depending_process_time(
+                 f'Going forwards with sheet "{sheet_name}"',
+                 time() - start,
+             )
+         else:
+             display_logs_depending_process_time(
+                 f'Detected {engine_to_file[engine]} file, reading sheet "{sheet_name}"',
+                 time() - start,
+             )
+     table = pd.read_excel(
+         file_path,
+         engine=engine,
+         sheet_name=sheet_name,
+         dtype="unicode",
+     )
+     table, header_row_idx = remove_empty_first_rows(table)
+     total_lines = len(table)
+     nb_duplicates = len(table.loc[table.duplicated()])
+     if num_rows > 0:
+         num_rows = min(num_rows - 1, total_lines)
+         table = table.sample(num_rows, random_state=random_state)
+     if verbose:
+         display_logs_depending_process_time(
+             f'Table parsed successfully in {round(time() - start, 3)}s',
+             time() - start,
+         )
+     return table, total_lines, nb_duplicates, sheet_name, engine, header_row_idx
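
The hunk above adds csv_detective/parsing/excel.py (+169). A hedged usage sketch; "data.xlsx" is a hypothetical local file, and with no sheet_name given the largest sheet is selected (via openpyxl in read-only mode for .xlsx):

from csv_detective.parsing.excel import parse_excel

# "data.xlsx" is a hypothetical file path.
table, total_lines, nb_duplicates, sheet_name, engine, header_row_idx = parse_excel(
    "data.xlsx", num_rows=-1
)
print(engine, sheet_name, total_lines, header_row_idx)
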
@@ -0,0 +1,97 @@
+ from io import BytesIO, StringIO
+ from typing import Union
+
+ import pandas as pd
+ import requests
+
+ from csv_detective.detection.columns import detect_heading_columns, detect_trailing_columns
+ from csv_detective.detection.encoding import detect_encoding
+ from csv_detective.detection.engine import (
+     COMPRESSION_ENGINES,
+     EXCEL_ENGINES,
+     detect_engine,
+ )
+ from csv_detective.detection.headers import detect_headers
+ from csv_detective.detection.separator import detect_separator
+ from csv_detective.utils import is_url
+ from .compression import unzip
+ from .csv import parse_csv
+ from .excel import (
+     XLS_LIKE_EXT,
+     parse_excel,
+ )
+
+
+ def load_file(
+     file_path: str,
+     num_rows: int = 500,
+     encoding: str = None,
+     sep: str = None,
+     verbose: bool = False,
+     sheet_name: Union[str, int] = None,
+ ) -> tuple[pd.DataFrame, dict]:
+     file_name = file_path.split('/')[-1]
+     engine = None
+     if '.' not in file_name or not file_name.endswith("csv"):
+         # file has no extension or is not a csv, we'll investigate how to read it
+         engine = detect_engine(file_path, verbose=verbose)
+
+     if engine in EXCEL_ENGINES or any([file_path.endswith(k) for k in XLS_LIKE_EXT]):
+         table, total_lines, nb_duplicates, sheet_name, engine, header_row_idx = parse_excel(
+             file_path=file_path,
+             num_rows=num_rows,
+             engine=engine,
+             sheet_name=sheet_name,
+             verbose=verbose,
+         )
+         header = table.columns.to_list()
+         analysis = {
+             "engine": engine,
+             "sheet_name": sheet_name,
+         }
+     else:
+         # fetching or reading file as binary
+         if is_url(file_path):
+             r = requests.get(file_path, allow_redirects=True)
+             r.raise_for_status()
+             binary_file = BytesIO(r.content)
+         else:
+             binary_file = open(file_path, "rb")
+         # handling compression
+         if engine in COMPRESSION_ENGINES:
+             binary_file: BytesIO = unzip(binary_file=binary_file, engine=engine)
+         # detecting encoding if not specified
+         if encoding is None:
+             encoding: str = detect_encoding(binary_file, verbose=verbose)
+             binary_file.seek(0)
+         # decoding and reading file
+         if is_url(file_path) or engine in COMPRESSION_ENGINES:
+             str_file = StringIO(binary_file.read().decode(encoding=encoding))
+         else:
+             str_file = open(file_path, "r", encoding=encoding)
+         if sep is None:
+             sep = detect_separator(str_file, verbose=verbose)
+         header_row_idx, header = detect_headers(str_file, sep, verbose=verbose)
+         if header is None:
+             return {"error": True}
+         elif isinstance(header, list):
+             if any([x is None for x in header]):
+                 return {"error": True}
+         heading_columns = detect_heading_columns(str_file, sep, verbose=verbose)
+         trailing_columns = detect_trailing_columns(str_file, sep, heading_columns, verbose=verbose)
+         table, total_lines, nb_duplicates = parse_csv(
+             str_file, encoding, sep, num_rows, header_row_idx, verbose=verbose
+         )
+         analysis = {
+             "encoding": encoding,
+             "separator": sep,
+             "heading_columns": heading_columns,
+             "trailing_columns": trailing_columns,
+         }
+     analysis.update({
+         "header_row_idx": header_row_idx,
+         "header": header,
+         "total_lines": total_lines,
+         "nb_duplicates": nb_duplicates,
+     })
+     return table, analysis
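
The hunk above adds csv_detective/parsing/load.py (+97), the single entry point that routes to the CSV or Excel parser. A minimal sketch; "input.csv" is a hypothetical local file:

from csv_detective.parsing.load import load_file

# Encoding and separator are detected when not passed explicitly.
table, analysis = load_file("input.csv", num_rows=500)
print(analysis["encoding"], analysis["separator"], analysis["total_lines"])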