csv-detective 0.8.1.dev1674__py3-none-any.whl → 0.8.1.dev1703__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
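For readers who want to reproduce a comparison like this locally, here is a minimal sketch (not part of the diff) that downloads both wheels with pip and compares their file listings; the package name and versions come from the header above, and dev builds may no longer be available on the index:

```python
# Sketch: list files added to / removed from the wheel between two versions.
# Assumes both versions can still be downloaded from the package index.
import pathlib
import subprocess
import sys
import tempfile
import zipfile

def wheel_files(version: str, dest: pathlib.Path) -> set[str]:
    subprocess.run(
        [sys.executable, "-m", "pip", "download", f"csv-detective=={version}",
         "--no-deps", "--dest", str(dest)],
        check=True,
    )
    wheel = next(dest.glob(f"csv_detective-{version}-*.whl"))
    with zipfile.ZipFile(wheel) as zf:
        return set(zf.namelist())

with tempfile.TemporaryDirectory() as tmp:
    root = pathlib.Path(tmp)
    old = wheel_files("0.8.1.dev1674", root / "old")
    new = wheel_files("0.8.1.dev1703", root / "new")
    print("removed:", sorted(old - new))
    print("added:  ", sorted(new - old))
```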
- csv_detective/__init__.py +0 -2
- csv_detective-0.8.1.dev1674.data/data/share/csv_detective/README.md → csv_detective-0.8.1.dev1703.dist-info/METADATA +31 -0
- {csv_detective-0.8.1.dev1674.dist-info → csv_detective-0.8.1.dev1703.dist-info}/RECORD +10 -10
- {csv_detective-0.8.1.dev1674.dist-info → csv_detective-0.8.1.dev1703.dist-info}/top_level.txt +2 -0
- venv/bin/activate_this.py +38 -0
- venv/bin/jp.py +54 -0
- venv/bin/runxlrd.py +410 -0
- csv_detective-0.8.1.dev1674.data/data/share/csv_detective/CHANGELOG.md +0 -186
- csv_detective-0.8.1.dev1674.dist-info/METADATA +0 -268
- csv_detective-0.8.1.dev1674.dist-info/licenses/LICENSE +0 -21
- {csv_detective-0.8.1.dev1674.dist-info → csv_detective-0.8.1.dev1703.dist-info}/WHEEL +0 -0
- {csv_detective-0.8.1.dev1674.dist-info → csv_detective-0.8.1.dev1703.dist-info}/entry_points.txt +0 -0
- {csv_detective-0.8.1.dev1674.data/data/share/csv_detective → csv_detective-0.8.1.dev1703.dist-info/licenses}/LICENSE +0 -0
csv_detective/__init__.py CHANGED

csv_detective-0.8.1.dev1674.data/data/share/csv_detective/README.md → csv_detective-0.8.1.dev1703.dist-info/METADATA RENAMED

@@ -1,3 +1,34 @@
+Metadata-Version: 2.4
+Name: csv-detective
+Version: 0.8.1.dev1703
+Summary: Detect tabular files column content
+Author-email: Etalab <opendatateam@data.gouv.fr>
+License: MIT
+Project-URL: Source, https://github.com/datagouv/csv_detective
+Keywords: CSV,data processing,encoding,guess,parser,tabular
+Requires-Python: <3.14,>=3.9
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: boto3<2,>=1.34.0
+Requires-Dist: dateparser<2,>=1.2.0
+Requires-Dist: faust-cchardet==2.1.19
+Requires-Dist: pandas<3,>=2.2.0
+Requires-Dist: python-dateutil<3,>=2.8.2
+Requires-Dist: Unidecode<2,>=1.3.6
+Requires-Dist: openpyxl==3.1.5
+Requires-Dist: xlrd==2.0.1
+Requires-Dist: odfpy==1.4.1
+Requires-Dist: requests<3,>=2.32.3
+Requires-Dist: python-magic==0.4.27
+Requires-Dist: frformat==0.4.0
+Requires-Dist: Faker>=33.0.0
+Requires-Dist: rstr==3.2.2
+Provides-Extra: dev
+Requires-Dist: pytest>=8.3.0; extra == "dev"
+Requires-Dist: responses>=0.25.0; extra == "dev"
+Requires-Dist: bumpx>=0.3.10; extra == "dev"
+Dynamic: license-file
+
 # CSV Detective
 
 This is a package to **automatically detect column content in tabular files**. The script reads either the whole file or the first few rows and performs various checks to see for each column if it matches with various content types. This is currently done through regex and string comparison.
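The new METADATA above uses RFC 822-style headers, so the standard-library email parser can read it; a minimal sketch (the on-disk path is illustrative):

```python
# Sketch: parse a wheel's METADATA file with the stdlib email parser.
from email.parser import Parser

with open("csv_detective-0.8.1.dev1703.dist-info/METADATA") as f:
    meta = Parser().parse(f)

print(meta["Name"], meta["Version"])       # csv-detective 0.8.1.dev1703
print(meta.get_all("Requires-Dist")[:3])   # first few dependency pins
```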
{csv_detective-0.8.1.dev1674.dist-info → csv_detective-0.8.1.dev1703.dist-info}/RECORD RENAMED

@@ -1,4 +1,4 @@
-csv_detective/__init__.py,sha256=
+csv_detective/__init__.py,sha256=XY7pnoNHlocvyUiK8EQpJYPSQt5BRWWJD8KiPlvI9pU,164
 csv_detective/cli.py,sha256=VNztFz2nc90E3zkghF8PYtXTEZ6TrBSCQMi9v1ljkJs,1414
 csv_detective/explore_csv.py,sha256=VEeAJaz3FPOmGmQ-Yuf3FuSRRPULM03FrTf3qwZX52s,9222
 csv_detective/load_tests.py,sha256=GILvfkd4OVI-72mA4nzbPlZqgcXZ4wznOhGfZ1ucWkM,2385
@@ -150,10 +150,7 @@ csv_detective/parsing/csv.py,sha256=11mibDnJhIjykXLGZvA5ZEU5U7KgxIrbyO6BNv6jlro,
 csv_detective/parsing/excel.py,sha256=AslE2S1e67o8yTIAIhp-lAnJ6-XqeBBRz1-VMFqhZBM,7055
 csv_detective/parsing/load.py,sha256=u6fbGFZsL2GwPQRzhAXgt32JpUur7vbQdErREHxNJ-w,3661
 csv_detective/parsing/text.py,sha256=_TprGi0gHZlRsafizI3dqQhBehZW4BazqxmypMcAZ-o,1824
-csv_detective-0.8.1.
-csv_detective-0.8.1.dev1674.data/data/share/csv_detective/LICENSE,sha256=A1dQrzxyxRHRih02KwibWj1khQyF7GeA6SqdOU87Gk4,1088
-csv_detective-0.8.1.dev1674.data/data/share/csv_detective/README.md,sha256=gKLFmC8kuCCywS9eAhMak_JNriUWWNOsBKleAu5TIEY,8501
-csv_detective-0.8.1.dev1674.dist-info/licenses/LICENSE,sha256=A1dQrzxyxRHRih02KwibWj1khQyF7GeA6SqdOU87Gk4,1088
+csv_detective-0.8.1.dev1703.dist-info/licenses/LICENSE,sha256=A1dQrzxyxRHRih02KwibWj1khQyF7GeA6SqdOU87Gk4,1088
 tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/test_example.py,sha256=JeHxSK0IVDcSrOhSZlNGSQv4JAc_r6mzvJM8PfmLTMw,2018
 tests/test_fields.py,sha256=IwMpjOn8W5kDCvJYp3Cer4m571qomzjupOAvSRFMg_Q,11819
@@ -161,8 +158,11 @@ tests/test_file.py,sha256=0bHV9wx9mSRoav_DVF19g694yohb1p0bw7rtcBeKG-8,8451
 tests/test_labels.py,sha256=Nkr645bUewrj8hjNDKr67FQ6Sy_TID6f3E5Kfkl231M,464
 tests/test_structure.py,sha256=bv-tjgXohvQAxwmxzH0BynFpK2TyPjcxvtIAmIRlZmA,1393
 tests/test_validation.py,sha256=CTGonR6htxcWF9WH8MxumDD8cF45Y-G4hm94SM4lFjU,3246
-
-
-
-csv_detective-0.8.1.
-csv_detective-0.8.1.
+venv/bin/activate_this.py,sha256=NRy3waFmwW1pOaNUp33wNN0vD1Kzkd-zXX-Sgl4EiVI,1286
+venv/bin/jp.py,sha256=7z7dvRg0M7HzpZG4ssQID7nScjvQx7bcYTxJWDOrS6E,1717
+venv/bin/runxlrd.py,sha256=YlZMuycM_V_hzNt2yt3FyXPuwouMCmMhvj1oZaBeeuw,16092
+csv_detective-0.8.1.dev1703.dist-info/METADATA,sha256=HsL5tsoa92LIZSGCAth5zhUCRd-ovvKqHQSO2CaSrIo,9527
+csv_detective-0.8.1.dev1703.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+csv_detective-0.8.1.dev1703.dist-info/entry_points.txt,sha256=JjweTReFqKJmuvkegzlew2j3D5pZzfxvbEGOtGVGmaY,56
+csv_detective-0.8.1.dev1703.dist-info/top_level.txt,sha256=cYKb4Ok3XgYA7rMDOYtxysjSJp_iUA9lJjynhVzue8g,30
+csv_detective-0.8.1.dev1703.dist-info/RECORD,,
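Each RECORD row above has the form `path,sha256=<digest>,size`, where the digest is the urlsafe base64 of the raw SHA-256 hash with `=` padding stripped; a minimal verification sketch using values from the rows above:

```python
# Sketch: verify one RECORD entry against the file on disk.
import base64
import hashlib
import os

def check_record_entry(path: str, recorded_hash: str, recorded_size: int) -> bool:
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return digest.decode() == recorded_hash and os.path.getsize(path) == recorded_size

# For the row "csv_detective/cli.py,sha256=VNztFz2...,1414":
# check_record_entry("csv_detective/cli.py",
#                    "VNztFz2nc90E3zkghF8PYtXTEZ6TrBSCQMi9v1ljkJs", 1414)
```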
venv/bin/activate_this.py ADDED

@@ -0,0 +1,38 @@
+"""
+Activate virtualenv for current interpreter:
+
+import runpy
+runpy.run_path(this_file)
+
+This can be used when you must use an existing Python interpreter, not the virtualenv bin/python.
+"""  # noqa: D415
+
+from __future__ import annotations
+
+import os
+import site
+import sys
+
+try:
+    abs_file = os.path.abspath(__file__)
+except NameError as exc:
+    msg = "You must use import runpy; runpy.run_path(this_file)"
+    raise AssertionError(msg) from exc
+
+bin_dir = os.path.dirname(abs_file)
+base = bin_dir[: -len("bin") - 1]  # strip away the bin part from the __file__, plus the path separator
+
+# prepend bin to PATH (this file is inside the bin directory)
+os.environ["PATH"] = os.pathsep.join([bin_dir, *os.environ.get("PATH", "").split(os.pathsep)])
+os.environ["VIRTUAL_ENV"] = base  # virtual env is right above bin directory
+os.environ["VIRTUAL_ENV_PROMPT"] = '' or os.path.basename(base)
+
+# add the virtual environments libraries to the host python import mechanism
+prev_length = len(sys.path)
+for lib in '../lib/python3.9/site-packages'.split(os.pathsep):
+    path = os.path.realpath(os.path.join(bin_dir, lib))
+    site.addsitedir(path.decode("utf-8") if '' else path)
+sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
+
+sys.real_prefix = sys.prefix
+sys.prefix = base
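As its docstring says, activate_this.py is meant to be executed in-process rather than imported; a minimal usage sketch (the venv path is illustrative):

```python
# Sketch: activate the bundled virtualenv from an arbitrary interpreter.
import runpy

this_file = "venv/bin/activate_this.py"  # illustrative path
runpy.run_path(this_file)
# After this call, sys.prefix and sys.path point into the virtualenv.
```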
venv/bin/jp.py ADDED

@@ -0,0 +1,54 @@
+#!/home/circleci/project/venv/bin/python
+
+import sys
+import json
+import argparse
+from pprint import pformat
+
+import jmespath
+from jmespath import exceptions
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('expression')
+    parser.add_argument('-f', '--filename',
+                        help=('The filename containing the input data. '
+                              'If a filename is not given then data is '
+                              'read from stdin.'))
+    parser.add_argument('--ast', action='store_true',
+                        help=('Pretty print the AST, do not search the data.'))
+    args = parser.parse_args()
+    expression = args.expression
+    if args.ast:
+        # Only print the AST
+        expression = jmespath.compile(args.expression)
+        sys.stdout.write(pformat(expression.parsed))
+        sys.stdout.write('\n')
+        return 0
+    if args.filename:
+        with open(args.filename, 'r') as f:
+            data = json.load(f)
+    else:
+        data = sys.stdin.read()
+        data = json.loads(data)
+    try:
+        sys.stdout.write(json.dumps(
+            jmespath.search(expression, data), indent=4, ensure_ascii=False))
+        sys.stdout.write('\n')
+    except exceptions.ArityError as e:
+        sys.stderr.write("invalid-arity: %s\n" % e)
+        return 1
+    except exceptions.JMESPathTypeError as e:
+        sys.stderr.write("invalid-type: %s\n" % e)
+        return 1
+    except exceptions.UnknownFunctionError as e:
+        sys.stderr.write("unknown-function: %s\n" % e)
+        return 1
+    except exceptions.ParseError as e:
+        sys.stderr.write("syntax-error: %s\n" % e)
+        return 1
+
+
+if __name__ == '__main__':
+    sys.exit(main())
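jp.py is the command-line wrapper shipped with the jmespath package; a minimal sketch of the same search done directly in Python (the sample data is made up):

```python
# Sketch: what jp.py does under the hood -- evaluate a JMESPath
# expression against parsed JSON.
import jmespath

data = {"columns": [{"name": "code commune", "score": 1.0},
                    {"name": "code postal", "score": 0.5}]}
# Backticks delimit JSON literals in JMESPath filter expressions.
print(jmespath.search("columns[?score > `0.7`].name", data))  # ['code commune']
```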
venv/bin/runxlrd.py ADDED

@@ -0,0 +1,410 @@
+#!/home/circleci/project/venv/bin/python
+# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
+# This script is part of the xlrd package, which is released under a
+# BSD-style licence.
+
+from __future__ import print_function
+
+cmd_doc = """
+Commands:
+
+2rows           Print the contents of first and last row in each sheet
+3rows           Print the contents of first, second and last row in each sheet
+bench           Same as "show", but doesn't print -- for profiling
+biff_count[1]   Print a count of each type of BIFF record in the file
+biff_dump[1]    Print a dump (char and hex) of the BIFF records in the file
+fonts           hdr + print a dump of all font objects
+hdr             Mini-overview of file (no per-sheet information)
+hotshot         Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
+labels          Dump of sheet.col_label_ranges and ...row... for each sheet
+name_dump       Dump of each object in book.name_obj_list
+names           Print brief information for each NAME record
+ov              Overview of file
+profile         Like "hotshot", but uses cProfile
+show            Print the contents of all rows in each sheet
+version[0]      Print versions of xlrd and Python and exit
+xfc             Print "XF counts" and cell-type counts -- see code for details
+
+[0] means no file arg
+[1] means only one file arg i.e. no glob.glob pattern
+"""
+
+options = None
+if __name__ == "__main__":
+    import xlrd
+    import sys
+    import time
+    import glob
+    import traceback
+    import gc
+
+    from xlrd.timemachine import xrange, REPR
+
+
+    class LogHandler(object):
+
+        def __init__(self, logfileobj):
+            self.logfileobj = logfileobj
+            self.fileheading = None
+            self.shown = 0
+
+        def setfileheading(self, fileheading):
+            self.fileheading = fileheading
+            self.shown = 0
+
+        def write(self, text):
+            if self.fileheading and not self.shown:
+                self.logfileobj.write(self.fileheading)
+                self.shown = 1
+            self.logfileobj.write(text)
+
+    null_cell = xlrd.empty_cell
+
+    def show_row(bk, sh, rowx, colrange, printit):
+        if bk.ragged_rows:
+            colrange = range(sh.row_len(rowx))
+        if not colrange: return
+        if printit: print()
+        if bk.formatting_info:
+            for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
+                if printit:
+                    print("cell %s%d: type=%d, data: %r, xfx: %s"
+                        % (xlrd.colname(colx), rowx+1, ty, val, cxfx))
+        else:
+            for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
+                if printit:
+                    print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
+
+    def get_row_data(bk, sh, rowx, colrange):
+        result = []
+        dmode = bk.datemode
+        ctys = sh.row_types(rowx)
+        cvals = sh.row_values(rowx)
+        for colx in colrange:
+            cty = ctys[colx]
+            cval = cvals[colx]
+            if bk.formatting_info:
+                cxfx = str(sh.cell_xf_index(rowx, colx))
+            else:
+                cxfx = ''
+            if cty == xlrd.XL_CELL_DATE:
+                try:
+                    showval = xlrd.xldate_as_tuple(cval, dmode)
+                except xlrd.XLDateError as e:
+                    showval = "%s:%s" % (type(e).__name__, e)
+                    cty = xlrd.XL_CELL_ERROR
+            elif cty == xlrd.XL_CELL_ERROR:
+                showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
+            else:
+                showval = cval
+            result.append((colx, cty, showval, cxfx))
+        return result
+
+    def bk_header(bk):
+        print()
+        print("BIFF version: %s; datemode: %s"
+            % (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
+        print("codepage: %r (encoding: %s); countries: %r"
+            % (bk.codepage, bk.encoding, bk.countries))
+        print("Last saved by: %r" % bk.user_name)
+        print("Number of data sheets: %d" % bk.nsheets)
+        print("Use mmap: %d; Formatting: %d; On demand: %d"
+            % (bk.use_mmap, bk.formatting_info, bk.on_demand))
+        print("Ragged rows: %d" % bk.ragged_rows)
+        if bk.formatting_info:
+            print("FORMATs: %d, FONTs: %d, XFs: %d"
+                % (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
+        if not options.suppress_timing:
+            print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
+                % (bk.load_time_stage_1, bk.load_time_stage_2))
+        print()
+
+    def show_fonts(bk):
+        print("Fonts:")
+        for x in xrange(len(bk.font_list)):
+            font = bk.font_list[x]
+            font.dump(header='== Index %d ==' % x, indent=4)
+
+    def show_names(bk, dump=0):
+        bk_header(bk)
+        if bk.biff_version < 50:
+            print("Names not extracted in this BIFF version")
+            return
+        nlist = bk.name_obj_list
+        print("Name list: %d entries" % len(nlist))
+        for nobj in nlist:
+            if dump:
+                nobj.dump(sys.stdout,
+                    header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
+            else:
+                print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
+                    % (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
+
+    def print_labels(sh, labs, title):
+        if not labs: return
+        for rlo, rhi, clo, chi in labs:
+            print("%s label range %s:%s contains:"
+                % (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
+            for rx in xrange(rlo, rhi):
+                for cx in xrange(clo, chi):
+                    print("    %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
+
+    def show_labels(bk):
+        # bk_header(bk)
+        hdr = 0
+        for shx in range(bk.nsheets):
+            sh = bk.sheet_by_index(shx)
+            clabs = sh.col_label_ranges
+            rlabs = sh.row_label_ranges
+            if clabs or rlabs:
+                if not hdr:
+                    bk_header(bk)
+                    hdr = 1
+                print("sheet %d: name = %r; nrows = %d; ncols = %d" %
+                    (shx, sh.name, sh.nrows, sh.ncols))
+                print_labels(sh, clabs, 'Col')
+                print_labels(sh, rlabs, 'Row')
+            if bk.on_demand: bk.unload_sheet(shx)
+
+    def show(bk, nshow=65535, printit=1):
+        bk_header(bk)
+        if 0:
+            rclist = xlrd.sheet.rc_stats.items()
+            rclist = sorted(rclist)
+            print("rc stats")
+            for k, v in rclist:
+                print("0x%04x %7d" % (k, v))
+        if options.onesheet:
+            try:
+                shx = int(options.onesheet)
+            except ValueError:
+                shx = bk.sheet_by_name(options.onesheet).number
+            shxrange = [shx]
+        else:
+            shxrange = range(bk.nsheets)
+        # print("shxrange", list(shxrange))
+        for shx in shxrange:
+            sh = bk.sheet_by_index(shx)
+            nrows, ncols = sh.nrows, sh.ncols
+            colrange = range(ncols)
+            anshow = min(nshow, nrows)
+            print("sheet %d: name = %s; nrows = %d; ncols = %d" %
+                (shx, REPR(sh.name), sh.nrows, sh.ncols))
+            if nrows and ncols:
+                # Beat the bounds
+                for rowx in xrange(nrows):
+                    nc = sh.row_len(rowx)
+                    if nc:
+                        sh.row_types(rowx)[nc-1]
+                        sh.row_values(rowx)[nc-1]
+                        sh.cell(rowx, nc-1)
+            for rowx in xrange(anshow-1):
+                if not printit and rowx % 10000 == 1 and rowx > 1:
+                    print("done %d rows" % (rowx-1,))
+                show_row(bk, sh, rowx, colrange, printit)
+            if anshow and nrows:
+                show_row(bk, sh, nrows-1, colrange, printit)
+            print()
+            if bk.on_demand: bk.unload_sheet(shx)
+
+    def count_xfs(bk):
+        bk_header(bk)
+        for shx in range(bk.nsheets):
+            sh = bk.sheet_by_index(shx)
+            nrows = sh.nrows
+            print("sheet %d: name = %r; nrows = %d; ncols = %d" %
+                (shx, sh.name, sh.nrows, sh.ncols))
+            # Access all xfindexes to force gathering stats
+            type_stats = [0, 0, 0, 0, 0, 0, 0]
+            for rowx in xrange(nrows):
+                for colx in xrange(sh.row_len(rowx)):
+                    xfx = sh.cell_xf_index(rowx, colx)
+                    assert xfx >= 0
+                    cty = sh.cell_type(rowx, colx)
+                    type_stats[cty] += 1
+            print("XF stats", sh._xf_index_stats)
+            print("type stats", type_stats)
+            print()
+            if bk.on_demand: bk.unload_sheet(shx)
+
+    def main(cmd_args):
+        import optparse
+        global options
+        usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
+        oparser = optparse.OptionParser(usage)
+        oparser.add_option(
+            "-l", "--logfilename",
+            default="",
+            help="contains error messages")
+        oparser.add_option(
+            "-v", "--verbosity",
+            type="int", default=0,
+            help="level of information and diagnostics provided")
+        oparser.add_option(
+            "-m", "--mmap",
+            type="int", default=-1,
+            help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
+        oparser.add_option(
+            "-e", "--encoding",
+            default="",
+            help="encoding override")
+        oparser.add_option(
+            "-f", "--formatting",
+            type="int", default=0,
+            help="0 (default): no fmt info\n"
+                 "1: fmt info (all cells)\n",
+            )
+        oparser.add_option(
+            "-g", "--gc",
+            type="int", default=0,
+            help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
+        oparser.add_option(
+            "-s", "--onesheet",
+            default="",
+            help="restrict output to this sheet (name or index)")
+        oparser.add_option(
+            "-u", "--unnumbered",
+            action="store_true", default=0,
+            help="omit line numbers or offsets in biff_dump")
+        oparser.add_option(
+            "-d", "--on-demand",
+            action="store_true", default=0,
+            help="load sheets on demand instead of all at once")
+        oparser.add_option(
+            "-t", "--suppress-timing",
+            action="store_true", default=0,
+            help="don't print timings (diffs are less messy)")
+        oparser.add_option(
+            "-r", "--ragged-rows",
+            action="store_true", default=0,
+            help="open_workbook(..., ragged_rows=True)")
+        options, args = oparser.parse_args(cmd_args)
+        if len(args) == 1 and args[0] in ("version", ):
+            pass
+        elif len(args) < 2:
+            oparser.error("Expected at least 2 args, found %d" % len(args))
+        cmd = args[0]
+        xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
+        if cmd == 'biff_dump':
+            xlrd.dump(args[1], unnumbered=options.unnumbered)
+            sys.exit(0)
+        if cmd == 'biff_count':
+            xlrd.count_records(args[1])
+            sys.exit(0)
+        if cmd == 'version':
+            print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
+            print("Python:", sys.version)
+            sys.exit(0)
+        if options.logfilename:
+            logfile = LogHandler(open(options.logfilename, 'w'))
+        else:
+            logfile = sys.stdout
+        mmap_opt = options.mmap
+        mmap_arg = xlrd.USE_MMAP
+        if mmap_opt in (1, 0):
+            mmap_arg = mmap_opt
+        elif mmap_opt != -1:
+            print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
+        fmt_opt = options.formatting | (cmd in ('xfc', ))
+        gc_mode = options.gc
+        if gc_mode:
+            gc.disable()
+        for pattern in args[1:]:
+            for fname in glob.glob(pattern):
+                print("\n=== File: %s ===" % fname)
+                if logfile != sys.stdout:
+                    logfile.setfileheading("\n=== File: %s ===\n" % fname)
+                if gc_mode == 1:
+                    n_unreachable = gc.collect()
+                    if n_unreachable:
+                        print("GC before open:", n_unreachable, "unreachable objects")
+                try:
+                    t0 = time.time()
+                    bk = xlrd.open_workbook(
+                        fname,
+                        verbosity=options.verbosity, logfile=logfile,
+                        use_mmap=mmap_arg,
+                        encoding_override=options.encoding,
+                        formatting_info=fmt_opt,
+                        on_demand=options.on_demand,
+                        ragged_rows=options.ragged_rows,
+                    )
+                    t1 = time.time()
+                    if not options.suppress_timing:
+                        print("Open took %.2f seconds" % (t1-t0,))
+                except xlrd.XLRDError as e:
+                    print("*** Open failed: %s: %s" % (type(e).__name__, e))
+                    continue
+                except KeyboardInterrupt:
+                    print("*** KeyboardInterrupt ***")
+                    traceback.print_exc(file=sys.stdout)
+                    sys.exit(1)
+                except BaseException as e:
+                    print("*** Open failed: %s: %s" % (type(e).__name__, e))
+                    traceback.print_exc(file=sys.stdout)
+                    continue
+                t0 = time.time()
+                if cmd == 'hdr':
+                    bk_header(bk)
+                elif cmd == 'ov': # OverView
+                    show(bk, 0)
+                elif cmd == 'show': # all rows
+                    show(bk)
+                elif cmd == '2rows': # first row and last row
+                    show(bk, 2)
+                elif cmd == '3rows': # first row, 2nd row and last row
+                    show(bk, 3)
+                elif cmd == 'bench':
+                    show(bk, printit=0)
+                elif cmd == 'fonts':
+                    bk_header(bk)
+                    show_fonts(bk)
+                elif cmd == 'names': # named reference list
+                    show_names(bk)
+                elif cmd == 'name_dump': # named reference list
+                    show_names(bk, dump=1)
+                elif cmd == 'labels':
+                    show_labels(bk)
+                elif cmd == 'xfc':
+                    count_xfs(bk)
+                else:
+                    print("*** Unknown command <%s>" % cmd)
+                    sys.exit(1)
+                del bk
+                if gc_mode == 1:
+                    n_unreachable = gc.collect()
+                    if n_unreachable:
+                        print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
+                if not options.suppress_timing:
+                    t1 = time.time()
+                    print("\ncommand took %.2f seconds\n" % (t1-t0,))
+
+        return None
+
+    av = sys.argv[1:]
+    if not av:
+        main(av)
+    firstarg = av[0].lower()
+    if firstarg == "hotshot":
+        import hotshot
+        import hotshot.stats
+        av = av[1:]
+        prof_log_name = "XXXX.prof"
+        prof = hotshot.Profile(prof_log_name)
+        # benchtime, result = prof.runcall(main, *av)
+        result = prof.runcall(main, *(av, ))
+        print("result", repr(result))
+        prof.close()
+        stats = hotshot.stats.load(prof_log_name)
+        stats.strip_dirs()
+        stats.sort_stats('time', 'calls')
+        stats.print_stats(20)
+    elif firstarg == "profile":
+        import cProfile
+        av = av[1:]
+        cProfile.run('main(av)', 'YYYY.prof')
+        import pstats
+        p = pstats.Stats('YYYY.prof')
+        p.strip_dirs().sort_stats('cumulative').print_stats(30)
+    else:
+        main(av)
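runxlrd.py is xlrd's bundled diagnostic script; a minimal sketch of invoking it with the interpreter from the bundled venv (the spreadsheet path is illustrative; the command names come from cmd_doc above):

```python
# Sketch: run the "3rows" command, which prints the first, second and
# last row of each sheet in the workbook.
import subprocess

subprocess.run(
    ["venv/bin/python", "venv/bin/runxlrd.py", "3rows", "example.xls"],
    check=True,
)
```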
csv_detective-0.8.1.dev1674.data/data/share/csv_detective/CHANGELOG.md DELETED

@@ -1,186 +0,0 @@
-# Changelog
-
-## Current (in progress)
-
-- Refactor label testing [#119](https://github.com/datagouv/csv-detective/pull/119)
-- Refactor repo metadata and requirements [#120](https://github.com/datagouv/csv-detective/pull/120) [#122](https://github.com/datagouv/csv-detective/pull/122) [#135](https://github.com/datagouv/csv-detective/pull/135) [#136](https://github.com/datagouv/csv-detective/pull/136) [#138](https://github.com/datagouv/csv-detective/pull/138)
-- Better URL detection [#121](https://github.com/datagouv/csv-detective/pull/121)
-- For big files, analyse on sample then validate on whole file [#124](https://github.com/datagouv/csv-detective/pull/124) [#129](https://github.com/datagouv/csv-detective/pull/129)
-- Fix imports [#125](https://github.com/datagouv/csv-detective/pull/125) [#126](https://github.com/datagouv/csv-detective/pull/126) [#127](https://github.com/datagouv/csv-detective/pull/127) [#128](https://github.com/datagouv/csv-detective/pull/128)
-- Split aware and naive datetimes for hydra to cast them separately [#130](https://github.com/datagouv/csv-detective/pull/130)
-- Validate using the testing function, to consider PROPORTIONS [#131](https://github.com/datagouv/csv-detective/pull/131)
-- Remove `datetime_iso` format due to ambiguous cast in db (can be naive or aware) [#132](https://github.com/datagouv/csv-detective/pull/132)
-- Add `lonlat_wgs` format and handle optional brackets for `latlon_wgs` [#133](https://github.com/datagouv/csv-detective/pull/133)
-- Refactor format prioritizing [#134](https://github.com/datagouv/csv-detective/pull/134)
-
-## 0.8.0 (2025-05-20)
-
-- New function that creates a csv from a list of fields and constraints, or from a TableSchema [#101](https://github.com/datagouv/csv-detective/pull/101)
-- Enable outputing loaded dataframe [#102](https://github.com/datagouv/csv-detective/pull/102)
-- Better naming, hint types and minor refactors [#103](https://github.com/datagouv/csv-detective/pull/103)
-- The returned dataframe has its columns properly cast to the detected types [#104](https://github.com/datagouv/csv-detective/pull/104)
-- Raise an error if the encoding could not be guessed [#106](https://github.com/datagouv/csv-detective/pull/106)
-- Fix CLI and minio routine [#107](https://github.com/datagouv/csv-detective/pull/107)
-- Allow to only specify tests to skip ("all but...") [#108](https://github.com/datagouv/csv-detective/pull/108)
-- Fix bool casting [#109](https://github.com/datagouv/csv-detective/pull/109)
-- Handle csv.gz files [#110](https://github.com/datagouv/csv-detective/pull/110)
-- Refactor file tests [#110](https://github.com/datagouv/csv-detective/pull/110)
-- Restructure repo (breaking changes) [#111](https://github.com/datagouv/csv-detective/pull/111)
-- Add validation function and associated flow [#112](https://github.com/datagouv/csv-detective/pull/112)
-- Better float detection [#113](https://github.com/datagouv/csv-detective/pull/113)
-- Refactor fields tests [#114](https://github.com/datagouv/csv-detective/pull/114)
-- Better code waldec and add code import [#116](https://github.com/datagouv/csv-detective/pull/116)
-- Better validation and refactors [#117](https://github.com/datagouv/csv-detective/pull/117)
-- Fix validation [#118](https://github.com/datagouv/csv-detective/pull/118)
-
-## 0.7.4 (2024-11-15)
-
-- Enable calling main functions from base [#97](https://github.com/datagouv/csv-detective/pull/97)
-- Better detection of ints and floats [#94](https://github.com/datagouv/csv-detective/pull/94)
-- Better handle NaN values [#96](https://github.com/datagouv/csv-detective/pull/96)
-- Reshape exemple.py, clean up code and improve changelog [#98](https://github.com/datagouv/csv-detective/pull/98)
-
-## 0.7.3 (2024-10-07)
-
-- Refactor tests import, now using folder arborescence instead of pre-made file [#93](https://github.com/datagouv/csv-detective/pull/93)
-- Fix inversion (count<=>value) in profile [#95](https://github.com/datagouv/csv-detective/pull/95)
-
-## 0.7.2 (2024-08-27)
-
-- Outsource many formats to fr-format library [#87](https://github.com/datagouv/csv-detective/pull/87)
-- Better date detection [#89](https://github.com/datagouv/csv-detective/pull/89)
-- Update dependencies to make tests pass [#81](https://github.com/datagouv/csv-detective/pull/81)
-- Update readme [#81](https://github.com/datagouv/csv-detective/pull/81)
-- Hint type [#81](https://github.com/datagouv/csv-detective/pull/81)
-- Minor refactors [#81](https://github.com/datagouv/csv-detective/pull/81)
-
-## 0.7.1 (2024-03-27)
-
-- Fixes after production release in hydra [#80](https://github.com/datagouv/csv-detective/pull/80)
-
-## 0.7.0 (2024-03-21)
-
-- Handle other file formats: xls, xlsx, ods (and more) and analysis through URLs [#73](https://github.com/datagouv/csv-detective/pull/73)
-- Handle files with no extension (cc hydra) [#79](https://github.com/datagouv/csv-detective/pull/79)
-
-## 0.6.8 (2024-01-18)
-
-- prevent exporting NaN values in profile [#72](https://github.com/datagouv/csv-detective/pull/72)
-- raise ValueError if analyzed file has various number of columns across first rows [#72](https://github.com/datagouv/csv-detective/pull/72)
-
-## 0.6.7 (2024-01-15)
-
-- Add logs for columns that would take too much time within a specific test [#70](https://github.com/datagouv/csv-detective/pull/70)
-- Refactor some tests to improve performances and make detection more accurate [#69](https://github.com/datagouv/csv-detective/pull/69)
-- Try alternative ways to clean text [#71](https://github.com/datagouv/csv-detective/pull/71)
-
-## 0.6.6 (2023-11-24)
-
-- Change setup.py to better convey dependencies [#67](https://github.com/datagouv/csv-detective/pull/67)
-
-## 0.6.5 (2023-11-17)
-
-- Change encoding detection for faust-cchardet (forked from cchardet) [#66](https://github.com/etalab/csv-detective/pull/66)
-
-## 0.6.4 (2023-10-18)
-
-- Better handling of ints and floats (now not accepting blanks and "+" in string) [#62](https://github.com/etalab/csv-detective/pull/62)
-
-## 0.6.3 (2023-03-23)
-
-- Faster routine [#59](https://github.com/etalab/csv-detective/pull/59)
-
-## 0.6.2 (2023-02-10)
-
-- Catch OverflowError for latitude and longitude checks [#58](https://github.com/etalab/csv-detective/pull/58)
-
-## 0.6.0 (2023-02-10)
-
-- Add CI and upgrade dependencies [#49](https://github.com/etalab/csv-detective/pull/49)
-- Shuffle data before analysis [#56](https://github.com/etalab/csv-detective/pull/56)
-- Better discrimination between `code_departement` and `code_region` [#56](https://github.com/etalab/csv-detective/pull/56)
-- Add schema in output analysis [#57](https://github.com/etalab/csv-detective/pull/57)
-
-## 0.4.7 [#51](https://github.com/etalab/csv-detective/pull/51)
-
-- Allow possibility to analyze entire file instead of a limited number of rows [#48](https://github.com/etalab/csv-detective/pull/48)
-- Better boolean detection [#42](https://github.com/etalab/csv-detective/issues/42)
-- Differentiate python types and format for `date` and `datetime` [#43](https://github.com/etalab/csv-detective/issues/43)
-- Better `code_departement` and `code_commune_insee` detection [#44](https://github.com/etalab/csv-detective/issues/44)
-- Fix header line (`header_row_idx`) detection [#44](https://github.com/etalab/csv-detective/issues/44)
-- Allow possibility to get profile of csv [#46](https://github.com/etalab/csv-detective/issues/46)
-
-## 0.4.6 [#39](https://github.com/etalab/csv-detective/pull/39)
-
-- Fix tests
-- Prioritise lat / lon FR detection over more generic lat / lon.
-- To reduce false positives, prevent detection of the following if label detection is missing: `['code_departement', 'code_commune_insee', 'code_postal', 'latitude_wgs', 'longitude_wgs', 'latitude_wgs_fr_metropole', 'longitude_wgs_fr_metropole', 'latitude_l93', 'longitude_l93']`
-- Lower threshold of label detection so that if one relevant is detected in the label, it boosts the detection score.
-- Add ISO country alpha-3 and numeric detection
-- include camel case parsing in _process_text function
-- Support optional brackets in latlon format
-
-## 0.4.5 [#29](https://github.com/etalab/csv-detective/pull/29)
-
-- Use `netloc` instead of `url` in location dict
-
-## 0.4.4 [#24] (https://github.com/etalab/csv-detective/pull/28)
-
-- Prevent crash on empty CSVs
-- Add optional arguments encoding and sep to routine and routine_minio functions
-- Field detection improvements (code_csp_insee and datetime RFC 822)
-- Schema generation improvements with examples
-
-
-## 0.4.3 [#24] (https://github.com/etalab/csv-detective/pull/24)
-
-- Add uuid and MongoID detection
-- Add new function dedicated to interaction with minio data
-- Add table schema automatic generation (only on minio data)
-- Modification of calculated score (consider label detection as a boost for score)
-
-## 0.4.2 [#22] (https://github.com/etalab/csv-detective/pull/22)
-
-Add type detection by header name
-
-## 0.4.1 [#19] (https://github.com/etalab/csv-detective/pull/19)
-
-Fix bug
-* num_rows was causing problem when it was fix to other value than default - Fixed
-
-## 0.4.0 [#18] (https://github.com/etalab/csv_detective/pull/18)
-
-Add detailed output possibility
-
-Details :
-* two modes now for output report : "LIMITED" and "ALL"
-* "ALL" option give user information on found proportion for each column types and each columns
-
-## 0.3.0 [#15] (https://github.com/etalab/csv_detective/pull/15)
-
-Fix bugs
-
-Details :
-* Facilitate ML Integration
-* Add column types detection
-* Fix documentation
-
-## 0.2.1 - [#2](https://github.com/etalab/csv_detective/pull/2)
-
-Add continuous integration
-
-Details :
-* Add configuration for CircleCI
-* Add `CONTRIBUTING.md`
-* Push automatically new versions to PyPI
-* Use semantic versioning
-
-## 0.2 - [#1](https://github.com/etalab/csv_detective/pull/1)
-
-Port from python2 to python3
-
-Details :
-* Add license AGPLv3
-* Update requirements
-
-## 0.1
csv_detective-0.8.1.dev1674.dist-info/METADATA DELETED

@@ -1,268 +0,0 @@
-Metadata-Version: 2.4
-Name: csv_detective
-Version: 0.8.1.dev1674
-Summary: Detect tabular files column content
-Home-page: https://github.com/datagouv/csv_detective
-Author: Etalab
-Author-email: opendatateam@data.gouv.fr
-License: https://spdx.org/licenses/MIT.html#licenseText
-Project-URL: Source, https://github.com/datagouv/csv_detective
-Keywords: CSV data processing encoding guess parser tabular
-Classifier: Development Status :: 2 - Pre-Alpha
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Topic :: Scientific/Engineering :: Information Analysis
-Requires-Python: >=3.9
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: boto3<2,>=1.34.0
-Requires-Dist: dateparser<2,>=1.2.0
-Requires-Dist: faust-cchardet==2.1.19
-Requires-Dist: pandas<3,>=2.2.0
-Requires-Dist: python-dateutil<3,>=2.8.2
-Requires-Dist: Unidecode<2,>=1.3.6
-Requires-Dist: openpyxl==3.1.5
-Requires-Dist: xlrd==2.0.1
-Requires-Dist: odfpy==1.4.1
-Requires-Dist: requests<3,>=2.32.3
-Requires-Dist: python-magic==0.4.27
-Requires-Dist: frformat==0.4.0
-Requires-Dist: Faker>=33.0.0
-Requires-Dist: rstr==3.2.2
-Provides-Extra: dev
-Requires-Dist: pytest==8.3.0; extra == "dev"
-Requires-Dist: responses==0.25.0; extra == "dev"
-Requires-Dist: bumpx==0.3.10; extra == "dev"
-Dynamic: author
-Dynamic: author-email
-Dynamic: classifier
-Dynamic: description
-Dynamic: description-content-type
-Dynamic: home-page
-Dynamic: keywords
-Dynamic: license
-Dynamic: license-file
-Dynamic: project-url
-Dynamic: provides-extra
-Dynamic: requires-dist
-Dynamic: requires-python
-Dynamic: summary
-
-# CSV Detective
-
-This is a package to **automatically detect column content in tabular files**. The script reads either the whole file or the first few rows and performs various checks to see for each column if it matches with various content types. This is currently done through regex and string comparison.
-
-Currently supported file types: csv, xls, xlsx, ods.
-
-You can also directly feed the URL of a remote file (from data.gouv.fr for instance).
-
-## How To ?
-
-### Install the package
-
-You need to have python >= 3.9 installed. We recommend using a virtual environement.
-
-```
-pip install csv-detective
-```
-
-### Detect some columns
-
-Say you have a tabular file located at `file_path`. This is how you could use `csv_detective`:
-
-```
-# Import the csv_detective package
-from csv_detective import routine
-import os # for this example only
-
-# Replace by your file path
-file_path = os.path.join('.', 'tests', 'code_postaux_v201410.csv')
-
-# Open your file and run csv_detective
-inspection_results = routine(
-    file_path, # or file URL
-    num_rows=-1, # Value -1 will analyze all lines of your file, you can change with the number of lines you wish to analyze
-    save_results=False, # Default False. If True, it will save result output into the same directory as the analyzed file, using the same name as your file and .json extension
-    output_profile=True, # Default False. If True, returned dict will contain a property "profile" indicating profile (min, max, mean, tops...) of every column of you csv
-    output_schema=True, # Default False. If True, returned dict will contain a property "schema" containing basic [tableschema](https://specs.frictionlessdata.io/table-schema/) of your file. This can be use to validate structure of other csv which should match same structure.
-)
-```
-
-## So What Do You Get ?
-
-### Output
-
-The program creates a `Python` dictionnary with the following information :
-
-```
-{
-    "encoding": "windows-1252", # Encoding detected
-    "separator": ";", # Detected CSV separator
-    "header_row_idx": 0 # Index of the header (aka how many lines to skip to get it)
-    "headers": ['code commune INSEE', 'nom de la commune', 'code postal', "libellé d'acheminement"], # Header row
-    "total_lines": 42, # Number of rows (excluding header)
-    "nb_duplicates": 0, # Number of exact duplicates in rows
-    "heading_columns": 0, # Number of heading columns
-    "trailing_columns": 0, # Number of trailing columns
-    "categorical": ['Code commune'] # Columns that contain less than 25 different values (arbitrary threshold)
-    "columns": { # Property that conciliate detection from labels and content of a column
-        "Code commune": {
-            "python_type": "string",
-            "format": "code_commune_insee",
-            "score": 1.0
-        },
-    },
-    "columns_labels": { # Property that return detection from header columns
-        "Code commune": {
-            "python_type": "string",
-            "format": "code_commune_insee",
-            "score": 0.5
-        },
-    },
-    "columns_fields": { # Property that return detection from content columns
-        "Code commune": {
-            "python_type": "string",
-            "format": "code_commune_insee",
-            "score": 1.25
-        },
-    },
-    "profile": {
-        "column_name" : {
-            "min": 1, # only int and float
-            "max: 12, # only int and float
-            "mean": 5, # only int and float
-            "std": 5, # only int and float
-            "tops": [ # 10 most frequent values in the column
-                "xxx",
-                "yyy",
-                "..."
-            ],
-            "nb_distinct": 67, # number of distinct values
-            "nb_missing_values": 102 # number of empty cells in the column
-        }
-    },
-    "schema": { # TableSchema of the file if `output_schema` was set to `True`
-        "$schema": "https://frictionlessdata.io/schemas/table-schema.json",
-        "name": "",
-        "title": "",
-        "description": "",
-        "countryCode": "FR",
-        "homepage": "",
-        "path": "https://github.com/datagouv/csv-detective",
-        "resources": [],
-        "sources": [
-            {"title": "Spécification Tableschema", "path": "https://specs.frictionlessdata.io/table-schema"},
-            {"title": "schema.data.gouv.fr", "path": "https://schema.data.gouv.fr"}
-        ],
-        "created": "2023-02-10",
-        "lastModified": "2023-02-10",
-        "version": "0.0.1",
-        "contributors": [
-            {"title": "Table schema bot", "email": "schema@data.gouv.fr", "organisation": "data.gouv.fr", "role": "author"}
-        ],
-        "fields": [
-            {
-                "name": "Code commune",
-                "description": "Le code INSEE de la commune",
-                "example": "23150",
-                "type": "string",
-                "formatFR": "code_commune_insee",
-                "constraints": {
-                    "required": False,
-                    "pattern": "^([013-9]\\d|2[AB1-9])\\d{3}$",
-                }
-            }
-        ]
-    }
-}
-```
-
-The output slightly differs depending on the file format:
-- csv files have `encoding` and `separator`
-- xls, xls, ods files have `engine` and `sheet_name`
-
-### What Formats Can Be Detected
-
-Includes :
-
-- Communes, Départements, Régions, Pays
-- Codes Communes, Codes Postaux, Codes Departement, ISO Pays
-- Codes CSP, Description CSP, SIREN
-- E-Mails, URLs, Téléphones FR
-- Years, Dates, Jours de la Semaine FR
-- UUIDs, Mongo ObjectIds
-
-### Format detection and scoring
-For each column, 3 scores are computed for each format, the higher the score, the more likely the format:
-- the field score based on the values contained in the column (0.0 to 1.0).
-- the label score based on the header of the column (0.0 to 1.0).
-- the overall score, computed as `field_score * (1 + label_score/2)` (0.0 to 1.5).
-
-The overall score computation aims to give more weight to the column contents while
-still leveraging the column header.
-
-#### `limited_output` - Select the output mode you want for json report
-
-This option allows you to select the output mode you want to pass. To do so, you have to pass a `limited_output` argument to the `routine` function. This variable has two possible values:
-
-- `limited_output` defaults to `True` which means report will contain only detected column formats based on a pre-selected threshold proportion in data. Report result is the standard output (an example can be found above in 'Output' section).
-Only the format with highest score is present in the output.
-- `limited_output=False` means report will contain a full list of all column format possibilities for each input data columns with a value associated which match to the proportion of found column type in data. With this report, user can adjust its rules of detection based on a specific threshold and has a better vision of quality detection for each columns. Results could also be easily transformed into a dataframe (columns types in column / column names in rows) for analysis and test.
-
-## Improvement suggestions
-
-- Smarter refactors
-- Improve performances
-- Test other ways to load and process data (`pandas` alternatives)
-- Add more and more detection modules...
-
-Related ideas:
-
-- store column names to make a learning model based on column names for (possible pre-screen)
-- normalising data based on column prediction
-- entity resolution (good luck...)
-
-## Why Could This Be of Any Use ?
-
-Organisations such as [data.gouv.fr](http://data.gouv.fr) aggregate huge amounts of un-normalised data. Performing cross-examination across datasets can be difficult. This tool could help enrich the datasets metadata and facilitate linking them together.
-
-[`udata-hydra`](https://github.com/etalab/udata-hydra) is a crawler that checks, analyzes (using `csv-detective`) and APIfies all tabular files from [data.gouv.fr](http://data.gouv.fr).
-
-An early version of this analysis of all resources on data.gouv.fr can be found [here](https://github.com/Leobouloc/data.gouv-exploration).
-
-## Release
-
-The release process uses `bumpx`.
-
-```shell
-pip install -r requirements-build.txt
-```
-
-### Process
-
-1. `bumpx` will handle bumping the version according to your command (patch, minor, major)
-2. It will update the CHANGELOG according to the new version being published
-3. It will push a tag with the given version to github
-4. CircleCI will pickup this tag, build the package and publish it to pypi
-5. `bumpx` will have everything ready for the next version (version, changelog...)
-
-### Dry run
-
-```shell
-bumpx -d -v
-```
-
-### Release
-
-This will release a patch version:
-
-```shell
-bumpx -v
-```
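The removed README above defines the overall score as `field_score * (1 + label_score/2)`; a tiny illustration of that rule (the sample scores are made up):

```python
# Sketch of the scoring rule described in the README: content drives the
# score, the header acts as a bonus of at most 50%.
def overall_score(field_score: float, label_score: float) -> float:
    # field_score and label_score are in [0.0, 1.0]; result is in [0.0, 1.5]
    return field_score * (1 + label_score / 2)

print(overall_score(1.0, 0.5))  # 1.25
```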
csv_detective-0.8.1.dev1674.dist-info/licenses/LICENSE DELETED

@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2025 data.gouv.fr
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
{csv_detective-0.8.1.dev1674.dist-info → csv_detective-0.8.1.dev1703.dist-info}/WHEEL RENAMED
File without changes

{csv_detective-0.8.1.dev1674.dist-info → csv_detective-0.8.1.dev1703.dist-info}/entry_points.txt RENAMED
File without changes

{csv_detective-0.8.1.dev1674.data/data/share/csv_detective → csv_detective-0.8.1.dev1703.dist-info/licenses}/LICENSE RENAMED
File without changes