csv-detective 0.10.2549__py3-none-any.whl → 0.10.12674__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries; it is provided for informational purposes only and reflects the changes between those versions as published.
- csv_detective/detection/formats.py +11 -38
- csv_detective/explore_csv.py +3 -2
- csv_detective/format.py +11 -4
- csv_detective/formats/adresse.py +9 -9
- csv_detective/formats/binary.py +2 -1
- csv_detective/formats/booleen.py +3 -2
- csv_detective/formats/code_commune_insee.py +12 -10
- csv_detective/formats/code_csp_insee.py +1 -1
- csv_detective/formats/code_departement.py +8 -7
- csv_detective/formats/code_fantoir.py +6 -5
- csv_detective/formats/code_import.py +1 -1
- csv_detective/formats/code_postal.py +10 -9
- csv_detective/formats/code_region.py +7 -6
- csv_detective/formats/code_rna.py +7 -6
- csv_detective/formats/code_waldec.py +1 -1
- csv_detective/formats/commune.py +5 -5
- csv_detective/formats/csp_insee.py +6 -5
- csv_detective/formats/data/insee_ape700.txt +1 -1
- csv_detective/formats/data/iso_country_code_alpha2.txt +153 -397
- csv_detective/formats/data/iso_country_code_alpha3.txt +132 -132
- csv_detective/formats/data/iso_country_code_numeric.txt +94 -94
- csv_detective/formats/date.py +18 -17
- csv_detective/formats/date_fr.py +1 -1
- csv_detective/formats/datetime_aware.py +7 -2
- csv_detective/formats/datetime_naive.py +3 -0
- csv_detective/formats/datetime_rfc822.py +1 -0
- csv_detective/formats/departement.py +15 -15
- csv_detective/formats/email.py +13 -13
- csv_detective/formats/float.py +2 -1
- csv_detective/formats/geojson.py +10 -10
- csv_detective/formats/insee_ape700.py +10 -8
- csv_detective/formats/insee_canton.py +6 -6
- csv_detective/formats/int.py +2 -1
- csv_detective/formats/iso_country_code_alpha2.py +14 -14
- csv_detective/formats/iso_country_code_alpha3.py +6 -13
- csv_detective/formats/iso_country_code_numeric.py +2 -9
- csv_detective/formats/jour_de_la_semaine.py +11 -12
- csv_detective/formats/json.py +6 -0
- csv_detective/formats/latitude_l93.py +8 -22
- csv_detective/formats/latitude_wgs.py +31 -29
- csv_detective/formats/latitude_wgs_fr_metropole.py +7 -30
- csv_detective/formats/latlon_wgs.py +30 -28
- csv_detective/formats/longitude_l93.py +8 -13
- csv_detective/formats/longitude_wgs.py +34 -19
- csv_detective/formats/longitude_wgs_fr_metropole.py +6 -19
- csv_detective/formats/lonlat_wgs.py +12 -11
- csv_detective/formats/mois_de_lannee.py +1 -1
- csv_detective/formats/money.py +1 -1
- csv_detective/formats/mongo_object_id.py +1 -1
- csv_detective/formats/pays.py +11 -13
- csv_detective/formats/percent.py +1 -1
- csv_detective/formats/region.py +13 -13
- csv_detective/formats/sexe.py +1 -1
- csv_detective/formats/siren.py +9 -10
- csv_detective/formats/siret.py +9 -9
- csv_detective/formats/tel_fr.py +7 -13
- csv_detective/formats/uai.py +17 -18
- csv_detective/formats/url.py +16 -16
- csv_detective/formats/username.py +1 -1
- csv_detective/formats/uuid.py +1 -1
- csv_detective/formats/year.py +7 -12
- csv_detective/output/dataframe.py +6 -1
- csv_detective/output/profile.py +5 -1
- csv_detective/parsing/text.py +13 -12
- {csv_detective-0.10.2549.dist-info → csv_detective-0.10.12674.dist-info}/METADATA +2 -2
- csv_detective-0.10.12674.dist-info/RECORD +92 -0
- {csv_detective-0.10.2549.dist-info → csv_detective-0.10.12674.dist-info}/WHEEL +1 -1
- csv_detective-0.10.2549.dist-info/RECORD +0 -92
- {csv_detective-0.10.2549.dist-info → csv_detective-0.10.12674.dist-info}/entry_points.txt +0 -0

csv_detective/formats/longitude_l93.py CHANGED

@@ -2,22 +2,17 @@ from frformat import LongitudeL93

 from csv_detective.formats.float import _is as is_float
 from csv_detective.formats.float import float_casting
+from csv_detective.formats.longitude_wgs import SHARED_LONGITUDE_LABELS

 proportion = 1
 tags = ["fr", "geo"]
-    "lng",
-    "xlong",
-    "x",
-    "xf",
-    "xd",
-]
+mandatory_label = True
+python_type = "float"
+labels = SHARED_LONGITUDE_LABELS | {
+    "x l93": 1,
+    "longitude lb93": 1,
+    "lambx": 1,
+}

 _longitudel93 = LongitudeL93()
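
The new labels mapping is assembled with the dict union operator: the shared WGS longitude labels are extended with a few Lambert-93-specific headers, and right-hand keys win on conflicts. A minimal sketch of that pattern, using an illustrative subset for SHARED_LONGITUDE_LABELS (only the three L93 keys come from the diff):

```python
# Illustrative stand-in for the shared labels defined in longitude_wgs.py.
SHARED_LONGITUDE_LABELS = {"longitude": 1, "lon": 0.75, "x": 0.5}

# Format-specific labels are merged in; on a duplicate key the right-hand value wins.
labels = SHARED_LONGITUDE_LABELS | {
    "x l93": 1,
    "longitude lb93": 1,
    "lambx": 1,
}

print(labels)
# {'longitude': 1, 'lon': 0.75, 'x': 0.5, 'x l93': 1, 'longitude lb93': 1, 'lambx': 1}
```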

csv_detective/formats/longitude_wgs.py CHANGED

@@ -1,32 +1,47 @@
 from csv_detective.formats.float import _is as is_float
+from csv_detective.formats.int import _is as is_int

 proportion = 1
 tags = ["geo"]
+mandatory_label = True
+python_type = "float"
+SHARED_LONGITUDE_LABELS = {
+    "longitude": 1,
+    "long": 0.75,
+    "lon": 0.75,
+    "lng": 0.5,
+    "x": 0.5,
+    "xf": 0.5,
+    "xd": 0.5,
+    "coordonnee x": 1,
+    "coord x": 1,
+    "xcoord": 1,
+    "xlon": 1,
+    "xlong": 1,
+}
+labels = SHARED_LONGITUDE_LABELS | {
+    "x gps": 1,
+    "longitude wgs84": 1,
+    "x wgs84": 1,
+    "wsg": 0.75,
+    "gps": 0.5,
+}


 def _is(val):
     try:
-        return
+        return (
+            is_float(val)
+            and -180 <= float(val) <= 180
+            # we ideally would like a certain level of decimal precision
+            # but 1.200 is saved as 1.2 in csv so we just discriminate ints
+            and not is_int(val)
+        )
+    except Exception:
         return False


 _test_values = {
-    True: ["120", "-20.
-    False: ["-200"],
+    True: ["120.8263", "-20.27", "31.0"],
+    False: ["-200", "20"],
 }
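
The rewritten check accepts a value only when it parses as a float within [-180, 180] and does not look like a plain integer (decimal precision cannot be required directly, since "1.200" comes back from a CSV as "1.2"). A self-contained sketch of the same logic; the simple is_float/is_int helpers below are stand-ins for the package's own formats.float._is and formats.int._is:

```python
# Stand-ins for csv_detective.formats.float._is and formats.int._is (assumptions).
def is_float(val: str) -> bool:
    try:
        float(val)
        return True
    except ValueError:
        return False


def is_int(val: str) -> bool:
    try:
        int(val)
        return True
    except ValueError:
        return False


def _is(val: str) -> bool:
    try:
        return (
            is_float(val)
            and -180 <= float(val) <= 180
            # integer-looking values ("20") are rejected: longitudes are expected
            # to carry decimals, even if trailing zeros are lost in the CSV
            and not is_int(val)
        )
    except Exception:
        return False


# mirrors the updated _test_values
assert all(_is(v) for v in ["120.8263", "-20.27", "31.0"])
assert not any(_is(v) for v in ["-200", "20"])
```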

csv_detective/formats/longitude_wgs_fr_metropole.py CHANGED

@@ -1,32 +1,19 @@
-from csv_detective.formats.
+from csv_detective.formats.longitude_wgs import _is as is_longitude, labels  # noqa

 proportion = 1
 tags = ["fr", "geo"]
-    "lon",
-    "long",
-    "geocodage x gps",
-    "location longitude",
-    "xlongitude",
-    "lng",
-    "xlong",
-    "x",
-    "xf",
-    "xd",
-]
+mandatory_label = True
+python_type = "float"


 def _is(val):
     try:
-        return
-    except
-        return False
-    except OverflowError:
+        return is_longitude(val) and -5.5 <= float(val) <= 9.8
+    except Exception:
         return False


 _test_values = {
-    True: ["-2.
+    True: ["-2.01", "8.0"],
     False: ["12.8"],
 }
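
The metropolitan variant now just narrows the generic WGS84 longitude check to the rough longitude range of mainland France (-5.5 to 9.8 degrees east), which is why "12.8" is a valid longitude but fails here. A small self-contained illustration; the is_longitude stand-in below only checks the [-180, 180] range and skips the integer exclusion of the real helper:

```python
# Simplified stand-in for csv_detective.formats.longitude_wgs._is (assumption):
# it only checks the global WGS84 range.
def is_longitude(val: str) -> bool:
    try:
        return -180 <= float(val) <= 180
    except ValueError:
        return False


def _is(val: str) -> bool:
    try:
        # same composition as in the diff: generic check, then mainland-France bounds
        return is_longitude(val) and -5.5 <= float(val) <= 9.8
    except Exception:
        return False


assert _is("-2.01") and _is("8.0")
assert is_longitude("12.8") and not _is("12.8")  # valid longitude, but outside the metropole
```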

csv_detective/formats/lonlat_wgs.py CHANGED

@@ -4,19 +4,20 @@ from csv_detective.formats.longitude_wgs import _is as is_lon

 proportion = 1
 tags = ["geo"]
+mandatory_label = True

-specific =
-    "lonlat",
-    "lon lat",
-    "y x",
-    "yx",
+specific = {
+    "lonlat": 1,
+    "lon lat": 1,
+    "y x": 0.75,
+    "yx": 0.75,
+}

 # we aim wide to catch exact matches if possible for the highest possible score
+labels = (
     SHARED_COORDS_LABELS
+    | specific
+    | {w + sep + suf: 1 for suf in specific for w in SHARED_COORDS_LABELS for sep in ["", " "]}
 )


@@ -31,6 +32,6 @@ def _is(val):


 _test_values = {
-    True: ["-22.6,43.
-    False: ["192,0.1", "92, -102", "[4.1,23.02", "4.1,23.02]", "-27,160.1"],
+    True: ["-22.6,43.012", "140.0,-10.70", "10.829, -40.71", "[-0.28,12.43]"],
+    False: ["192,0.1", "92, -102", "[4.1,23.02", "4.1,23.02]", "-27,160.1", "2,4", "-22, 43.0"],
 }
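
The lonlat labels are now a weighted dict built from three pieces: the shared coordinate labels, the lonlat-specific suffixes, and every "shared label + suffix" combination with and without a space. SHARED_COORDS_LABELS itself is not shown in this diff, so the sketch below uses a made-up two-entry stand-in purely to show which keys the comprehension generates:

```python
# Made-up stand-in; the real SHARED_COORDS_LABELS lives elsewhere in the package.
SHARED_COORDS_LABELS = {"coordonnees": 1, "coords": 0.75}

specific = {
    "lonlat": 1,
    "lon lat": 1,
    "y x": 0.75,
    "yx": 0.75,
}

# Same composition as in the diff: shared labels, specific labels, and every
# "<shared><sep><specific>" combination, joined with "" and with " ".
labels = (
    SHARED_COORDS_LABELS
    | specific
    | {w + sep + suf: 1 for suf in specific for w in SHARED_COORDS_LABELS for sep in ["", " "]}
)

print("coordonnees lonlat" in labels, "coordslonlat" in labels, labels["y x"])
# True True 0.75
```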

csv_detective/formats/money.py CHANGED

@@ -1,7 +1,7 @@
 from csv_detective.formats.float import _is as is_float

 proportion = 0.8
-labels =
+labels = {"budget": 1, "salaire": 1, "euro": 1, "euros": 1, "prêt": 1, "montant": 1}

 currencies = {"€", "$", "£", "¥"}


csv_detective/formats/pays.py CHANGED

@@ -2,19 +2,17 @@ from frformat import Millesime, Options, Pays

 proportion = 0.6
 tags = ["fr", "geo"]
-labels =
-    "pays",
-    "payslieu",
-    "paysorg",
-    "country",
-    "pays lib",
-    "lieupays",
-    "pays beneficiaire",
-    "nom du pays",
-    "journey end country",
-]
+labels = {
+    "pays": 1,
+    "payslieu": 1,
+    "paysorg": 1,
+    "country": 1,
+    "pays lib": 1,
+    "lieupays": 1,
+    "pays beneficiaire": 1,
+    "nom du pays": 1,
+    "libelle pays": 1,
+}

 _options = Options(
     ignore_case=True,

csv_detective/formats/percent.py CHANGED

csv_detective/formats/region.py CHANGED

@@ -2,19 +2,19 @@ from frformat import Millesime, Options, Region

 proportion = 1
 tags = ["fr", "geo"]
-labels =
-    "region",
-    "libelle region",
-    "nom region",
-    "libelle reg",
-    "nom reg",
-    "reg libusage",
-    "nom de la region",
-    "regionorg",
-    "regionlieu",
-    "reg",
-    "nom officiel region",
+labels = {
+    "region": 1,
+    "libelle region": 1,
+    "nom region": 1,
+    "libelle reg": 1,
+    "nom reg": 1,
+    "reg libusage": 1,
+    "nom de la region": 1,
+    "regionorg": 1,
+    "regionlieu": 1,
+    "reg": 0.5,
+    "nom officiel region": 1,
+}

 _extra_valid_values_set = frozenset(
     {

csv_detective/formats/sexe.py CHANGED

csv_detective/formats/siren.py CHANGED

@@ -2,16 +2,15 @@ import re

 proportion = 0.9
 tags = ["fr"]
-    "siren
-    "siren
-    "siren
-    "siren
-]
+mandatory_label = True
+labels = {
+    "siren": 1,
+    "n° siren": 1,
+    "siren organisme": 1,
+    "siren titulaire": 1,
+    "numero siren": 1,
+    "epci": 1,
+}


 def _is(val):
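
siren (like siret below) now sets mandatory_label = True, which presumably ties detection to a matching column header on top of the weighted labels above, rather than to values alone. The value check itself is regex-based but not visible in this hunk; the sketch below is an assumed plain nine-digit test, not the package's actual pattern:

```python
import re

# Assumed SIREN shape: exactly nine digits. The real csv_detective regex is not
# shown in this diff and may be stricter.
_siren_pattern = re.compile(r"^\d{9}$")


def _is(val: str) -> bool:
    return bool(_siren_pattern.match(val))


assert _is("123456789")
assert not _is("12345") and not _is("1234567890") and not _is("12345678a")
```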

csv_detective/formats/siret.py CHANGED

@@ -2,15 +2,15 @@ import re

 proportion = 0.8
 tags = ["fr"]
-    "siret
-    "num siret",
-    "siretacheteur",
-    "n° siret",
-    "coll siret",
-    "epci",
+mandatory_label = True
+labels = {
+    "siret": 1,
+    "num siret": 1,
+    "siretacheteur": 1,
+    "n° siret": 1,
+    "coll siret": 1,
+    "epci": 1,
+}


 def _is(val):

csv_detective/formats/tel_fr.py CHANGED

@@ -2,19 +2,13 @@ import re

 proportion = 0.7
 tags = ["fr"]
-labels =
-    "telephone",
-    "tel",
-    "tel mob",
-    "telephone sav",
-    "telephone1",
-    "coordinates.phone",
-    "telephone du lieu",
-]
+labels = {
+    "telephone": 1,
+    "tel": 1,
+    "phone": 1,
+    "num tel": 1,
+    "tel mob": 1,
+}


 def _is(val):

csv_detective/formats/uai.py CHANGED

@@ -2,24 +2,23 @@ import re

 proportion = 0.8
 tags = ["fr"]
-labels =
-    "uai",
-    "code etablissement",
-    "code uai",
-    "uai - identifiant",
-    "numero uai",
-    "rne",
-    "numero de l'etablissement",
-    "code rne",
-    "codeetab",
-    "code uai de l'etablissement",
-    "ref uai",
-    "cd rne",
-    "numerouai",
-    "numero d etablissement",
-]
+labels = {
+    "uai": 1,
+    "code etablissement": 1,
+    "code uai": 1,
+    "uai - identifiant": 1,
+    "numero uai": 1,
+    "rne": 0.75,
+    "numero de l'etablissement": 1,
+    "code rne": 1,
+    "codeetab": 1,
+    "code uai de l'etablissement": 1,
+    "ref uai": 1,
+    "cd rne": 1,
+    "numerouai": 1,
+    "numero d etablissement": 1,
+    "numero etablissement": 1,
+}


 def _is(val):

csv_detective/formats/url.py CHANGED

@@ -1,22 +1,22 @@
 import re

 proportion = 1
-labels =
-    "url",
-    "url source",
-    "site web",
-    "source url",
-    "site internet",
-    "remote url",
-    "web",
-    "site",
-    "lien",
-    "site data",
-    "lien url",
-    "lien vers le fichier",
-    "sitweb",
-    "interneturl",
+labels = {
+    "url": 1,
+    "url source": 1,
+    "site web": 1,
+    "source url": 1,
+    "site internet": 1,
+    "remote url": 1,
+    "web": 1,
+    "site": 1,
+    "lien": 1,
+    "site data": 1,
+    "lien url": 1,
+    "lien vers le fichier": 1,
+    "sitweb": 1,
+    "interneturl": 1,
+}

 pattern = re.compile(
     r"^((https?|ftp)://|www\.)(([A-Za-z0-9-]+\.)+[A-Za-z]{2,6})"

csv_detective/formats/uuid.py CHANGED

csv_detective/formats/year.py CHANGED

@@ -1,17 +1,12 @@
 proportion = 1
 tags = ["temp"]
-    "annee
-    "exercice",
-    "annee de publication",
-    "exercice comptable",
-    "annee de naissance",
-    "annee ouverture",
-]
+python_type = "int"
+labels = {
+    "year": 1,
+    "annee": 1,
+    "naissance": 1,
+    "exercice": 1,
+}


 def _is(val):

csv_detective/output/dataframe.py CHANGED

@@ -13,11 +13,16 @@ from csv_detective.parsing.csv import CHUNK_SIZE
 from csv_detective.utils import display_logs_depending_process_time


-def cast(value: str, _type: str) -> str | float | bool | date | datetime | bytes | None:
+def cast(value: str, _type: str) -> str | int | float | bool | date | datetime | bytes | None:
     if not isinstance(value, str) or not value:
         # None is the current default value in hydra, should we keep this?
         return None
     match _type:
+        case "string":
+            # not used here, convenience for external use (cc hydra)
+            return value
+        case "int":
+            return int(value)
         case "float":
             return float_casting(value)
         case "bool":
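
cast() now includes int in its return annotation and gains two branches: "string" returns the value untouched (a convenience for external callers such as hydra) and "int" converts with int(). A minimal stand-alone sketch of the same match-based dispatch; float_casting and the date/bool/bytes branches of the real function are simplified away:

```python
from datetime import date, datetime


def cast(value: str, _type: str) -> str | int | float | bool | date | datetime | bytes | None:
    """Simplified sketch of the dispatch shown above; only the branches visible in
    the hunk are reproduced, and float() stands in for the package's float_casting."""
    if not isinstance(value, str) or not value:
        return None
    match _type:
        case "string":
            return value  # pass-through
        case "int":
            return int(value)
        case "float":
            return float(value)
        case _:
            return None


print(cast("42", "int"), cast("3.14", "float"), cast("", "int"))
# 42 3.14 None
```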

csv_detective/output/profile.py CHANGED

@@ -81,7 +81,11 @@ def create_profile(
     del cast_col
     # for all formats we want most frequent values, nb unique values and nb missing values
     tops_bruts = (
-        (
+        (
+            table[c].value_counts()
+            if _col_values is None
+            else (s := _col_values[c]).loc[s.index.notna()].sort_values(ascending=False)
+        )
         .reset_index(name=_count_col)
         .iloc[:10]
         .to_dict(orient="records")
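
The profiling step now reuses precomputed per-column counts (_col_values) when they exist and only falls back to value_counts() otherwise, but the tail of the chain is unchanged: keep the ten most frequent values and emit them as records. A small pandas sketch of that fallback branch on made-up data (the column name and values are illustrative):

```python
import pandas as pd

# Made-up column standing in for table[c]; NaN is ignored by value_counts().
col = pd.Series(["a", "b", "a", "c", "a", "b", None], name="city")

_count_col = "count"

tops_bruts = (
    col.value_counts()          # frequency per distinct value, most frequent first
    .reset_index(name=_count_col)
    .iloc[:10]                  # keep the top 10 values only
    .to_dict(orient="records")
)

print(tops_bruts)
# with pandas 2.x: [{'city': 'a', 'count': 3}, {'city': 'b', 'count': 2}, {'city': 'c', 'count': 1}]
```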

csv_detective/parsing/text.py CHANGED

@@ -36,21 +36,22 @@ def is_word_in_string(word: str, string: str):
     return len(word) > 2 and word in string


-def header_score(header: str,
+def header_score(header: str, valid_headers: dict[str, float]) -> float:
     """Returns:
-    - 0.5 if any of the words is within the
+    - the valid header's credibility if the header is exactly in the valid list
+    - 0.5*credibility if any of the words is within the valid list
     - 0 otherwise"""
     processed_header = _process_text(header)

-    )
-    words_combination_in_header = 0.5 * (
-        any(
-            is_word_in_string(words_combination, processed_header)
-            for words_combination in words_combinations_list
-        )
+    header_matches_valid = max(
+        (valid == processed_header) * credibility for valid, credibility in valid_headers.items()
     )

-    return max(
+    return max(
+        header_matches_valid,
+        0.5
+        * max(
+            is_word_in_string(valid, processed_header) * credibility
+            for valid, credibility in valid_headers.items()
+        ),
+    )
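
header_score now works directly on the weighted labels dict: an exact match on the processed header returns that label's full credibility, a containment match (for labels longer than two characters) returns half of it, and anything else scores 0. A worked example reproducing the new function, with a simplified _process_text that only trims and lowercases (the real preprocessing is not shown in this diff):

```python
def _process_text(header: str) -> str:
    # simplified stand-in for the package's real preprocessing (assumption)
    return header.strip().lower()


def is_word_in_string(word: str, string: str) -> bool:
    return len(word) > 2 and word in string


def header_score(header: str, valid_headers: dict[str, float]) -> float:
    processed_header = _process_text(header)

    header_matches_valid = max(
        (valid == processed_header) * credibility for valid, credibility in valid_headers.items()
    )

    return max(
        header_matches_valid,
        0.5
        * max(
            is_word_in_string(valid, processed_header) * credibility
            for valid, credibility in valid_headers.items()
        ),
    )


labels = {"longitude": 1, "lon": 0.75, "x": 0.5}
print(header_score("Longitude", labels))        # 1   -> exact match, full credibility
print(header_score("longitude wgs84", labels))  # 0.5 -> "longitude" contained in the header
print(header_score("id", labels))               # 0   -> no match
```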

{csv_detective-0.10.2549.dist-info → csv_detective-0.10.12674.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: csv-detective
-Version: 0.10.2549
+Version: 0.10.12674
 Summary: Detect tabular files column content
 Keywords: CSV,data processing,encoding,guess,parser,tabular
 Author: data.gouv.fr

@@ -24,7 +24,7 @@ Requires-Dist: pytest>=8.3.0 ; extra == 'dev'
 Requires-Dist: responses>=0.25.0 ; extra == 'dev'
 Requires-Dist: ruff>=0.9.3 ; extra == 'dev'
 Requires-Python: >=3.10, <3.15
-Project-URL: Source, https://github.com/datagouv/
+Project-URL: Source, https://github.com/datagouv/csv-detective
 Provides-Extra: dev
 Description-Content-Type: text/markdown