Flowfile 0.3.8-py3-none-any.whl → 0.3.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of Flowfile might be problematic.
- flowfile/__init__.py +4 -3
- flowfile/api.py +1 -1
- flowfile/web/static/assets/{CloudConnectionManager-c20a740f.js → CloudConnectionManager-d7c2c028.js} +2 -2
- flowfile/web/static/assets/{CloudStorageReader-960b400a.js → CloudStorageReader-d467329f.js} +11 -78
- flowfile/web/static/assets/{CloudStorageWriter-e3decbdd.js → CloudStorageWriter-071b8b00.js} +12 -79
- flowfile/web/static/assets/{CloudStorageWriter-49c9a4b2.css → CloudStorageWriter-b0ee067f.css} +24 -24
- flowfile/web/static/assets/ContextMenu-2dea5e27.js +41 -0
- flowfile/web/static/assets/{SettingsSection-9c836ecc.css → ContextMenu-4c74eef1.css} +0 -21
- flowfile/web/static/assets/ContextMenu-63cfa99b.css +26 -0
- flowfile/web/static/assets/ContextMenu-785554c4.js +41 -0
- flowfile/web/static/assets/ContextMenu-a51e19ea.js +41 -0
- flowfile/web/static/assets/ContextMenu-c13f91d0.css +26 -0
- flowfile/web/static/assets/{CrossJoin-41efa4cb.css → CrossJoin-1119d18e.css} +18 -18
- flowfile/web/static/assets/{CrossJoin-d67e2405.js → CrossJoin-cf68ec7a.js} +14 -84
- flowfile/web/static/assets/{DatabaseConnectionSettings-a81e0f7e.js → DatabaseConnectionSettings-435c5dd8.js} +3 -3
- flowfile/web/static/assets/{DatabaseManager-9ea35e84.js → DatabaseManager-349e33a8.js} +2 -2
- flowfile/web/static/assets/{DatabaseReader-9578bfa5.js → DatabaseReader-8075bd28.js} +14 -114
- flowfile/web/static/assets/{DatabaseReader-f50c6558.css → DatabaseReader-ae61773c.css} +0 -27
- flowfile/web/static/assets/{DatabaseWriter-19531098.js → DatabaseWriter-3e2dda89.js} +13 -74
- flowfile/web/static/assets/{ExploreData-5bdae813.css → ExploreData-2d0cf4db.css} +8 -14
- flowfile/web/static/assets/ExploreData-76ec698c.js +192 -0
- flowfile/web/static/assets/{ExternalSource-2297ef96.js → ExternalSource-609a265c.js} +8 -79
- flowfile/web/static/assets/{Filter-f211c03a.js → Filter-97cff793.js} +12 -85
- flowfile/web/static/assets/{Filter-a9d08ba1.css → Filter-f62091b3.css} +3 -3
- flowfile/web/static/assets/{Formula-4207ea31.js → Formula-09de0ec9.js} +18 -85
- flowfile/web/static/assets/{Formula-29f19d21.css → Formula-bb96803d.css} +4 -4
- flowfile/web/static/assets/{FuzzyMatch-6857de82.css → FuzzyMatch-1010f966.css} +42 -42
- flowfile/web/static/assets/{FuzzyMatch-bf120df0.js → FuzzyMatch-bdf70248.js} +16 -87
- flowfile/web/static/assets/{GraphSolver-5bb7497a.js → GraphSolver-0b5a0e05.js} +13 -159
- flowfile/web/static/assets/GraphSolver-f0cb7bfb.css +22 -0
- flowfile/web/static/assets/{Unique-b5615727.css → GroupBy-b9505323.css} +8 -8
- flowfile/web/static/assets/{GroupBy-92c81b65.js → GroupBy-eaddadde.js} +12 -75
- flowfile/web/static/assets/{Join-4e49a274.js → Join-3313371b.js} +15 -85
- flowfile/web/static/assets/{Join-f45eff22.css → Join-fd79b451.css} +20 -20
- flowfile/web/static/assets/{ManualInput-a71b52c6.css → ManualInput-3246a08d.css} +20 -20
- flowfile/web/static/assets/{ManualInput-90998ae8.js → ManualInput-e8bfc0be.js} +11 -82
- flowfile/web/static/assets/{Output-81e3e917.js → Output-7303bb09.js} +13 -243
- flowfile/web/static/assets/Output-ddc9079f.css +37 -0
- flowfile/web/static/assets/{Pivot-a3419842.js → Pivot-3b1c54ef.js} +14 -138
- flowfile/web/static/assets/Pivot-cf333e3d.css +22 -0
- flowfile/web/static/assets/PivotValidation-3bb36c8f.js +61 -0
- flowfile/web/static/assets/PivotValidation-891ddfb0.css +13 -0
- flowfile/web/static/assets/PivotValidation-c46cd420.css +13 -0
- flowfile/web/static/assets/PivotValidation-eaa819c0.js +61 -0
- flowfile/web/static/assets/{PolarsCode-72710deb.js → PolarsCode-aa12e25d.js} +13 -80
- flowfile/web/static/assets/Read-6b17491f.css +62 -0
- flowfile/web/static/assets/Read-a2bfc618.js +243 -0
- flowfile/web/static/assets/RecordCount-aa0dc082.js +53 -0
- flowfile/web/static/assets/{RecordId-10baf191.js → RecordId-48ee1a3b.js} +8 -80
- flowfile/web/static/assets/SQLQueryComponent-36cef432.css +27 -0
- flowfile/web/static/assets/SQLQueryComponent-e149dbf2.js +38 -0
- flowfile/web/static/assets/{Sample-3ed9a0ae.js → Sample-f06cb97a.js} +8 -77
- flowfile/web/static/assets/{SecretManager-0d49c0e8.js → SecretManager-37f34886.js} +2 -2
- flowfile/web/static/assets/{Select-8a02a0b3.js → Select-b60e6c47.js} +11 -85
- flowfile/web/static/assets/SettingsSection-2e4d03c4.css +21 -0
- flowfile/web/static/assets/SettingsSection-5c696bee.css +20 -0
- flowfile/web/static/assets/SettingsSection-70e5a7b1.js +53 -0
- flowfile/web/static/assets/SettingsSection-71e6b7e3.css +21 -0
- flowfile/web/static/assets/{SettingsSection-4c0f45f5.js → SettingsSection-75b6cf4f.js} +2 -40
- flowfile/web/static/assets/SettingsSection-e57a672e.js +45 -0
- flowfile/web/static/assets/{GroupBy-ab1ea74b.css → Sort-3643d625.css} +8 -8
- flowfile/web/static/assets/{Sort-f55c9f9d.js → Sort-51b1ee4d.js} +12 -97
- flowfile/web/static/assets/{TextToRows-5dbc2145.js → TextToRows-26835f8f.js} +14 -83
- flowfile/web/static/assets/{TextToRows-c92d1ec2.css → TextToRows-5d2c1190.css} +9 -9
- flowfile/web/static/assets/{UnavailableFields-a1768e52.js → UnavailableFields-88a4cd0c.js} +2 -2
- flowfile/web/static/assets/Union-4d0088eb.js +77 -0
- flowfile/web/static/assets/{Union-8d9ac7f9.css → Union-af6c3d9b.css} +6 -6
- flowfile/web/static/assets/{Unique-46b250da.js → Unique-7d554a62.js} +22 -91
- flowfile/web/static/assets/{Sort-7ccfa0fe.css → Unique-f9fb0809.css} +8 -8
- flowfile/web/static/assets/Unpivot-1e422df3.css +30 -0
- flowfile/web/static/assets/{Unpivot-25ac84cc.js → Unpivot-4668595c.js} +12 -166
- flowfile/web/static/assets/UnpivotValidation-0d240eeb.css +13 -0
- flowfile/web/static/assets/UnpivotValidation-d4f0e0e8.js +51 -0
- flowfile/web/static/assets/{ExploreData-40476474.js → VueGraphicWalker-5324d566.js} +4 -264
- flowfile/web/static/assets/VueGraphicWalker-ed5ab88b.css +6 -0
- flowfile/web/static/assets/{api-6ef0dcef.js → api-271ed117.js} +1 -1
- flowfile/web/static/assets/{api-a0abbdc7.js → api-31e4fea6.js} +1 -1
- flowfile/web/static/assets/{designer-186f2e71.css → designer-091bdc3f.css} +819 -184
- flowfile/web/static/assets/{designer-13eabd83.js → designer-bf3d9487.js} +2214 -680
- flowfile/web/static/assets/{documentation-b87e7f6f.js → documentation-4d0a1cea.js} +1 -1
- flowfile/web/static/assets/{dropDown-13564764.js → dropDown-025888df.js} +1 -1
- flowfile/web/static/assets/{fullEditor-fd2cd6f9.js → fullEditor-1df991ec.js} +2 -2
- flowfile/web/static/assets/{genericNodeSettings-71e11604.js → genericNodeSettings-d3b2b2ac.js} +3 -3
- flowfile/web/static/assets/{index-f6c15e76.js → index-d0518598.js} +210 -31
- flowfile/web/static/assets/{Output-48f81019.css → outputCsv-9cc59e0b.css} +0 -143
- flowfile/web/static/assets/outputCsv-d8457527.js +86 -0
- flowfile/web/static/assets/outputExcel-b41305c0.css +102 -0
- flowfile/web/static/assets/outputExcel-be89153e.js +56 -0
- flowfile/web/static/assets/outputParquet-cf8cf3f2.css +4 -0
- flowfile/web/static/assets/outputParquet-fabb445a.js +31 -0
- flowfile/web/static/assets/readCsv-bca3ed53.css +52 -0
- flowfile/web/static/assets/readCsv-e8359522.js +178 -0
- flowfile/web/static/assets/readExcel-dabaf51b.js +203 -0
- flowfile/web/static/assets/readExcel-e1b381ea.css +64 -0
- flowfile/web/static/assets/readParquet-cee068e2.css +19 -0
- flowfile/web/static/assets/readParquet-e0771ef2.js +26 -0
- flowfile/web/static/assets/{secretApi-dd636aa2.js → secretApi-ce823eee.js} +1 -1
- flowfile/web/static/assets/{selectDynamic-af36165e.js → selectDynamic-5476546e.js} +7 -7
- flowfile/web/static/assets/{selectDynamic-b062bc9b.css → selectDynamic-aa913ff4.css} +16 -16
- flowfile/web/static/assets/{vue-codemirror.esm-2847001e.js → vue-codemirror.esm-9ed00d50.js} +29 -33
- flowfile/web/static/assets/{vue-content-loader.es-0371da73.js → vue-content-loader.es-7bca2d9b.js} +1 -1
- flowfile/web/static/index.html +1 -1
- {flowfile-0.3.8.dist-info → flowfile-0.3.10.dist-info}/METADATA +2 -1
- {flowfile-0.3.8.dist-info → flowfile-0.3.10.dist-info}/RECORD +147 -117
- flowfile_core/configs/flow_logger.py +5 -13
- flowfile_core/configs/node_store/nodes.py +303 -44
- flowfile_core/configs/settings.py +6 -3
- flowfile_core/database/connection.py +5 -21
- flowfile_core/fileExplorer/funcs.py +239 -121
- flowfile_core/flowfile/code_generator/code_generator.py +36 -0
- flowfile_core/flowfile/flow_data_engine/flow_data_engine.py +60 -80
- flowfile_core/flowfile/flow_data_engine/flow_file_column/main.py +61 -0
- flowfile_core/flowfile/flow_data_engine/fuzzy_matching/prepare_for_fuzzy_match.py +44 -3
- flowfile_core/flowfile/flow_data_engine/subprocess_operations/models.py +3 -3
- flowfile_core/flowfile/flow_data_engine/subprocess_operations/subprocess_operations.py +33 -10
- flowfile_core/flowfile/flow_graph.py +223 -118
- flowfile_core/flowfile/flow_node/flow_node.py +56 -19
- flowfile_core/flowfile/flow_node/models.py +0 -2
- flowfile_core/flowfile/flow_node/schema_callback.py +138 -43
- flowfile_core/flowfile/graph_tree/graph_tree.py +250 -0
- flowfile_core/flowfile/graph_tree/models.py +15 -0
- flowfile_core/flowfile/handler.py +22 -3
- flowfile_core/flowfile/manage/compatibility_enhancements.py +1 -1
- flowfile_core/flowfile/{flow_data_engine/fuzzy_matching/settings_validator.py → schema_callbacks.py} +72 -16
- flowfile_core/flowfile/setting_generator/settings.py +2 -2
- flowfile_core/flowfile/util/execution_orderer.py +9 -0
- flowfile_core/flowfile/util/node_skipper.py +8 -0
- flowfile_core/main.py +4 -1
- flowfile_core/routes/routes.py +59 -10
- flowfile_core/schemas/input_schema.py +0 -1
- flowfile_core/schemas/output_model.py +5 -2
- flowfile_core/schemas/schemas.py +48 -3
- flowfile_core/schemas/transform_schema.py +28 -38
- flowfile_frame/__init__.py +1 -4
- flowfile_frame/flow_frame.py +33 -4
- flowfile_frame/flow_frame.pyi +2 -0
- flowfile_worker/__init__.py +6 -35
- flowfile_worker/funcs.py +7 -3
- flowfile_worker/main.py +5 -2
- flowfile_worker/models.py +3 -1
- flowfile_worker/routes.py +47 -5
- shared/__init__.py +15 -0
- shared/storage_config.py +243 -0
- flowfile/web/static/assets/GraphSolver-17fd26db.css +0 -68
- flowfile/web/static/assets/Pivot-f415e85f.css +0 -35
- flowfile/web/static/assets/Read-80dc1675.css +0 -197
- flowfile/web/static/assets/Read-c4059daf.js +0 -701
- flowfile/web/static/assets/RecordCount-c2b5e095.js +0 -122
- flowfile/web/static/assets/Union-f2aefdc9.js +0 -146
- flowfile/web/static/assets/Unpivot-246e9bbd.css +0 -77
- flowfile/web/static/assets/nodeTitle-988d9efe.js +0 -227
- flowfile/web/static/assets/nodeTitle-f4b12bcb.css +0 -134
- flowfile_worker/polars_fuzzy_match/matcher.py +0 -435
- flowfile_worker/polars_fuzzy_match/models.py +0 -36
- flowfile_worker/polars_fuzzy_match/pre_process.py +0 -213
- flowfile_worker/polars_fuzzy_match/process.py +0 -86
- flowfile_worker/polars_fuzzy_match/utils.py +0 -50
- {flowfile-0.3.8.dist-info → flowfile-0.3.10.dist-info}/LICENSE +0 -0
- {flowfile-0.3.8.dist-info → flowfile-0.3.10.dist-info}/WHEEL +0 -0
- {flowfile-0.3.8.dist-info → flowfile-0.3.10.dist-info}/entry_points.txt +0 -0
- {flowfile_worker/polars_fuzzy_match → flowfile_core/flowfile/graph_tree}/__init__.py +0 -0
flowfile_worker/polars_fuzzy_match/matcher.py (deleted)
@@ -1,435 +0,0 @@
-import polars as pl
-from typing import List, Optional, Tuple
-import tempfile
-from logging import Logger
-
-from flowfile_worker.polars_fuzzy_match.process import calculate_and_parse_fuzzy, process_fuzzy_frames
-from flowfile_worker.polars_fuzzy_match.pre_process import pre_process_for_fuzzy_matching
-from flowfile_worker.polars_fuzzy_match.models import FuzzyMapping
-from flowfile_worker.polars_fuzzy_match.utils import cache_polars_frame_to_temp
-from flowfile_worker.utils import collect_lazy_frame
-import polars_simed as ps
-
-
-HAS_POLARS_SIM = True
-
-
-def ensure_left_is_larger(left_df: pl.DataFrame,
-                          right_df: pl.DataFrame,
-                          left_col_name: str,
-                          right_col_name: str) -> tuple:
-    """
-    Ensures that the left dataframe is always the larger one.
-    If the right dataframe is larger, swaps them.
-
-    Args:
-        left_df: The left dataframe
-        right_df: The right dataframe
-        left_col_name: Column name for the left dataframe
-        right_col_name: Column name for the right dataframe
-
-    Returns:
-        tuple: (left_df, right_df, left_col_name, right_col_name)
-    """
-    left_frame_len = left_df.select(pl.len())[0, 0]
-    right_frame_len = right_df.select(pl.len())[0, 0]
-
-    # Swap dataframes if right is larger than left
-    if right_frame_len > left_frame_len:
-        return right_df, left_df, right_col_name, left_col_name
-
-    return left_df, right_df, left_col_name, right_col_name
-
-
-def split_dataframe(df: pl.DataFrame, max_chunk_size: int = 500_000) -> List[pl.DataFrame]:
-    """
-    Split a Polars DataFrame into multiple DataFrames with a maximum size.
-
-    Args:
-        df: The Polars DataFrame to split
-        max_chunk_size: Maximum number of rows per chunk (default: 500,000)
-
-    Returns:
-        List of Polars DataFrames, each containing at most max_chunk_size rows
-    """
-    total_rows = df.select(pl.len())[0, 0]
-
-    # If DataFrame is smaller than max_chunk_size, return it as is
-    if total_rows <= max_chunk_size:
-        return [df]
-
-    # Calculate number of chunks needed
-    num_chunks = (total_rows + max_chunk_size - 1) // max_chunk_size  # Ceiling division
-
-    chunks = []
-    for i in range(num_chunks):
-        start_idx = i * max_chunk_size
-        end_idx = min((i + 1) * max_chunk_size, total_rows)
-
-        # Extract chunk using slice
-        chunk = df.slice(start_idx, end_idx - start_idx)
-        chunks.append(chunk)
-
-    return chunks
-
-
-def cross_join_large_files(left_fuzzy_frame: pl.LazyFrame,
-                           right_fuzzy_frame: pl.LazyFrame,
-                           left_col_name: str,
-                           right_col_name: str,
-                           flowfile_logger: Logger,
-                           ) -> pl.LazyFrame:
-    if not HAS_POLARS_SIM:
-        raise Exception('The polars-sim library is required to perform this operation.')
-
-    left_df = collect_lazy_frame(left_fuzzy_frame)
-    right_df = collect_lazy_frame(right_fuzzy_frame)
-
-    left_df, right_df, left_col_name, right_col_name = ensure_left_is_larger(
-        left_df, right_df, left_col_name, right_col_name
-    )
-    left_chunks = split_dataframe(left_df, max_chunk_size=500_000)  # Reduced chunk size
-    flowfile_logger.info(f"Splitting left dataframe into {len(left_chunks)} chunks.")
-    df_matches = []
-
-    # Process each chunk combination with error handling
-    for i, left_chunk in enumerate(left_chunks):
-        chunk_matches = ps.join_sim(
-            left=left_chunk,
-            right=right_df,
-            left_on=left_col_name,
-            right_on=right_col_name,
-            top_n=100,
-            add_similarity=False,
-        )
-        flowfile_logger.info(f"Processed chunk {int(i)} with {len(chunk_matches)} matches.")
-        df_matches.append(chunk_matches)
-
-
-    # Combine all matches
-    if df_matches:
-        return pl.concat(df_matches).lazy()
-    else:
-        columns = list(set(left_df.columns).union(set(right_df.columns)))
-        return pl.DataFrame(schema={col: pl.Null for col in columns}).lazy()
-
-
-def cross_join_small_files(left_df: pl.LazyFrame, right_df: pl.LazyFrame) -> pl.LazyFrame:
-    return left_df.join(right_df, how='cross')
-
-
-def cross_join_filter_existing_fuzzy_results(left_df: pl.LazyFrame, right_df: pl.LazyFrame,
-                                             existing_matches: pl.LazyFrame,
-                                             left_col_name: str, right_col_name: str):
-    """
-    Process and filter fuzzy matching results by joining dataframes using existing match indices.
-
-    This function takes previously identified fuzzy matches (existing_matches) and performs
-    a series of operations to create a refined dataset of matches between the left and right
-    dataframes, preserving index relationships.
-
-    Parameters:
-    -----------
-    left_df : pl.LazyFrame
-        The left dataframe containing records to be matched.
-    right_df : pl.LazyFrame
-        The right dataframe containing records to be matched against.
-    existing_matches : pl.LazyFrame
-        A dataframe containing the indices of already identified matches between
-        left_df and right_df, with columns '__left_index' and '__right_index'.
-    left_col_name : str
-        The column name from left_df to include in the result.
-    right_col_name : str
-        The column name from right_df to include in the result.
-
-    Returns:
-    --------
-    pl.LazyFrame
-        A dataframe containing the unique matches between left_df and right_df,
-        with index information for both dataframes preserved. The resulting dataframe
-        includes the specified columns from both dataframes along with their respective
-        index aggregations.
-
-    Notes:
-    ------
-    The function performs these operations:
-    1. Join existing matches with both dataframes using their respective indices
-    2. Select only the relevant columns and remove duplicates
-    3. Create aggregations that preserve the relationship between values and their indices
-    4. Join these aggregations back to create the final result set
-    """
-    joined_df = (existing_matches
-                 .select(['__left_index', '__right_index'])
-                 .join(left_df, on='__left_index')
-                 .join(right_df, on='__right_index')
-                 .select(left_col_name, right_col_name, '__left_index', '__right_index')
-                 )
-    return joined_df.group_by([left_col_name, right_col_name]).agg('__left_index', '__right_index')
-
-
-def cross_join_no_existing_fuzzy_results(left_df: pl.LazyFrame, right_df: pl.LazyFrame, left_col_name: str,
-                                         right_col_name: str, temp_dir_ref: str,
-                                         flowfile_logger: Logger) -> pl.LazyFrame:
-    """
-    Generate fuzzy matching results by performing a cross join between dataframes.
-
-    This function processes the input dataframes, determines the appropriate cross join method
-    based on the size of the resulting cartesian product, and returns the cross-joined results
-    for fuzzy matching when no existing matches are provided.
-
-    Parameters:
-    -----------
-    left_df : pl.LazyFrame
-        The left dataframe containing records to be matched.
-    right_df : pl.LazyFrame
-        The right dataframe containing records to be matched against.
-    left_col_name : str
-        The column name from left_df to use for fuzzy matching.
-    right_col_name : str
-        The column name from right_df to use for fuzzy matching.
-    temp_dir_ref : str
-        Reference to a temporary directory where intermediate results can be stored
-        during processing of large dataframes.
-
-    Returns:
-    --------
-    pl.LazyFrame
-        A dataframe containing the cross join results of left_df and right_df,
-        prepared for fuzzy matching operations.
-
-    Notes:
-    ------
-    The function performs these operations:
-    1. Processes input frames using the process_fuzzy_frames helper function
-    2. Calculates the size of the cartesian product to determine processing approach
-    3. Uses either cross_join_large_files or cross_join_small_files based on the size:
-       - For cartesian products > 100M but < 1T (or 10M without polars-sim), uses large file method
-       - For smaller products, uses the small file method
-    4. Raises an exception if the cartesian product exceeds the maximum allowed size
-
-    Raises:
-    -------
-    Exception
-        If the cartesian product of the two dataframes exceeds the maximum allowed size
-        (1 trillion with polars-sim, 100 million without).
-    """
-    (left_fuzzy_frame,
-     right_fuzzy_frame,
-     left_col_name,
-     right_col_name,
-     len_left_df,
-     len_right_df) = process_fuzzy_frames(left_df=left_df, right_df=right_df, left_col_name=left_col_name,
-                                          right_col_name=right_col_name, temp_dir_ref=temp_dir_ref)
-    cartesian_size = len_left_df * len_right_df
-    max_size = 100_000_000_000_000 if HAS_POLARS_SIM else 10_000_000
-    if cartesian_size > max_size:
-        flowfile_logger.error(f'The cartesian product of the two dataframes is too large to process: {cartesian_size}')
-        raise Exception('The cartesian product of the two dataframes is too large to process.')
-    if cartesian_size > 100_000_000:
-        flowfile_logger.info('Performing approximate fuzzy match for large dataframes to reduce memory usage.')
-        cross_join_frame = cross_join_large_files(left_fuzzy_frame, right_fuzzy_frame, left_col_name=left_col_name,
-                                                  right_col_name=right_col_name, flowfile_logger=flowfile_logger)
-    else:
-        cross_join_frame = cross_join_small_files(left_fuzzy_frame, right_fuzzy_frame)
-    return cross_join_frame
-
-
-def unique_df_large(_df: pl.DataFrame | pl.LazyFrame, cols: Optional[List[str]] = None) -> pl.DataFrame:
-    """
-    Efficiently compute unique rows in large dataframes by partitioning.
-
-    This function processes large dataframes by first partitioning them by a selected column,
-    then finding unique combinations within each partition before recombining the results.
-    This approach is more memory-efficient for large datasets than calling .unique() directly.
-
-    Parameters:
-    -----------
-    _df : pl.DataFrame | pl.LazyFrame
-        The input dataframe to process. Can be either a Polars DataFrame or LazyFrame.
-    cols : Optional[List[str]]
-        The list of columns to consider when finding unique rows. If None, all columns
-        are used. The first column in this list is used as the partition column.
-
-    Returns:
-    --------
-    pl.DataFrame
-        A dataframe containing only the unique rows from the input dataframe,
-        based on the specified columns.
-
-    Notes:
-    ------
-    The function performs these operations:
-    1. Converts LazyFrame to DataFrame if necessary
-    2. Partitions the dataframe by the first column in cols (or the first column of the dataframe if cols is None)
-    3. Applies the unique operation to each partition based on the remaining columns
-    4. Concatenates the results back into a single dataframe
-    5. Frees memory by deleting intermediate objects
-
-    This implementation uses tqdm to provide a progress bar during processing,
-    which is particularly helpful for large datasets where the operation may take time.
-    """
-    if isinstance(_df, pl.LazyFrame):
-        _df = collect_lazy_frame(_df)
-    from tqdm import tqdm
-    partition_col = cols[0] if cols is not None else _df.columns[0]
-    other_cols = cols[1:] if cols is not None else _df.columns[1:]
-    partitioned_df = _df.partition_by(partition_col)
-    df = pl.concat([partition.unique(other_cols) for partition in tqdm(partitioned_df)])
-    del partitioned_df, _df
-    return df
-
-
-def combine_matches(matching_dfs: List[pl.LazyFrame]):
-    all_matching_indexes = matching_dfs[-1].select('__left_index', '__right_index')
-    for matching_df in matching_dfs:
-        all_matching_indexes = all_matching_indexes.join(matching_df, on=['__left_index', '__right_index'])
-    return all_matching_indexes
-
-
-def add_index_column(df: pl.LazyFrame, column_name: str, tempdir: str):
-    return cache_polars_frame_to_temp(df.with_row_index(name=column_name), tempdir)
-
-
-def process_fuzzy_mapping(
-        fuzzy_map: FuzzyMapping,
-        left_df: pl.LazyFrame,
-        right_df: pl.LazyFrame,
-        existing_matches: Optional[pl.LazyFrame],
-        local_temp_dir_ref: str,
-        i: int,
-        flowfile_logger: Logger,
-        existing_number_of_matches: Optional[int] = None
-) -> Tuple[pl.LazyFrame, int]:
-    """
-    Process a single fuzzy mapping to generate matching dataframes.
-
-    Args:
-        fuzzy_map: The fuzzy mapping configuration containing match columns and thresholds
-        left_df: Left dataframe with index column
-        right_df: Right dataframe with index column
-        existing_matches: Previously computed matches (or None)
-        local_temp_dir_ref: Temporary directory reference for caching interim results
-        i: Index of the current fuzzy mapping
-        flowfile_logger: Logger instance for progress tracking
-        existing_number_of_matches: Number of existing matches (if available)
-
-    Returns:
-        Tuple[pl.LazyFrame, int]: The final matching dataframe and the number of matches
-    """
-    # Determine join strategy based on existing matches
-    if existing_matches is not None:
-        existing_matches = existing_matches.select('__left_index', '__right_index')
-        flowfile_logger.info(f'Filtering existing fuzzy matches for {fuzzy_map.left_col} and {fuzzy_map.right_col}')
-        cross_join_frame = cross_join_filter_existing_fuzzy_results(
-            left_df=left_df,
-            right_df=right_df,
-            existing_matches=existing_matches,
-            left_col_name=fuzzy_map.left_col,
-            right_col_name=fuzzy_map.right_col
-        )
-    else:
-        flowfile_logger.info(f'Performing fuzzy match for {fuzzy_map.left_col} and {fuzzy_map.right_col}')
-        cross_join_frame = cross_join_no_existing_fuzzy_results(
-            left_df=left_df,
-            right_df=right_df,
-            left_col_name=fuzzy_map.left_col,
-            right_col_name=fuzzy_map.right_col,
-            temp_dir_ref=local_temp_dir_ref,
-            flowfile_logger=flowfile_logger
-        )
-
-    # Calculate fuzzy match scores
-    flowfile_logger.info(f'Calculating fuzzy match for {fuzzy_map.left_col} and {fuzzy_map.right_col}')
-    matching_df = calculate_and_parse_fuzzy(
-        mapping_table=cross_join_frame,
-        left_col_name=fuzzy_map.left_col,
-        right_col_name=fuzzy_map.right_col,
-        fuzzy_method=fuzzy_map.fuzzy_type,
-        th_score=fuzzy_map.reversed_threshold_score
-    )
-    if existing_matches is not None:
-        matching_df = matching_df.join(existing_matches, on=['__left_index', '__right_index'])
-    matching_df = cache_polars_frame_to_temp(matching_df, local_temp_dir_ref)
-    if existing_number_of_matches is None or existing_number_of_matches > 100_000_000:
-        existing_number_of_matches = matching_df.select(pl.len()).collect()[0, 0]
-    if existing_number_of_matches > 100_000_000:
-        return unique_df_large(matching_df.rename({'s': f'fuzzy_score_{i}'})).lazy(), existing_number_of_matches
-    else:
-        return matching_df.rename({'s': f'fuzzy_score_{i}'}).unique(), existing_number_of_matches
-
-
-def perform_all_fuzzy_matches(left_df: pl.LazyFrame,
-                              right_df: pl.LazyFrame,
-                              fuzzy_maps: List[FuzzyMapping],
-                              flowfile_logger: Logger,
-                              local_temp_dir_ref: str,
-                              ) -> List[pl.LazyFrame]:
-    matching_dfs = []
-    existing_matches = None
-    existing_number_of_matches = None
-    for i, fuzzy_map in enumerate(fuzzy_maps):
-        existing_matches, existing_number_of_matches = process_fuzzy_mapping(
-            fuzzy_map=fuzzy_map,
-            left_df=left_df,
-            right_df=right_df,
-            existing_matches=existing_matches,
-            local_temp_dir_ref=local_temp_dir_ref,
-            i=i,
-            flowfile_logger=flowfile_logger,
-            existing_number_of_matches=existing_number_of_matches
-        )
-        matching_dfs.append(existing_matches)
-    return matching_dfs
-
-
-def fuzzy_match_dfs(
-        left_df: pl.LazyFrame,
-        right_df: pl.LazyFrame,
-        fuzzy_maps: List[FuzzyMapping],
-        flowfile_logger: Logger
-) -> pl.DataFrame:
-    """
-    Perform fuzzy matching between two dataframes using multiple fuzzy mapping configurations.
-
-    Args:
-        left_df: Left dataframe to be matched
-        right_df: Right dataframe to be matched
-        fuzzy_maps: List of fuzzy mapping configurations
-        flowfile_logger: Logger instance for tracking progress
-
-    Returns:
-        pl.DataFrame: The final matched dataframe with all fuzzy scores
-    """
-    left_df, right_df, fuzzy_maps = pre_process_for_fuzzy_matching(left_df, right_df, fuzzy_maps, flowfile_logger)
-
-    # Create a temporary directory for caching intermediate results
-    local_temp_dir = tempfile.TemporaryDirectory()
-    local_temp_dir_ref = local_temp_dir.name
-
-    # Add index columns to both dataframes
-    left_df = add_index_column(left_df, '__left_index', local_temp_dir_ref)
-    right_df = add_index_column(right_df, '__right_index', local_temp_dir_ref)
-
-    matching_dfs = perform_all_fuzzy_matches(left_df, right_df, fuzzy_maps, flowfile_logger, local_temp_dir_ref)
-
-    # Combine all matches
-    if len(matching_dfs) > 1:
-        flowfile_logger.info('Combining fuzzy matches')
-        all_matches_df = combine_matches(matching_dfs)
-    else:
-        flowfile_logger.info('Caching fuzzy matches')
-        all_matches_df = cache_polars_frame_to_temp(matching_dfs[0], local_temp_dir_ref)
-
-    # Join matches with original dataframes
-    flowfile_logger.info('Joining fuzzy matches with original dataframes')
-    output_df = collect_lazy_frame(
-        (left_df.join(all_matches_df, on='__left_index')
-         .join(right_df, on='__right_index')
-         .drop('__right_index', '__left_index'))
-    )

-    # Clean up temporary files
-    flowfile_logger.info('Cleaning up temporary files')
-    local_temp_dir.cleanup()
-
-    return output_df
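The removed matcher bounds memory on large inputs by splitting the larger frame into chunks of at most 500,000 rows, joining each chunk against the full right frame via `polars_simed.join_sim`, and concatenating the partial results. A minimal sketch of that chunk-and-concat pattern, with `approximate_join` as a hypothetical stand-in for the similarity join:

```python
import polars as pl


def approximate_join(left: pl.DataFrame, right: pl.DataFrame) -> pl.DataFrame:
    # Hypothetical stand-in for polars_simed.join_sim; a real similarity join
    # returns only candidate pairs rather than the full cross product.
    return left.join(right, how="cross")


def chunked_join(left: pl.DataFrame, right: pl.DataFrame,
                 max_chunk_size: int = 500_000) -> pl.DataFrame:
    # Slice the (larger) left frame into bounded chunks, join each chunk
    # against the full right frame, then concatenate the partial results.
    chunks = [left.slice(offset, max_chunk_size)
              for offset in range(0, left.height, max_chunk_size)]
    return pl.concat([approximate_join(chunk, right) for chunk in chunks])
```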
flowfile_worker/polars_fuzzy_match/models.py (deleted)
@@ -1,36 +0,0 @@
-from dataclasses import dataclass
-from typing import Optional, Literal
-
-FuzzyTypeLiteral = Literal['levenshtein','jaro', 'jaro_winkler', 'hamming', 'damerau_levenshtein', 'indel']
-
-
-@dataclass
-class JoinMap:
-    left_col: str
-    right_col: str
-
-
-@dataclass
-class FuzzyMapping(JoinMap):
-    threshold_score: float = 80.0
-    fuzzy_type: FuzzyTypeLiteral = 'levenshtein'
-    perc_unique: float = 0.0
-    output_column_name: Optional[str] = None
-    valid: bool = True
-
-    def __init__(self, left_col: str, right_col: str = None, threshold_score: float = 80.0,
-                 fuzzy_type: FuzzyTypeLiteral = 'levenshtein', perc_unique: float = 0, output_column_name: str = None,
-                 valid: bool = True):
-        if right_col is None:
-            right_col = left_col
-        self.valid = valid
-        self.left_col = left_col
-        self.right_col = right_col
-        self.threshold_score = threshold_score
-        self.fuzzy_type = fuzzy_type
-        self.perc_unique = perc_unique
-        self.output_col_name = output_column_name if output_column_name is not None else f'fuzzy_score_{left_col}_{right_col}'
-
-    @property
-    def reversed_threshold_score(self) -> float:
-        return ((int(self.threshold_score) - 100) * -1) / 100
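The `reversed_threshold_score` property above converts the user-facing 0-100 similarity threshold into its complement on a 0-1 scale; matcher.py passes the result to `calculate_and_parse_fuzzy` as `th_score`. A standalone check of the arithmetic (same expression as the removed property):

```python
def reversed_threshold_score(threshold_score: float) -> float:
    # 80 -> 0.2, 95 -> 0.05, 100 -> 0.0
    return ((int(threshold_score) - 100) * -1) / 100


assert reversed_threshold_score(80.0) == 0.2
assert reversed_threshold_score(100.0) == 0.0
```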
flowfile_worker/polars_fuzzy_match/pre_process.py (deleted)
@@ -1,213 +0,0 @@
-from logging import Logger
-from typing import List, Dict, Tuple
-
-import polars as pl
-
-from flowfile_worker.polars_fuzzy_match.models import FuzzyMapping
-from flowfile_worker.utils import collect_lazy_frame
-
-
-def get_approx_uniqueness(lf: pl.LazyFrame) -> Dict[str, int]:
-    """
-    Calculate the approximate number of unique values for each column in a LazyFrame.
-
-    Args:
-        lf (pl.LazyFrame): Input LazyFrame to analyze.
-
-    Returns:
-        Dict[str, int]: Dictionary mapping column names to their approximate unique value counts.
-
-    Raises:
-        Exception: If the uniqueness calculation fails (empty result).
-    """
-    uniqueness = lf.select(pl.all().approx_n_unique()).collect().to_dicts()
-    if len(uniqueness) == 0:
-        raise Exception('Approximate uniqueness calculation failed')
-    return uniqueness[0]
-
-
-def calculate_uniqueness(a: float, b: float) -> float:
-    """
-    Calculate a combined uniqueness score from two individual uniqueness ratios.
-
-    The formula prioritizes columns with high combined uniqueness while accounting for
-    differences between the two input values.
-
-    Args:
-        a (float): First uniqueness ratio, typically from the left dataframe.
-        b (float): Second uniqueness ratio, typically from the right dataframe.
-
-    Returns:
-        float: Combined uniqueness score.
-    """
-    return ((pow(a + 0.5, 2) + pow(b + 0.5, 2)) / 2 - pow(0.5, 2)) + 0.5 * abs(a - b)
-
-
-def calculate_df_len(df: pl.LazyFrame) -> int:
-    """
-    Calculate the number of rows in a LazyFrame.
-
-    Args:
-        df (pl.LazyFrame): Input LazyFrame.
-
-    Returns:
-        int: Number of rows in the LazyFrame.
-    """
-    return collect_lazy_frame(df.select(pl.len()))[0, 0]
-
-
-def fill_perc_unique_in_fuzzy_maps(left_df: pl.LazyFrame, right_df: pl.LazyFrame, fuzzy_maps: List[FuzzyMapping],
-                                   flowfile_logger: Logger, left_len: int, right_len: int) -> List[FuzzyMapping]:
-    """
-    Calculate and set uniqueness percentages for all fuzzy mapping columns.
-
-    Computes the approximate unique value counts in both dataframes for the columns
-    specified in fuzzy_maps, then calculates a combined uniqueness score for each mapping.
-
-    Args:
-        left_df (pl.LazyFrame): Left dataframe.
-        right_df (pl.LazyFrame): Right dataframe.
-        fuzzy_maps (List[FuzzyMapping]): List of fuzzy mappings between left and right columns.
-        flowfile_logger (Logger): Logger for information output.
-        left_len (int): Number of rows in the left dataframe.
-        right_len (int): Number of rows in the right dataframe.
-
-    Returns:
-        List[FuzzyMapping]: Updated fuzzy mappings with calculated uniqueness percentages.
-    """
-    left_unique_values = get_approx_uniqueness(left_df.select(fuzzy_map.left_col for fuzzy_map in fuzzy_maps))
-    right_unique_values = get_approx_uniqueness(right_df.select(fuzzy_map.right_col for fuzzy_map in fuzzy_maps))
-    flowfile_logger.info(f'Left unique values: {left_unique_values}')
-    flowfile_logger.info(f'Right unique values: {right_unique_values}')
-    for fuzzy_map in fuzzy_maps:
-        fuzzy_map.perc_unique = calculate_uniqueness(left_unique_values[fuzzy_map.left_col] / left_len,
-                                                     right_unique_values[fuzzy_map.right_col] / right_len)
-    return fuzzy_maps
-
-
-def determine_order_of_fuzzy_maps(fuzzy_maps: List[FuzzyMapping]) -> List[FuzzyMapping]:
-    """
-    Sort fuzzy mappings by their uniqueness percentages in descending order.
-
-    This ensures that columns with higher uniqueness are prioritized in the
-    fuzzy matching process.
-
-    Args:
-        fuzzy_maps (List[FuzzyMapping]): List of fuzzy mappings between columns.
-
-    Returns:
-        List[FuzzyMapping]: Sorted list of fuzzy mappings by uniqueness (highest first).
-    """
-    return sorted(fuzzy_maps, key=lambda x: x.perc_unique, reverse=True)
-
-
-def calculate_uniqueness_rate(fuzzy_maps: List[FuzzyMapping]) -> float:
-    """
-    Calculate the total uniqueness rate across all fuzzy mappings.
-
-    Args:
-        fuzzy_maps (List[FuzzyMapping]): List of fuzzy mappings with calculated uniqueness.
-
-    Returns:
-        float: Sum of uniqueness percentages across all mappings.
-    """
-    return sum(jm.perc_unique for jm in fuzzy_maps)
-
-
-def determine_need_for_aggregation(uniqueness_rate: float, cartesian_join_number: int) -> bool:
-    """
-    Determine if aggregation is needed based on uniqueness and potential join size.
-
-    Aggregation helps prevent explosive cartesian joins when matching columns
-    have low uniqueness, which could lead to performance issues.
-
-    Args:
-        uniqueness_rate (float): Total uniqueness rate across fuzzy mappings.
-        cartesian_join_number (int): Potential size of the cartesian join (left_len * right_len).
-
-    Returns:
-        bool: True if aggregation is needed, False otherwise.
-    """
-    return uniqueness_rate < 1.2 and cartesian_join_number > 1_000_000
-
-
-def aggregate_output(left_df: pl.LazyFrame, right_df: pl.LazyFrame,
-                     fuzzy_maps: List[FuzzyMapping]) -> Tuple[pl.LazyFrame, pl.LazyFrame]:
-    """
-    Deduplicate the dataframes based on the fuzzy mapping columns.
-
-    This reduces the size of the join by removing duplicate rows when the
-    uniqueness rate is low and the potential join size is large.
-
-    Args:
-        left_df (pl.LazyFrame): Left dataframe.
-        right_df (pl.LazyFrame): Right dataframe.
-        fuzzy_maps (List[FuzzyMapping]): List of fuzzy mappings between columns.
-
-    Returns:
-        Tuple[pl.LazyFrame, pl.LazyFrame]: Deduplicated left and right dataframes.
-    """
-    left_df = left_df.unique([fuzzy_map.left_col for fuzzy_map in fuzzy_maps])
-    right_df = right_df.unique([fuzzy_map.right_col for fuzzy_map in fuzzy_maps])
-    return left_df, right_df
-
-
-def report_on_order_of_fuzzy_maps(fuzzy_maps: List[FuzzyMapping], flowfile_logger: Logger) -> None:
-    """
-    Log the order of fuzzy mappings based on uniqueness.
-    Parameters
-    ----------
-    fuzzy_maps: List[FuzzyMapping]
-    flowfile_logger: Logger
-
-    -------
-    """
-    flowfile_logger.info('Fuzzy mappings sorted by uniqueness')
-    for i, fuzzy_map in enumerate(fuzzy_maps):
-        flowfile_logger.info(f'{i}. Fuzzy mapping: {fuzzy_map.left_col} -> {fuzzy_map.right_col} '
-                             f'Uniqueness: {fuzzy_map.perc_unique}')
-
-
-def pre_process_for_fuzzy_matching(left_df: pl.LazyFrame, right_df: pl.LazyFrame,
-                                   fuzzy_maps: List[FuzzyMapping],
-                                   flowfile_logger: Logger) -> Tuple[pl.LazyFrame, pl.LazyFrame, List[FuzzyMapping]]:
-    """
-    Preprocess dataframes and fuzzy mappings for optimal fuzzy matching.
-
-    This function:
-    1. Calculates dataframe sizes
-    2. Calculates uniqueness percentages for each fuzzy mapping
-    3. Sorts the fuzzy mappings by uniqueness
-    4. Determines if aggregation is needed to prevent large cartesian joins
-    5. Performs aggregation if necessary
-
-    Args:
-        left_df (pl.LazyFrame): Left dataframe.
-        right_df (pl.LazyFrame): Right dataframe.
-        fuzzy_maps (List[FuzzyMapping]): List of fuzzy mappings between columns.
-        flowfile_logger (Logger): Logger for information output.
-
-    Returns:
-        Tuple[pl.LazyFrame, pl.LazyFrame, List[FuzzyMapping]]:
-            - Potentially modified left dataframe
-            - Potentially modified right dataframe
-            - Sorted and updated fuzzy mappings
-    """
-    flowfile_logger.info('Optimizing data and settings for fuzzy matching')
-    left_df_len = calculate_df_len(left_df)
-    right_df_len = calculate_df_len(right_df)
-    if left_df_len == 0 or right_df_len == 0:
-        return left_df, right_df, fuzzy_maps
-    fuzzy_maps = fill_perc_unique_in_fuzzy_maps(left_df, right_df, fuzzy_maps, flowfile_logger, left_df_len,
-                                                right_df_len)
-    fuzzy_maps = determine_order_of_fuzzy_maps(fuzzy_maps)
-    report_on_order_of_fuzzy_maps(fuzzy_maps, flowfile_logger)
-
-    uniqueness_rate = calculate_uniqueness_rate(fuzzy_maps)
-    flowfile_logger.info(f'Uniqueness rate: {uniqueness_rate}')
-    if determine_need_for_aggregation(uniqueness_rate, left_df_len * right_df_len):
-        flowfile_logger.warning('The join fields are not unique enough, resulting in many duplicates, '
-                                'therefore removing duplicates on the join field')
-        left_df, right_df = aggregate_output(left_df, right_df, fuzzy_maps)
-    flowfile_logger.info('Data and settings optimized for fuzzy matching')
-    return left_df, right_df, fuzzy_maps
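The `calculate_uniqueness` helper above can be read as u(a, b) = ((a + 0.5)^2 + (b + 0.5)^2) / 2 - 0.25 + 0.5 * |a - b|, and `determine_need_for_aggregation` deduplicates the join columns whenever the summed score stays below 1.2 on a potential join of more than one million row pairs. A quick numeric check (same expression as the removed helper):

```python
def calculate_uniqueness(a: float, b: float) -> float:
    return ((pow(a + 0.5, 2) + pow(b + 0.5, 2)) / 2 - pow(0.5, 2)) + 0.5 * abs(a - b)


print(calculate_uniqueness(1.0, 1.0))  # 2.0   -> comfortably above the 1.2 cutoff
print(calculate_uniqueness(0.1, 0.1))  # ~0.11 -> triggers deduplication on large joins
```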