maxframe 0.1.0b5__cp38-cp38-macosx_10_9_universal2.whl → 1.0.0rc2__cp38-cp38-macosx_10_9_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of maxframe has been flagged as a potentially problematic release.
- maxframe/_utils.cpython-38-darwin.so +0 -0
- maxframe/codegen.py +6 -2
- maxframe/config/config.py +38 -2
- maxframe/config/validators.py +1 -0
- maxframe/conftest.py +2 -0
- maxframe/core/__init__.py +0 -3
- maxframe/core/entity/__init__.py +1 -8
- maxframe/core/entity/objects.py +3 -45
- maxframe/core/graph/core.cpython-38-darwin.so +0 -0
- maxframe/core/graph/core.pyx +4 -4
- maxframe/dataframe/__init__.py +1 -1
- maxframe/dataframe/arithmetic/around.py +5 -17
- maxframe/dataframe/arithmetic/core.py +15 -7
- maxframe/dataframe/arithmetic/docstring.py +5 -55
- maxframe/dataframe/arithmetic/tests/test_arithmetic.py +22 -0
- maxframe/dataframe/core.py +5 -5
- maxframe/dataframe/datasource/date_range.py +2 -2
- maxframe/dataframe/datasource/read_odps_query.py +6 -0
- maxframe/dataframe/datasource/read_odps_table.py +2 -1
- maxframe/dataframe/datasource/tests/test_datasource.py +14 -0
- maxframe/dataframe/datastore/tests/__init__.py +13 -0
- maxframe/dataframe/datastore/tests/test_to_odps.py +48 -0
- maxframe/dataframe/datastore/to_odps.py +21 -0
- maxframe/dataframe/groupby/cum.py +0 -1
- maxframe/dataframe/groupby/tests/test_groupby.py +4 -0
- maxframe/dataframe/indexing/add_prefix_suffix.py +1 -1
- maxframe/dataframe/indexing/align.py +1 -1
- maxframe/dataframe/indexing/rename.py +3 -37
- maxframe/dataframe/indexing/sample.py +0 -1
- maxframe/dataframe/indexing/set_index.py +68 -1
- maxframe/dataframe/merge/merge.py +236 -2
- maxframe/dataframe/merge/tests/test_merge.py +123 -0
- maxframe/dataframe/misc/apply.py +5 -10
- maxframe/dataframe/misc/case_when.py +1 -1
- maxframe/dataframe/misc/describe.py +2 -2
- maxframe/dataframe/misc/drop_duplicates.py +4 -25
- maxframe/dataframe/misc/eval.py +4 -0
- maxframe/dataframe/misc/memory_usage.py +2 -2
- maxframe/dataframe/misc/pct_change.py +1 -83
- maxframe/dataframe/misc/tests/test_misc.py +23 -0
- maxframe/dataframe/misc/transform.py +1 -30
- maxframe/dataframe/misc/value_counts.py +4 -17
- maxframe/dataframe/missing/dropna.py +1 -1
- maxframe/dataframe/missing/fillna.py +5 -5
- maxframe/dataframe/sort/sort_values.py +1 -11
- maxframe/dataframe/statistics/corr.py +3 -3
- maxframe/dataframe/statistics/quantile.py +5 -17
- maxframe/dataframe/utils.py +4 -7
- maxframe/errors.py +13 -0
- maxframe/extension.py +12 -0
- maxframe/learn/contrib/xgboost/dmatrix.py +2 -2
- maxframe/learn/contrib/xgboost/predict.py +2 -2
- maxframe/learn/contrib/xgboost/train.py +2 -2
- maxframe/lib/mmh3.cpython-38-darwin.so +0 -0
- maxframe/lib/mmh3.pyi +43 -0
- maxframe/lib/wrapped_pickle.py +2 -1
- maxframe/odpsio/__init__.py +1 -1
- maxframe/odpsio/arrow.py +8 -4
- maxframe/odpsio/schema.py +10 -7
- maxframe/odpsio/tableio.py +388 -14
- maxframe/odpsio/tests/test_schema.py +16 -15
- maxframe/odpsio/tests/test_tableio.py +48 -21
- maxframe/protocol.py +148 -12
- maxframe/serialization/core.cpython-38-darwin.so +0 -0
- maxframe/serialization/core.pxd +3 -0
- maxframe/serialization/core.pyi +3 -0
- maxframe/serialization/core.pyx +54 -25
- maxframe/serialization/exception.py +1 -1
- maxframe/serialization/pandas.py +7 -2
- maxframe/serialization/serializables/core.py +158 -12
- maxframe/serialization/serializables/tests/test_serializable.py +46 -4
- maxframe/tensor/__init__.py +59 -0
- maxframe/tensor/arithmetic/tests/test_arithmetic.py +1 -1
- maxframe/tensor/base/atleast_1d.py +1 -1
- maxframe/tensor/base/unique.py +3 -3
- maxframe/tensor/reduction/count_nonzero.py +1 -1
- maxframe/tensor/statistics/quantile.py +2 -2
- maxframe/tests/test_protocol.py +34 -0
- maxframe/tests/test_utils.py +0 -12
- maxframe/tests/utils.py +11 -2
- maxframe/utils.py +24 -13
- {maxframe-0.1.0b5.dist-info → maxframe-1.0.0rc2.dist-info}/METADATA +75 -2
- {maxframe-0.1.0b5.dist-info → maxframe-1.0.0rc2.dist-info}/RECORD +91 -89
- {maxframe-0.1.0b5.dist-info → maxframe-1.0.0rc2.dist-info}/WHEEL +1 -1
- maxframe_client/__init__.py +0 -1
- maxframe_client/fetcher.py +38 -27
- maxframe_client/session/odps.py +50 -10
- maxframe_client/session/task.py +41 -20
- maxframe_client/tests/test_fetcher.py +21 -3
- maxframe_client/tests/test_session.py +49 -2
- maxframe_client/clients/spe.py +0 -104
- {maxframe-0.1.0b5.dist-info → maxframe-1.0.0rc2.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/maxframe/dataframe/datastore/tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
--- /dev/null
+++ b/maxframe/dataframe/datastore/tests/test_to_odps.py
@@ -0,0 +1,48 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from ... import DataFrame
+from ..to_odps import to_odps_table
+
+
+@pytest.fixture
+def df():
+    return DataFrame({"A": [1, 2], "B": [3, 4]})
+
+
+@pytest.mark.parametrize(
+    "kwargs",
+    [
+        {"partition_col": ["A", "C"]},
+        {"partition_col": "C"},
+        {"partition": "a=1,C=2"},
+    ],
+)
+def test_to_odps_table_validation(df, kwargs):
+    with pytest.raises(ValueError):
+        to_odps_table(df, "test_table", **kwargs)
+
+
+@pytest.mark.parametrize(
+    "kwargs",
+    [
+        {"partition_col": ["a", "B"]},
+        {"partition_col": "a"},
+        {"partition": "C=1,d=2"},
+    ],
+)
+def test_to_odps_table_vaild(df, kwargs):
+    to_odps_table(df, "test_table", **kwargs)
--- a/maxframe/dataframe/datastore/to_odps.py
+++ b/maxframe/dataframe/datastore/to_odps.py
@@ -18,10 +18,12 @@ import logging
 from typing import List, Optional, Union
 
 from odps.models import Table as ODPSTable
+from odps.types import PartitionSpec
 
 from ... import opcodes
 from ...config import options
 from ...core import OutputType
+from ...odpsio import build_dataframe_table_meta
 from ...serialization.serializables import (
     BoolField,
     FieldTypes,
@@ -147,6 +149,25 @@ def to_odps_table(
             f"index_label needs {len(df.index.nlevels)} labels "
             f"but it only have {len(index_label)}"
         )
+    table_cols = set(build_dataframe_table_meta(df).table_column_names)
+    if partition:
+        partition_intersect = (
+            set(x.lower() for x in PartitionSpec(partition).keys()) & table_cols
+        )
+        if partition_intersect:
+            raise ValueError(
+                f"Data column(s) {partition_intersect} in the dataframe"
+                " cannot be used in parameter 'partition'."
+                " Use 'partition_col' instead."
+            )
+
+    if partition_col:
+        partition_diff = set(x.lower() for x in partition_col) - table_cols
+        if partition_diff:
+            raise ValueError(
+                f"Partition column(s) {partition_diff}"
+                " is not the data column(s) of the input dataframe."
+            )
 
     op = DataFrameToODPSTable(
         dtypes=df.dtypes,
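
Usage sketch (not part of the diff): a minimal illustration of the validation added above, assuming the import path implied by the test module's relative imports; the table and column names are hypothetical. Note that column matching is case-insensitive, as the tests above exercise.

    import maxframe.dataframe as md
    from maxframe.dataframe.datastore.to_odps import to_odps_table

    df = md.DataFrame({"A": [1, 2], "B": [3, 4]})

    # "B" is a data column of df, so it may not appear in `partition`;
    # the new check raises ValueError and points to `partition_col`.
    try:
        to_odps_table(df, "demo_table", partition="B=1")
    except ValueError as err:
        print(err)

    # "C" is not a data column, so it is rejected as a partition column.
    try:
        to_odps_table(df, "demo_table", partition_col="C")
    except ValueError as err:
        print(err)
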
--- a/maxframe/dataframe/groupby/cum.py
+++ b/maxframe/dataframe/groupby/cum.py
@@ -59,7 +59,6 @@ class GroupByCumReductionOperator(DataFrameOperatorMixin, DataFrameOperator):
         out_dtypes = self._calc_out_dtypes(groupby)
 
         kw = in_df.params.copy()
-        kw["index_value"] = parse_index(pd.RangeIndex(-1), groupby.key)
         if self.output_types[0] == OutputType.dataframe:
             kw.update(
                 dict(
--- a/maxframe/dataframe/groupby/tests/test_groupby.py
+++ b/maxframe/dataframe/groupby/tests/test_groupby.py
@@ -282,14 +282,17 @@ def test_groupby_cum():
         r = getattr(mdf.groupby("b"), fun)()
         assert r.op.output_types[0] == OutputType.dataframe
         assert r.shape == (len(df1), 2)
+        assert r.index_value.key == mdf.index_value.key
 
         r = getattr(mdf.groupby("b"), fun)(axis=1)
         assert r.op.output_types[0] == OutputType.dataframe
         assert r.shape == (len(df1), 3)
+        assert r.index_value.key == mdf.index_value.key
 
     r = mdf.groupby("b").cumcount()
     assert r.op.output_types[0] == OutputType.series
     assert r.shape == (len(df1),)
+    assert r.index_value.key == mdf.index_value.key
 
     series1 = pd.Series([2, 2, 5, 7, 3, 7, 8, 8, 5, 6])
     ms1 = md.Series(series1, chunk_size=3)
@@ -298,6 +301,7 @@ def test_groupby_cum():
         r = getattr(ms1.groupby(lambda x: x % 2), fun)()
        assert r.op.output_types[0] == OutputType.series
         assert r.shape == (len(series1),)
+        assert r.index_value.key == ms1.index_value.key
 
 
 def test_groupby_fill():
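
In effect, cumulative groupby results now keep the input's index metadata instead of a fresh placeholder index; a minimal sketch mirroring the assertions above (the data is hypothetical):

    import maxframe.dataframe as md

    df = md.DataFrame({"a": [1, 2, 3], "b": [1, 1, 2]})
    r = df.groupby("b").cumsum()
    # The removed parse_index(pd.RangeIndex(-1), ...) call used to replace
    # index_value; now the result inherits the input frame's index key.
    assert r.index_value.key == df.index_value.key
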
--- a/maxframe/dataframe/indexing/align.py
+++ b/maxframe/dataframe/indexing/align.py
@@ -138,7 +138,7 @@ class DataFrameAlign(DataFrameOperator, DataFrameOperatorMixin):
             series_index = rhs.index_value.to_pandas()
             dtypes = lhs.dtypes.reindex(
                 lhs.dtypes.index.join(series_index, how=self.join)
-            ).fillna(np.dtype(
+            ).fillna(np.dtype(float))
             l_shape[1] = r_size = len(dtypes)
             col_val = r_idx_val = parse_index(dtypes.index, store_data=True)
 
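
The removed line appears cut off in this rendering; the replacement fills dtypes left missing by the reindex with a concrete float dtype. A standalone pandas sketch of what the fixed expression computes (names are illustrative, not maxframe internals):

    import numpy as np
    import pandas as pd

    lhs_dtypes = pd.Series({"a": np.dtype("int64")})
    series_index = pd.Index(["a", "b"])
    # Labels present only in series_index get no dtype from reindex;
    # fillna(np.dtype(float)) assigns them float64, matching NaN upcasting.
    dtypes = lhs_dtypes.reindex(
        lhs_dtypes.index.join(series_index, how="outer")
    ).fillna(np.dtype(float))
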
--- a/maxframe/dataframe/indexing/rename.py
+++ b/maxframe/dataframe/indexing/rename.py
@@ -17,7 +17,7 @@ import warnings
 from ... import opcodes
 from ...core import get_output_types
 from ...serialization.serializables import AnyField, StringField
-from ..core import SERIES_TYPE
+from ..core import INDEX_TYPE, SERIES_TYPE
 from ..operators import DataFrameOperator, DataFrameOperatorMixin
 from ..utils import build_df, build_series, parse_index, validate_axis
 
@@ -73,6 +73,8 @@ class DataFrameRename(DataFrameOperator, DataFrameOperatorMixin):
             params["index_value"] = parse_index(new_index)
         if df.ndim == 1:
             params["name"] = new_df.name
+        if isinstance(df, INDEX_TYPE):
+            params["names"] = new_df.names
         return self.new_tileable([df], **params)
 
 
@@ -303,11 +305,6 @@ def series_rename(
     1    2
     2    3
     Name: my_name, dtype: int64
-    >>> s.rename(lambda x: x ** 2).execute()  # function, changes labels.execute()
-    0    1
-    1    2
-    4    3
-    dtype: int64
     >>> s.rename({1: 3, 2: 5}).execute()  # mapping, changes labels.execute()
     0    1
     3    2
@@ -410,37 +407,6 @@ def index_set_names(index, names, level=None, inplace=False):
     See Also
     --------
     Index.rename : Able to set new names without level.
-
-    Examples
-    --------
-    >>> import maxframe.dataframe as md
-    >>> idx = md.Index([1, 2, 3, 4])
-    >>> idx.execute()
-    Int64Index([1, 2, 3, 4], dtype='int64')
-    >>> idx.set_names('quarter').execute()
-    Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')
-
-    >>> idx = md.MultiIndex.from_product([['python', 'cobra'],
-    ...                                   [2018, 2019]])
-    >>> idx.execute()
-    MultiIndex([('python', 2018),
-                ('python', 2019),
-                ( 'cobra', 2018),
-                ( 'cobra', 2019)],
-               )
-    >>> idx.set_names(['kind', 'year'], inplace=True)
-    >>> idx.execute()
-    MultiIndex([('python', 2018),
-                ('python', 2019),
-                ( 'cobra', 2018),
-                ( 'cobra', 2019)],
-               names=['kind', 'year'])
-    >>> idx.set_names('species', level=0).execute()
-    MultiIndex([('python', 2018),
-                ('python', 2019),
-                ( 'cobra', 2018),
-                ( 'cobra', 2019)],
-               names=['species', 'year'])
     """
     op = DataFrameRename(
         index_mapper=names, level=level, output_types=get_output_types(index)
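
For context, a minimal sketch of the path the new INDEX_TYPE branch touches; the calls are taken from the docstring examples removed above, and the comment describes the added lines:

    import maxframe.dataframe as md

    idx = md.Index([1, 2, 3, 4])
    # Index inputs go through DataFrameRename; the new branch also copies
    # `names` from the probe object into the output tileable's params.
    renamed = idx.set_names("quarter")
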
--- a/maxframe/dataframe/indexing/set_index.py
+++ b/maxframe/dataframe/indexing/set_index.py
@@ -31,7 +31,7 @@ class DataFrameSetIndex(DataFrameOperator, DataFrameOperatorMixin):
         super().__init__(_output_types=output_types, **kw)
 
     def __call__(self, df):
-        new_df = build_empty_df(df.dtypes).set_index(
+        new_df = build_empty_df(df.dtypes, index=df.index_value.to_pandas()).set_index(
             keys=self.keys,
             drop=self.drop,
             append=self.append,
@@ -47,6 +47,73 @@ class DataFrameSetIndex(DataFrameOperator, DataFrameOperatorMixin):
 
 
 def set_index(df, keys, drop=True, append=False, inplace=False, verify_integrity=False):
+    # TODO add support for set index by series, index, mt.ndarray, etc.
+    """
+    Set the DataFrame index using existing columns.
+
+    Set the DataFrame index (row labels) using one or more existing
+    columns. The index can replace the existing index or expand on it.
+
+    Parameters
+    ----------
+    keys : label or array-like or list of labels
+        This parameter can be either a single column key, or a list containing column keys.
+    drop : bool, default True
+        Delete columns to be used as the new index.
+    append : bool, default False
+        Whether to append columns to existing index.
+    inplace : bool, default False
+        If True, modifies the DataFrame in place (do not create a new object).
+    verify_integrity : bool, default False
+        Check the new index for duplicates. Otherwise defer the check until
+        necessary. Setting to False will improve the performance of this
+        method.
+
+    Returns
+    -------
+    DataFrame or None
+        Changed row labels or None if ``inplace=True``.
+
+    See Also
+    --------
+    DataFrame.reset_index : Opposite of set_index.
+    DataFrame.reindex : Change to new indices or expand indices.
+    DataFrame.reindex_like : Change to same indices as other DataFrame.
+
+    Examples
+    --------
+    >>> import maxframe.dataframe as md
+
+    >>> df = md.DataFrame({'month': [1, 4, 7, 10],
+    ...                    'year': [2012, 2014, 2013, 2014],
+    ...                    'sale': [55, 40, 84, 31]})
+    >>> df
+       month  year  sale
+    0      1  2012    55
+    1      4  2014    40
+    2      7  2013    84
+    3     10  2014    31
+
+    Set the index to become the 'month' column:
+
+    >>> df.set_index('month')
+           year  sale
+    month
+    1      2012    55
+    4      2014    40
+    7      2013    84
+    10     2014    31
+
+    Create a MultiIndex using columns 'year' and 'month':
+
+    >>> df.set_index(['year', 'month'])
+                sale
+    year  month
+    2012  1       55
+    2014  4       40
+    2013  7       84
+    2014  10      31
+    """
     op = DataFrameSetIndex(
         keys=keys,
         drop=drop,
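
A brief sketch of why the __call__ change matters, inferred from the changed line (the data is hypothetical): building the empty probe frame with the input's real index, rather than a default RangeIndex, lets pandas infer correct index metadata whenever the existing index participates in the result, e.g. with append=True.

    import maxframe.dataframe as md

    df = md.DataFrame({"month": [1, 4], "sale": [55, 40]})
    # With append=True the new index stacks "month" onto the existing index;
    # the probe frame must carry df's actual index for this inference.
    out = df.set_index("month", append=True)
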
--- a/maxframe/dataframe/merge/merge.py
+++ b/maxframe/dataframe/merge/merge.py
@@ -11,12 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import logging
+from abc import abstractmethod
 from collections import namedtuple
-from typing import Any, Dict, Optional, Tuple, Union
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
 
 import numpy as np
+from pandas import Index
 
 from ... import opcodes
 from ...core import OutputType
@@ -28,6 +29,7 @@ from ...serialization.serializables import (
     Int32Field,
     KeyField,
     NamedTupleField,
+    Serializable,
     StringField,
     TupleField,
 )
@@ -73,9 +75,208 @@ class DataFrameMergeAlign(MapReduceOperator, DataFrameOperatorMixin):
 MergeSplitInfo = namedtuple("MergeSplitInfo", "split_side, split_index, nsplits")
 
 
+class JoinHint(Serializable):
+    @abstractmethod
+    def verify_params(
+        self,
+        hint_on_df: Union[DataFrame, Series],
+        on: str,
+        is_on_index: bool,
+        how: str,
+        is_hint_for_left: bool,
+    ):
+        pass
+
+    @abstractmethod
+    def verify_can_work_with(self, other: "JoinHint"):
+        pass
+
+
+class MapJoinHint(JoinHint):
+    def verify_params(
+        self,
+        hint_on_df: Union[DataFrame, Series],
+        on: str,
+        is_on_index: bool,
+        how: str,
+        is_hint_for_left: bool,
+    ):
+        if how in ("cross", "outer"):
+            raise ValueError(
+                "Invalid join hint, MapJoinHint is not support in cross and outer join"
+            )
+
+    def verify_can_work_with(self, other: JoinHint):
+        if isinstance(other, SkewJoinHint):
+            raise ValueError(
+                "Invalid join hint, SkewJoinHint cannot work with MapJoinHint"
+            )
+
+
+class DistributedMapJoinHint(JoinHint):
+    shard_count = Int32Field("shard_count")
+    replica_count = Int32Field("replica_count", default=1)
+
+    def verify_params(
+        self,
+        hint_on_df: Union[DataFrame, Series],
+        on: str,
+        is_on_index: bool,
+        how: str,
+        is_hint_for_left: bool,
+    ):
+        if how in ("cross", "outer"):
+            raise ValueError(
+                "Invalid join hint, DistributedMapJoinHint is not support in cross and outer join"
+            )
+        if not hasattr(self, "shard_count"):
+            raise ValueError(
+                "Invalid DistributedMapJoinHint, shard_count must be specified"
+            )
+        if self.shard_count <= 0 or self.replica_count <= 0:
+            raise ValueError(
+                "Invalid DistributedMapJoinHint, shard_count and replica_count must be greater than 0"
+            )
+
+    def verify_can_work_with(self, other: JoinHint):
+        pass
+
+
+class SkewJoinHint(JoinHint):
+    columns = AnyField("columns", default=None)
+
+    @staticmethod
+    def _check_index_levels(index, level_list):
+        selected_levels = set()
+        valid_levels = set(range(index.nlevels))
+        valid_level_names = set(index.names)
+
+        for item in level_list:
+            if isinstance(item, int):
+                if item not in valid_levels:
+                    raise ValueError(f"Level {item} is not a valid index level")
+                if item in selected_levels:
+                    raise ValueError(f"Level {item} is selected multiple times")
+                selected_levels.add(item)
+            elif isinstance(item, str):
+                if item not in valid_level_names:
+                    raise ValueError(f"'{item}' is not a valid index level name")
+                level = index.names.index(item)
+                if level in selected_levels:
+                    raise ValueError(
+                        f"'{item}' (Level {level}) is selected multiple times"
+                    )
+                selected_levels.add(level)
+            else:
+                raise ValueError(f"Invalid input type: {type(item)}")
+
+    @staticmethod
+    def _check_columns(join_on_columns, column_list):
+        selected_columns = set()
+        valid_columns = set(join_on_columns)
+
+        for item in column_list:
+            if isinstance(item, int):
+                if item < 0 or item >= len(join_on_columns):
+                    raise ValueError(f"Column index {item} is out of range")
+                col_name = join_on_columns[item]
+                if col_name in selected_columns:
+                    raise ValueError(
+                        f"Column '{col_name}' (index {item}) is selected multiple times"
+                    )
+                selected_columns.add(col_name)
+            elif isinstance(item, str):
+                if item not in valid_columns:
+                    raise ValueError(f"'{item}' is not a valid column name")
+                if item in selected_columns:
+                    raise ValueError(f"Column '{item}' is selected multiple times")
+                selected_columns.add(item)
+            else:
+                raise ValueError(f"Invalid input type: {type(item)}")
+
+    def verify_params(
+        self,
+        hint_on_df: Union[DataFrame, Series],
+        on: str,
+        is_on_index: bool,
+        how: str,
+        is_hint_for_left: bool,
+    ):
+        if how in ("cross", "outer"):
+            raise ValueError(
+                "Invalid join hint, map join is not support in cross and outer join"
+            )
+        if is_hint_for_left and how == "right":
+            raise ValueError(
+                "Invalid join hint, right join can only use SkewJoinHint on right frame"
+            )
+        elif not is_hint_for_left and how == "left":
+            raise ValueError(
+                "Invalid join hint, left join can only use SkewJoinHint on left frame"
+            )
+
+        # check columns
+        if self.columns is None:
+            return
+
+        if not isinstance(self.columns, list):
+            raise TypeError("Invalid SkewJoinHint, `columns` must be a list")
+
+        if all(isinstance(item, (int, str)) for item in self.columns):
+            # if elements are int (levels) or str (index names or column names)
+            self._verify_valid_index_or_columns(
+                self.columns, hint_on_df.index_value.to_pandas(), on, is_on_index
+            )
+        elif all(isinstance(c, dict) for c in self.columns):
+            # dict with column names and values
+            cols_set = set(self.columns[0].keys())
+            if any(cols_set != set(c.keys()) for c in self.columns):
+                raise ValueError(
+                    "Invalid SkewJoinHint, all values in `columns` need to have same columns"
+                )
+
+            self._verify_valid_index_or_columns(
+                cols_set, hint_on_df.index_value.to_pandas(), on, is_on_index
+            )
+        else:
+            raise TypeError("Invalid SkewJoinHint, annot accept `columns` type")
+
+    def verify_can_work_with(self, other: JoinHint):
+        if isinstance(other, SkewJoinHint):
+            raise ValueError(
+                "Invalid join hint, SkewJoinHint cannot work with MapJoinHint"
+            )
+
+    @staticmethod
+    def _verify_valid_index_or_columns(
+        skew_join_columns: Iterable[Union[int, str]],
+        frame_index: Index,
+        on: Union[str, List[str]],
+        is_on_index: bool,
+    ):
+        if isinstance(on, str):
+            on = [on]
+        on_columns = set(frame_index.names if is_on_index else on)
+        for col in skew_join_columns:
+            if isinstance(col, int):
+                if col < 0 or col >= len(on_columns):
+                    raise ValueError(
+                        f"Invalid, SkeJoinHint, `{col}` is out of join on columns range"
+                    )
+            else:
+                if col not in on_columns:
+                    raise ValueError(
+                        f"Invalid, SkeJoinHint, '{col}' is not a valid column name"
+                    )
+
+
 class DataFrameMerge(DataFrameOperator, DataFrameOperatorMixin):
     _op_type_ = opcodes.DATAFRAME_MERGE
 
+    # workaround for new field since v1.0.0rc2
+    # todo remove this when all versions below v1.0.0rc1 is eliminated
+    _legacy_new_non_primitives = ["left_hint", "right_hint"]
+
     how = StringField("how")
     on = AnyField("on")
     left_on = AnyField("left_on")
@@ -95,6 +296,8 @@ class DataFrameMerge(DataFrameOperator, DataFrameOperatorMixin):
 
     # only for broadcast merge
     split_info = NamedTupleField("split_info")
+    left_hint = AnyField("left_hint", default=None)
+    right_hint = AnyField("right_hint", default=None)
 
     def __init__(self, copy=None, **kwargs):
         super().__init__(copy_=copy, **kwargs)
@@ -165,6 +368,8 @@ def merge(
     auto_merge_threshold: int = 8,
     bloom_filter: Union[bool, str] = "auto",
     bloom_filter_options: Dict[str, Any] = None,
+    left_hint: JoinHint = None,
+    right_hint: JoinHint = None,
 ) -> DataFrame:
     """
     Merge DataFrame or named Series objects with a database-style join.
@@ -267,6 +472,12 @@ def merge(
           when chunk size of left and right is greater than this threshold, apply bloom filter
         * "filter": "large", "small", "both", default "large"
           decides to filter on large, small or both DataFrames.
+    left_hint: JoinHint, default None
+        Join strategy to use for left frame. When data skew occurs, consider these strategies to avoid long-tail issues,
+        but use them cautiously to prevent OOM and unnecessary overhead.
+    right_hint: JoinHint, default None
+        Join strategy to use for right frame.
+
 
     Returns
     -------
@@ -381,6 +592,18 @@ def merge(
             raise ValueError(
                 f"Invalid filter {k}, available: {BLOOM_FILTER_ON_OPTIONS}"
             )
+
+    if left_hint:
+        if not isinstance(left_hint, JoinHint):
+            raise TypeError(f"left_hint must be a JoinHint, got {type(left_hint)}")
+        left_hint.verify_can_work_with(right_hint)
+        left_hint.verify_params(df, on or left_on, left_index, how, True)
+
+    if right_hint:
+        if not isinstance(right_hint, JoinHint):
+            raise TypeError(f"right_hint must be a JoinHint, got {type(right_hint)}")
+        right_hint.verify_params(right, on or right_on, right_index, how, False)
+
     op = DataFrameMerge(
         how=how,
         on=on,
@@ -399,6 +622,8 @@ def merge(
         bloom_filter=bloom_filter,
         bloom_filter_options=bloom_filter_options,
         output_types=[OutputType.dataframe],
+        left_hint=left_hint,
+        right_hint=right_hint,
     )
     return op(df, right)
 
@@ -416,6 +641,8 @@ def join(
     auto_merge_threshold: int = 8,
     bloom_filter: Union[bool, Dict] = True,
     bloom_filter_options: Dict[str, Any] = None,
+    left_hint: JoinHint = None,
+    right_hint: JoinHint = None,
 ) -> DataFrame:
     """
     Join columns of another DataFrame.
@@ -480,6 +707,11 @@ def join(
           when chunk size of left and right is greater than this threshold, apply bloom filter
         * "filter": "large", "small", "both", default "large"
           decides to filter on large, small or both DataFrames.
+    left_hint: JoinHint, default None
+        Join strategy to use for left frame. When data skew occurs, consider these strategies to avoid long-tail issues,
+        but use them cautiously to prevent OOM and unnecessary overhead.
+    right_hint: JoinHint, default None
+        Join strategy to use for right frame.
 
     Returns
     -------
@@ -590,4 +822,6 @@ def join(
         auto_merge_threshold=auto_merge_threshold,
         bloom_filter=bloom_filter,
         bloom_filter_options=bloom_filter_options,
+        left_hint=left_hint,
+        right_hint=right_hint,
     )
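
Taken together, a minimal usage sketch of the new hint parameters; the class names, constraints, and keyword arguments come from the hunks above, while the import path, constructor style, and data are assumptions:

    import maxframe.dataframe as md
    from maxframe.dataframe.merge.merge import (
        DistributedMapJoinHint,
        MapJoinHint,
        SkewJoinHint,
    )

    left = md.DataFrame({"k": [1, 2, 2, 2], "v": [1, 2, 3, 4]})
    right = md.DataFrame({"k": [1, 2], "w": [10, 20]})

    # Broadcast the small right frame; rejected for cross/outer joins.
    m1 = md.merge(left, right, on="k", how="inner", right_hint=MapJoinHint())

    # Shard the broadcast side; shard_count is required and must be > 0.
    m2 = md.merge(
        left, right, on="k", how="inner",
        right_hint=DistributedMapJoinHint(shard_count=4),
    )

    # Handle skew on the left frame; dicts pin the skewed key values.
    # A left join only accepts SkewJoinHint on the left frame.
    m3 = md.merge(
        left, right, on="k", how="left",
        left_hint=SkewJoinHint(columns=[{"k": 2}]),
    )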