maxframe 1.0.0rc3__cp37-cp37m-win_amd64.whl → 1.0.0rc4__cp37-cp37m-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of maxframe might be problematic. Click here for more details.

Files changed (57) hide show
  1. maxframe/_utils.cp37-win_amd64.pyd +0 -0
  2. maxframe/codegen.py +1 -0
  3. maxframe/config/config.py +13 -1
  4. maxframe/conftest.py +43 -12
  5. maxframe/core/entity/executable.py +1 -1
  6. maxframe/core/graph/core.cp37-win_amd64.pyd +0 -0
  7. maxframe/dataframe/arithmetic/docstring.py +26 -2
  8. maxframe/dataframe/arithmetic/equal.py +4 -2
  9. maxframe/dataframe/arithmetic/greater.py +4 -2
  10. maxframe/dataframe/arithmetic/greater_equal.py +4 -2
  11. maxframe/dataframe/arithmetic/less.py +2 -2
  12. maxframe/dataframe/arithmetic/less_equal.py +4 -2
  13. maxframe/dataframe/arithmetic/not_equal.py +4 -2
  14. maxframe/dataframe/core.py +2 -0
  15. maxframe/dataframe/datasource/read_odps_query.py +66 -7
  16. maxframe/dataframe/datasource/read_odps_table.py +3 -1
  17. maxframe/dataframe/datasource/tests/test_datasource.py +35 -6
  18. maxframe/dataframe/datastore/to_odps.py +7 -0
  19. maxframe/dataframe/extensions/__init__.py +3 -0
  20. maxframe/dataframe/extensions/flatmap.py +326 -0
  21. maxframe/dataframe/extensions/tests/test_extensions.py +62 -1
  22. maxframe/dataframe/indexing/add_prefix_suffix.py +1 -1
  23. maxframe/dataframe/indexing/rename.py +11 -0
  24. maxframe/dataframe/initializer.py +11 -1
  25. maxframe/dataframe/misc/drop_duplicates.py +18 -1
  26. maxframe/dataframe/tests/test_initializer.py +33 -2
  27. maxframe/io/odpsio/schema.py +5 -3
  28. maxframe/io/odpsio/tableio.py +44 -38
  29. maxframe/io/odpsio/tests/test_schema.py +0 -4
  30. maxframe/io/odpsio/volumeio.py +9 -3
  31. maxframe/learn/contrib/__init__.py +2 -1
  32. maxframe/learn/contrib/graph/__init__.py +15 -0
  33. maxframe/learn/contrib/graph/connected_components.py +215 -0
  34. maxframe/learn/contrib/graph/tests/__init__.py +13 -0
  35. maxframe/learn/contrib/graph/tests/test_connected_components.py +53 -0
  36. maxframe/learn/contrib/xgboost/classifier.py +3 -3
  37. maxframe/learn/contrib/xgboost/predict.py +8 -39
  38. maxframe/learn/contrib/xgboost/train.py +4 -3
  39. maxframe/lib/mmh3.cp37-win_amd64.pyd +0 -0
  40. maxframe/opcodes.py +3 -0
  41. maxframe/protocol.py +6 -1
  42. maxframe/serialization/core.cp37-win_amd64.pyd +0 -0
  43. maxframe/session.py +9 -2
  44. maxframe/tensor/indexing/getitem.py +2 -0
  45. maxframe/tensor/merge/concatenate.py +23 -20
  46. maxframe/tensor/merge/vstack.py +5 -1
  47. maxframe/tensor/misc/transpose.py +1 -1
  48. maxframe/utils.py +34 -12
  49. {maxframe-1.0.0rc3.dist-info → maxframe-1.0.0rc4.dist-info}/METADATA +1 -1
  50. {maxframe-1.0.0rc3.dist-info → maxframe-1.0.0rc4.dist-info}/RECORD +57 -52
  51. maxframe_client/fetcher.py +10 -8
  52. maxframe_client/session/consts.py +3 -0
  53. maxframe_client/session/odps.py +84 -13
  54. maxframe_client/session/task.py +58 -20
  55. maxframe_client/tests/test_session.py +14 -2
  56. {maxframe-1.0.0rc3.dist-info → maxframe-1.0.0rc4.dist-info}/WHEEL +0 -0
  57. {maxframe-1.0.0rc3.dist-info → maxframe-1.0.0rc4.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,326 @@
1
+ # Copyright 1999-2024 Alibaba Group Holding Ltd.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Callable
16
+
17
+ import numpy as np
18
+ import pandas as pd
19
+
20
+ from maxframe import opcodes
21
+ from maxframe.core import OutputType
22
+ from maxframe.dataframe.core import DataFrame, IndexValue
23
+ from maxframe.dataframe.operators import DataFrameOperator, DataFrameOperatorMixin
24
+ from maxframe.dataframe.utils import make_dtypes, parse_index
25
+ from maxframe.serialization.serializables import (
26
+ BoolField,
27
+ DictField,
28
+ FunctionField,
29
+ TupleField,
30
+ )
31
+
32
+
33
+ class DataFrameFlatMapOperator(DataFrameOperator, DataFrameOperatorMixin):
34
+ _op_type_ = opcodes.FLATMAP
35
+
36
+ func = FunctionField("func")
37
+ raw = BoolField("raw", default=False)
38
+ args = TupleField("args", default=())
39
+ kwargs = DictField("kwargs", default={})
40
+
41
+ def __init__(self, output_types=None, **kw):
42
+ super().__init__(_output_types=output_types, **kw)
43
+
44
+ @staticmethod
45
+ def _gen_flattening_index_value(index_value, *args) -> IndexValue:
46
+ pd_index = index_value.to_pandas()
47
+ if not isinstance(pd_index, pd.MultiIndex):
48
+ # when func returns multiple rows, the index values will be copied
49
+ return parse_index(pd.Index([], dtype=pd_index.dtype), *args)
50
+ # multi index will keep the same level and types
51
+ return parse_index(
52
+ pd.MultiIndex.from_arrays([c[:0] for c in pd_index.levels]), *args
53
+ )
54
+
55
+ def _call_dataframe(self, df: DataFrame, dtypes: pd.Series):
56
+ dtypes = make_dtypes(dtypes)
57
+ index_value = self._gen_flattening_index_value(
58
+ df.index_value,
59
+ (df.key, df.index_value.key, self.func),
60
+ )
61
+ return self.new_dataframe(
62
+ [df],
63
+ shape=(np.nan, len(dtypes)),
64
+ index_value=index_value,
65
+ columns_value=parse_index(dtypes.index, store_data=True),
66
+ dtypes=dtypes,
67
+ )
68
+
69
+ def _call_series_or_index(self, series, dtypes=None):
70
+ index_value = self._gen_flattening_index_value(
71
+ series.index_value,
72
+ (series.key, series.index_value.key, self.func),
73
+ )
74
+
75
+ if self.output_types[0] == OutputType.series:
76
+ name, dtype = dtypes
77
+ return self.new_series(
78
+ [series],
79
+ dtype=dtype,
80
+ shape=(np.nan,),
81
+ index_value=index_value,
82
+ name=name,
83
+ )
84
+
85
+ dtypes = make_dtypes(dtypes)
86
+ columns_value = parse_index(dtypes.index, store_data=True)
87
+ return self.new_dataframe(
88
+ [series],
89
+ shape=(np.nan, len(dtypes)),
90
+ index_value=index_value,
91
+ columns_value=columns_value,
92
+ dtypes=dtypes,
93
+ )
94
+
95
+ def __call__(
96
+ self,
97
+ df_or_series,
98
+ dtypes=None,
99
+ output_type=None,
100
+ ):
101
+ if df_or_series.op.output_types[0] == OutputType.dataframe:
102
+ return self._call_dataframe(df_or_series, dtypes=dtypes)
103
+ else:
104
+ return self._call_series_or_index(df_or_series, dtypes=dtypes)
105
+
106
+
107
+ def df_flatmap(dataframe, func: Callable, dtypes=None, raw=False, args=(), **kwargs):
108
+ """
109
+ Apply the given function to each row and then flatten results. Use this method if your transformation returns
110
+ multiple rows for each input row.
111
+
112
+ This function applies a transformation to each row of the DataFrame, where the transformation can return zero
113
+ or multiple values, effectively flattening Python generators, list-like collections, and DataFrames.
114
+
115
+ Parameters
116
+ ----------
117
+ dataframe : DataFrame
118
+ The DataFrame to which the function will be applied.
119
+
120
+ func : Callable
121
+ Function to apply to each row of the DataFrame. It should accept a Series (or an array if `raw=True`)
122
+ representing a row and return a list or iterable of values.
123
+
124
+ dtypes : Series, dict or list
125
+ Specify dtypes of returned DataFrame.
126
+
127
+ raw : bool, default False
128
+ Determines if the row is passed as a Series or as a numpy array:
129
+
130
+ * ``False`` : passes each row as a Series to the function.
131
+ * ``True`` : the passed function will receive numpy array objects instead.
132
+
133
+ args : tuple
134
+ Positional arguments to pass to `func`.
135
+
136
+ **kwargs
137
+ Additional keyword arguments to pass as keyword arguments to `func`.
138
+
139
+ Returns
140
+ -------
141
+ DataFrame
142
+ Return DataFrame with specified `dtypes`.
143
+
144
+ Notes
145
+ -----
146
+ The `func` must return an iterable of values for each input row. The index of the resulting DataFrame will be
147
+ repeated based on the number of output rows generated by `func`.
148
+
149
+ Examples
150
+ --------
151
+ >>> import numpy as np
152
+ >>> import maxframe.dataframe as md
153
+ >>> df = md.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
154
+ >>> df.execute()
155
+ A B
156
+ 0 1 4
157
+ 1 2 5
158
+ 2 3 6
159
+
160
+ Define a function that takes a number and returns a list of two numbers:
161
+
162
+ >>> def generate_values_array(row):
163
+ ... return [row['A'] * 2, row['B'] * 3]
164
+
165
+ Define a function that takes a row and returns two rows and two columns:
166
+
167
+ >>> def generate_values_in_generator(row):
168
+ ... yield [row[0] * 2, row[1] * 4]
169
+ ... yield [row[0] * 3, row[1] * 5]
170
+
171
+ Which is equivalent to the following function returning a DataFrame:
172
+
173
+ >>> def generate_values_in_dataframe(row):
174
+ ... return pd.DataFrame([[row[0] * 2, row[1] * 4], [row[0] * 3, row[1] * 5]])
175
+
176
+ Specify `dtypes` to return the flattened results as a DataFrame:
177
+
178
+ >>> df.mf.flatmap(generate_values_array, dtypes=pd.Series({'A': 'int'})).execute()
179
+ A
180
+ 0 2
181
+ 0 12
182
+ 1 4
183
+ 1 15
184
+ 2 6
185
+ 2 18
186
+
187
+ Specify raw=True to pass input row as array:
188
+
189
+ >>> df.mf.flatmap(generate_values_in_generator, dtypes={"A": "int", "B": "int"}, raw=True).execute()
190
+ A B
191
+ 0 2 16
192
+ 0 3 20
193
+ 1 4 20
194
+ 1 6 25
195
+ 2 6 24
196
+ 2 9 30
197
+ """
198
+ if dtypes is None or len(dtypes) == 0:
199
+ raise TypeError(
200
+ "Cannot determine {dtypes} by calculating with enumerate data, "
201
+ "please specify it as arguments"
202
+ )
203
+
204
+ if not isinstance(func, Callable):
205
+ raise TypeError("function must be a callable object")
206
+
207
+ output_types = [OutputType.dataframe]
208
+ op = DataFrameFlatMapOperator(
209
+ func=func, raw=raw, output_types=output_types, args=args, kwargs=kwargs
210
+ )
211
+ return op(
212
+ dataframe,
213
+ dtypes=dtypes,
214
+ )
215
+
216
+
217
+ def series_flatmap(
218
+ series, func: Callable, dtypes=None, dtype=None, name=None, args=(), **kwargs
219
+ ):
220
+ """
221
+ Apply the given function to each row and then flatten results. Use this method if your transformation returns
222
+ multiple rows for each input row.
223
+
224
+ This function applies a transformation to each element of the Series, where the transformation can return zero
225
+ or multiple values, effectively flattening Python generators, list-like collections, and DataFrames.
226
+
227
+ Parameters
228
+ ----------
229
+ series : Series
230
+ The series to which the function will be applied.
231
+
232
+ func : Callable
233
+ Function to apply to each element of the Series. It should accept a scalar value
234
+ (or an array if `raw=True`) and return a list or iterable of values.
235
+
236
+ dtypes : Series, default None
237
+ Specify dtypes of returned DataFrame. Can't work with dtype.
238
+
239
+ dtype : numpy.dtype, default None
240
+ Specify dtype of returned Series. Can't work with dtypes.
241
+
242
+ name : str, default None
243
+ Specify name of the returned Series.
244
+
245
+ args : tuple
246
+ Positional arguments to pass to `func`.
247
+
248
+ **kwargs
249
+ Additional keyword arguments to pass as keyword arguments to `func`.
250
+
251
+ Returns
252
+ -------
253
+ DataFrame or Series
254
+ Result of DataFrame when dtypes specified, else Series.
255
+
256
+ Notes
257
+ -----
258
+ The `func` must return an iterable of values for each input element. If `dtypes` is specified,
259
+ `flatmap` will return a DataFrame; if `dtype` and `name` are specified, a Series will be returned. The index of
260
+ the resulting DataFrame/Series will be repeated based on the number of output rows generated by `func`.
261
+
262
+ Examples
263
+ --------
264
+ >>> import numpy as np
265
+ >>> import maxframe.dataframe as md
266
+ >>> df = md.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
267
+ >>> df.execute()
268
+ A B
269
+ 0 1 4
270
+ 1 2 5
271
+ 2 3 6
272
+
273
+ Define a function that takes a number and returns a list of two numbers:
274
+
275
+ >>> def generate_values_array(x):
276
+ ... return [x * 2, x * 3]
277
+
278
+ >>> def generate_values_in_generator(x):
279
+ ... yield pd.Series([x * 2, x * 4])
280
+ ... yield pd.Series([x * 3, x * 5])
281
+
282
+ Specify `dtype` with a function which returns a list to return more than one element as a Series:
283
+
284
+ >>> df['A'].mf.flatmap(generate_values_array, dtype="int", name="C").execute()
285
+ 0 2
286
+ 0 3
287
+ 1 4
288
+ 1 6
289
+ 2 6
290
+ 2 9
291
+ Name: C, dtype: int64
292
+
293
+ Specify `dtypes` to return multi columns as a DataFrame:
294
+
295
+ >>> df['A'].mf.flatmap(generate_values_in_generator, dtypes={"A": "int", "B": "int"}).execute()
296
+ A B
297
+ 0 2 4
298
+ 0 3 5
299
+ 1 4 8
300
+ 1 6 10
301
+ 2 6 12
302
+ 2 9 15
303
+ """
304
+
305
+ if dtypes and dtype:
306
+ raise ValueError("Both dtypes and dtype cannot be specified at the same time.")
307
+
308
+ dtypes = (name, dtype) if dtype is not None else dtypes
309
+ if dtypes is None:
310
+ raise TypeError(
311
+ "Cannot determine {dtypes} or {dtype} by calculating with enumerate data, "
312
+ "please specify it as arguments"
313
+ )
314
+
315
+ if not isinstance(func, Callable):
316
+ raise TypeError("function must be a callable object")
317
+
318
+ output_type = OutputType.series if dtype is not None else OutputType.dataframe
319
+
320
+ op = DataFrameFlatMapOperator(
321
+ func=func, raw=False, output_types=[output_type], args=args, kwargs=kwargs
322
+ )
323
+ return op(
324
+ series,
325
+ dtypes=dtypes,
326
+ )
@@ -11,11 +11,12 @@
11
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
12
  # See the License for the specific language governing permissions and
13
13
  # limitations under the License.
14
-
14
+ import numpy as np
15
15
  import pandas as pd
16
16
  import pytest
17
17
 
18
18
  from .... import dataframe as md
19
+ from ... import DataFrame
19
20
  from ...core import IndexValue
20
21
  from ..reshuffle import DataFrameReshuffle
21
22
 
@@ -36,3 +37,63 @@ def test_reshuffle():
36
37
  r = mdf.mf.reshuffle(ignore_index=True)
37
38
  assert isinstance(r.op, DataFrameReshuffle)
38
39
  assert isinstance(r.index_value.value, IndexValue.RangeIndex)
40
+
41
+
42
+ @pytest.fixture
43
+ def df1():
44
+ return DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
45
+
46
+
47
+ @pytest.fixture
48
+ def df2():
49
+ return DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["a", "b", "c"])
50
+
51
+
52
+ @pytest.fixture
53
+ def df3():
54
+ return DataFrame(
55
+ [[1, 2, 3], [1, 2, 3], [1, 2, 3]],
56
+ columns=["a", "b", "c"],
57
+ index=pd.MultiIndex.from_arrays([[1, 2, 3], [1, 2, 3]], names=["A", "B"]),
58
+ )
59
+
60
+
61
+ def test_flatmap(df1, df2, df3):
62
+ def f(x, keys):
63
+ if x["a"] in keys:
64
+ yield [1, 0]
65
+ yield [0, 1]
66
+
67
+ apply_df = df1[["a"]].mf.flatmap(
68
+ f,
69
+ dtypes={"a": "int64", "b": "int64"},
70
+ )
71
+ assert apply_df.shape == (np.nan, 2)
72
+ assert df1.index_value.key != apply_df.index_value.key
73
+ assert isinstance(df1.index_value.to_pandas(), pd.RangeIndex)
74
+ assert not isinstance(apply_df.index_value.to_pandas(), pd.RangeIndex)
75
+ apply_df = df2[["a"]].mf.flatmap(
76
+ f,
77
+ dtypes=pd.Series(["int64", "int64"]),
78
+ )
79
+ assert apply_df.shape == (np.nan, 2)
80
+ assert df2.index_value.key != apply_df.index_value.key
81
+ with pytest.raises(TypeError):
82
+ apply_s = df3["a"].mf.flatmap(
83
+ f,
84
+ )
85
+ apply_s = df3["a"].mf.flatmap(
86
+ f,
87
+ dtype="int64",
88
+ )
89
+ assert apply_s.shape == (np.nan,)
90
+ assert df3.index_value.key != apply_s.index_value.key
91
+ assert df3.key != apply_s.index_value.key
92
+ apply_s = df3["a"].mf.flatmap(
93
+ f,
94
+ output_type="dataframe",
95
+ dtypes=["int64", "int64"],
96
+ )
97
+ assert apply_s.shape == (np.nan, 2)
98
+ assert df3.index_value.key != apply_s.index_value.key
99
+ assert df3.key != apply_s.index_value.key
@@ -51,7 +51,7 @@ def _get_prefix_suffix_docs(is_prefix: bool):
51
51
  Examples
52
52
  --------
53
53
  >>> import maxframe.dataframe as md
54
- >>> s = md.Series([1, 2, 3, 4])
54
+ >>> s = md.Series([1, 2, 3, 4])
55
55
  >>> s.execute()
56
56
  0 1
57
57
  1 2
@@ -248,6 +248,7 @@ def df_rename(
248
248
  )
249
249
 
250
250
 
251
+ # fixme https://github.com/aliyun/alibabacloud-odps-maxframe-client/issues/58
251
252
  def series_rename(
252
253
  series,
253
254
  index=None,
@@ -382,6 +383,7 @@ def index_rename(index, name, inplace=False):
382
383
  return ret
383
384
 
384
385
 
386
+ # fixme https://github.com/aliyun/alibabacloud-odps-maxframe-client/issues/59
385
387
  def index_set_names(index, names, level=None, inplace=False):
386
388
  """
387
389
  Set Index or MultiIndex name.
@@ -407,6 +409,15 @@ def index_set_names(index, names, level=None, inplace=False):
407
409
  See Also
408
410
  --------
409
411
  Index.rename : Able to set new names without level.
412
+
413
+ Examples
414
+ --------
415
+ >>> import maxframe.dataframe as md
416
+ >>> idx = md.Index([1, 2, 3, 4])
417
+ >>> idx.execute()
418
+ Int64Index([1, 2, 3, 4], dtype='int64')
419
+ >>> idx.set_names('quarter').execute()
420
+ Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')
410
421
  """
411
422
  op = DataFrameRename(
412
423
  index_mapper=names, level=level, output_types=get_output_types(index)
@@ -15,6 +15,7 @@
15
15
  from typing import Union
16
16
 
17
17
  import pandas as pd
18
+ from pandas.api.types import is_list_like
18
19
  from pandas.core.dtypes.common import pandas_dtype
19
20
 
20
21
  from ..core import ENTITY_TYPE
@@ -61,6 +62,8 @@ class DataFrame(_Frame, metaclass=InitializerMeta):
61
62
  num_partitions=None,
62
63
  ):
63
64
  need_repart = False
65
+ if columns is not None and not is_list_like(columns):
66
+ raise ValueError("columns must be a list-like object")
64
67
  if isinstance(data, TENSOR_TYPE):
65
68
  if chunk_size is not None:
66
69
  data = data.rechunk(chunk_size)
@@ -69,7 +72,10 @@ class DataFrame(_Frame, metaclass=InitializerMeta):
69
72
  )
70
73
  need_repart = num_partitions is not None
71
74
  elif isinstance(data, SERIES_TYPE):
72
- df = data.to_frame()
75
+ if columns is not None and len(columns) != 1:
76
+ raise ValueError("columns' length must be 1 when data is Series")
77
+ col_name = columns[0] if columns else None
78
+ df = data.to_frame(name=col_name)
73
79
  need_repart = num_partitions is not None
74
80
  elif isinstance(data, DATAFRAME_TYPE):
75
81
  if not hasattr(data, "data"):
@@ -77,6 +83,10 @@ class DataFrame(_Frame, metaclass=InitializerMeta):
77
83
  df = _Frame(data)
78
84
  else:
79
85
  df = data
86
+ if columns is not None:
87
+ if len(df.columns) != len(columns):
88
+ raise ValueError("columns' length must be equal to the data's")
89
+ df.columns = columns
80
90
  need_repart = num_partitions is not None
81
91
  elif isinstance(data, dict) and self._can_process_by_1d_tileables(data):
82
92
  # data is a dict and some value is tensor
@@ -104,7 +104,6 @@ def df_drop_duplicates(
104
104
  def series_drop_duplicates(
105
105
  series, keep="first", inplace=False, ignore_index=False, method="auto"
106
106
  ):
107
- # FIXME: https://github.com/aliyun/alibabacloud-odps-maxframe-client/issues/12
108
107
  """
109
108
  Return Series with duplicate values removed.
110
109
 
@@ -148,6 +147,24 @@ def series_drop_duplicates(
148
147
  5 hippo
149
148
  Name: animal, dtype: object
150
149
 
150
+ With the 'keep' parameter, the selection behaviour of duplicated values
151
+ can be changed. The value 'first' keeps the first occurrence for each
152
+ set of duplicated entries. The default value of keep is 'first'.
153
+ >>> s.drop_duplicates().execute()
154
+ 0 lame
155
+ 1 cow
156
+ 3 beetle
157
+ 5 hippo
158
+ Name: animal, dtype: object
159
+ The value 'last' for parameter 'keep' keeps the last occurrence for
160
+ each set of duplicated entries.
161
+ >>> s.drop_duplicates(keep='last').execute()
162
+ 1 cow
163
+ 3 beetle
164
+ 4 lame
165
+ 5 hippo
166
+ Name: animal, dtype: object
167
+
151
168
  The value ``False`` for parameter 'keep' discards all sets of
152
169
  duplicated entries. Setting the value of 'inplace' to ``True`` performs
153
170
  the operation inplace and returns ``None``.
@@ -13,12 +13,13 @@
13
13
  # limitations under the License.
14
14
 
15
15
  import pandas as pd
16
+ import pytest
16
17
 
17
18
  from ..core import DATAFRAME_TYPE, INDEX_TYPE, SERIES_TYPE
18
- from ..initializer import read_pandas
19
+ from ..initializer import DataFrame, Series, read_pandas
19
20
 
20
21
 
21
- def test_from_pandas():
22
+ def test_read_pandas():
22
23
  df_data = pd.DataFrame([["a", 1], ["b", 2]], columns=["a", "b"])
23
24
  assert isinstance(read_pandas(df_data), DATAFRAME_TYPE)
24
25
 
@@ -27,3 +28,33 @@ def test_from_pandas():
27
28
 
28
29
  idx_data = pd.Index(["a", "b"])
29
30
  assert isinstance(read_pandas(idx_data), INDEX_TYPE)
31
+
32
+
33
+ def test_init_dataframe_from_maxframe_series():
34
+ s = Series([1, 2, 3, 4], index=[1, 2, 3, 4])
35
+
36
+ df = DataFrame(s, index=s.index, columns=["col1"])
37
+
38
+ assert isinstance(df, DATAFRAME_TYPE)
39
+ assert df.dtypes.index == ["col1"]
40
+
41
+ with pytest.raises(ValueError):
42
+ DataFrame(s, index=s.index, columns=[])
43
+
44
+ with pytest.raises(ValueError):
45
+ DataFrame(s, index=s.index, columns="col1")
46
+
47
+ with pytest.raises(ValueError):
48
+ DataFrame(s, index=s.index, columns="col2")
49
+
50
+
51
+ def test_init_dataframe_from_maxframe_dataframe():
52
+ df1 = DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4]}, index=[1, 2, 3, 4])
53
+
54
+ df2 = DataFrame(df1, index=df1.index, columns=["col1", "col2"])
55
+
56
+ assert isinstance(df2, DATAFRAME_TYPE)
57
+ assert list(df2.dtypes.index) == ["col1", "col2"]
58
+
59
+ with pytest.raises(ValueError):
60
+ DataFrame(df1, index=df1.index, columns=["col1", "col2", "col3"])
@@ -54,7 +54,9 @@ _odps_type_to_arrow = {
54
54
  odps_types.double: pa.float64(),
55
55
  odps_types.date: pa.date32(),
56
56
  odps_types.datetime: pa.timestamp("ms"),
57
+ odps_types.json: pa.string(),
57
58
  odps_types.timestamp: pa.timestamp("ns"),
59
+ odps_types.timestamp_ntz: pa.timestamp("ns"),
58
60
  }
59
61
 
60
62
 
@@ -166,7 +168,7 @@ def odps_schema_to_pandas_dtypes(
166
168
  return arrow_schema.empty_table().to_pandas().dtypes
167
169
 
168
170
 
169
- def _is_scalar_object(df_obj: Any) -> bool:
171
+ def is_scalar_object(df_obj: Any) -> bool:
170
172
  return (
171
173
  isinstance(df_obj, TENSOR_TYPE) and df_obj.shape == ()
172
174
  ) or pd_types.is_scalar(df_obj)
@@ -187,7 +189,7 @@ def pandas_to_odps_schema(
187
189
  from ... import dataframe as md
188
190
  from .arrow import pandas_to_arrow
189
191
 
190
- if _is_scalar_object(df_obj):
192
+ if is_scalar_object(df_obj):
191
193
  empty_index = None
192
194
  elif hasattr(df_obj, "index_value"):
193
195
  empty_index = df_obj.index_value.to_pandas()[:0]
@@ -289,7 +291,7 @@ def build_dataframe_table_meta(
289
291
  obj_type = OutputType.series
290
292
  elif isinstance(df_obj, (md.Index, pd.Index)):
291
293
  obj_type = OutputType.index
292
- elif _is_scalar_object(df_obj):
294
+ elif is_scalar_object(df_obj):
293
295
  obj_type = OutputType.scalar
294
296
  else: # pragma: no cover
295
297
  raise TypeError(f"Cannot accept type {type(df_obj)}")