maxframe-1.0.0rc2-cp310-cp310-win_amd64.whl → maxframe-1.0.0rc3-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of maxframe might be problematic.

Files changed (106)
  1. maxframe/_utils.cp310-win_amd64.pyd +0 -0
  2. maxframe/codegen.py +3 -2
  3. maxframe/config/config.py +16 -9
  4. maxframe/config/validators.py +42 -12
  5. maxframe/conftest.py +13 -2
  6. maxframe/core/__init__.py +2 -13
  7. maxframe/core/entity/__init__.py +0 -4
  8. maxframe/core/entity/objects.py +45 -2
  9. maxframe/core/entity/output_types.py +0 -3
  10. maxframe/core/entity/tests/test_objects.py +43 -0
  11. maxframe/core/entity/tileables.py +5 -78
  12. maxframe/core/graph/__init__.py +2 -2
  13. maxframe/core/graph/builder/__init__.py +0 -1
  14. maxframe/core/graph/builder/base.py +5 -4
  15. maxframe/core/graph/builder/tileable.py +4 -4
  16. maxframe/core/graph/builder/utils.py +4 -8
  17. maxframe/core/graph/core.cp310-win_amd64.pyd +0 -0
  18. maxframe/core/graph/entity.py +9 -33
  19. maxframe/core/operator/__init__.py +2 -9
  20. maxframe/core/operator/base.py +3 -5
  21. maxframe/core/operator/objects.py +0 -9
  22. maxframe/core/operator/utils.py +55 -0
  23. maxframe/dataframe/datasource/read_odps_query.py +1 -1
  24. maxframe/dataframe/datasource/read_odps_table.py +1 -1
  25. maxframe/dataframe/datastore/to_odps.py +1 -1
  26. maxframe/dataframe/operators.py +1 -17
  27. maxframe/dataframe/reduction/core.py +2 -2
  28. maxframe/io/objects/__init__.py +24 -0
  29. maxframe/io/objects/core.py +140 -0
  30. maxframe/io/objects/tensor.py +76 -0
  31. maxframe/io/objects/tests/__init__.py +13 -0
  32. maxframe/io/objects/tests/test_object_io.py +97 -0
  33. maxframe/{odpsio → io/odpsio}/__init__.py +2 -0
  34. maxframe/{odpsio → io/odpsio}/arrow.py +4 -4
  35. maxframe/{odpsio → io/odpsio}/schema.py +5 -5
  36. maxframe/{odpsio → io/odpsio}/tableio.py +10 -4
  37. maxframe/io/odpsio/tests/__init__.py +13 -0
  38. maxframe/{odpsio → io/odpsio}/tests/test_schema.py +3 -3
  39. maxframe/{odpsio → io/odpsio}/tests/test_tableio.py +3 -3
  40. maxframe/{odpsio → io/odpsio}/tests/test_volumeio.py +4 -6
  41. maxframe/io/odpsio/volumeio.py +57 -0
  42. maxframe/learn/contrib/xgboost/classifier.py +26 -2
  43. maxframe/learn/contrib/xgboost/core.py +87 -2
  44. maxframe/learn/contrib/xgboost/dmatrix.py +1 -4
  45. maxframe/learn/contrib/xgboost/predict.py +19 -5
  46. maxframe/learn/contrib/xgboost/regressor.py +3 -10
  47. maxframe/learn/contrib/xgboost/train.py +25 -15
  48. maxframe/{core/operator/fuse.py → learn/core.py} +7 -10
  49. maxframe/lib/mmh3.cp310-win_amd64.pyd +0 -0
  50. maxframe/protocol.py +1 -15
  51. maxframe/remote/core.py +4 -8
  52. maxframe/serialization/__init__.py +1 -0
  53. maxframe/serialization/core.cp310-win_amd64.pyd +0 -0
  54. maxframe/tensor/__init__.py +10 -2
  55. maxframe/tensor/arithmetic/isclose.py +1 -0
  56. maxframe/tensor/arithmetic/tests/test_arithmetic.py +21 -17
  57. maxframe/tensor/core.py +5 -136
  58. maxframe/tensor/datasource/array.py +3 -0
  59. maxframe/tensor/datasource/full.py +1 -1
  60. maxframe/tensor/datasource/tests/test_datasource.py +1 -1
  61. maxframe/tensor/indexing/flatnonzero.py +1 -1
  62. maxframe/tensor/merge/__init__.py +2 -0
  63. maxframe/tensor/merge/concatenate.py +98 -0
  64. maxframe/tensor/merge/tests/test_merge.py +30 -1
  65. maxframe/tensor/merge/vstack.py +70 -0
  66. maxframe/tensor/{base → misc}/__init__.py +2 -0
  67. maxframe/tensor/{base → misc}/atleast_1d.py +0 -2
  68. maxframe/tensor/misc/atleast_2d.py +70 -0
  69. maxframe/tensor/misc/atleast_3d.py +85 -0
  70. maxframe/tensor/misc/tests/__init__.py +13 -0
  71. maxframe/tensor/{base → misc}/transpose.py +22 -18
  72. maxframe/tensor/operators.py +1 -7
  73. maxframe/tensor/random/core.py +1 -1
  74. maxframe/tensor/reduction/count_nonzero.py +1 -0
  75. maxframe/tensor/reduction/mean.py +1 -0
  76. maxframe/tensor/reduction/nanmean.py +1 -0
  77. maxframe/tensor/reduction/nanvar.py +2 -0
  78. maxframe/tensor/reduction/tests/test_reduction.py +12 -1
  79. maxframe/tensor/reduction/var.py +2 -0
  80. maxframe/tensor/utils.py +2 -22
  81. maxframe/typing_.py +4 -1
  82. maxframe/udf.py +8 -9
  83. maxframe/utils.py +15 -61
  84. maxframe-1.0.0rc3.dist-info/METADATA +104 -0
  85. {maxframe-1.0.0rc2.dist-info → maxframe-1.0.0rc3.dist-info}/RECORD +101 -91
  86. {maxframe-1.0.0rc2.dist-info → maxframe-1.0.0rc3.dist-info}/WHEEL +1 -1
  87. maxframe_client/fetcher.py +23 -42
  88. maxframe_client/session/graph.py +8 -2
  89. maxframe_client/session/odps.py +54 -18
  90. maxframe_client/tests/test_fetcher.py +1 -1
  91. maxframe_client/tests/test_session.py +14 -2
  92. maxframe/core/entity/chunks.py +0 -68
  93. maxframe/core/entity/fuse.py +0 -73
  94. maxframe/core/graph/builder/chunk.py +0 -430
  95. maxframe/odpsio/volumeio.py +0 -95
  96. maxframe-1.0.0rc2.dist-info/METADATA +0 -177
  97. /maxframe/{odpsio → core/entity}/tests/__init__.py +0 -0
  98. /maxframe/{tensor/base/tests → io}/__init__.py +0 -0
  99. /maxframe/{odpsio → io/odpsio}/tests/test_arrow.py +0 -0
  100. /maxframe/tensor/{base → misc}/astype.py +0 -0
  101. /maxframe/tensor/{base → misc}/broadcast_to.py +0 -0
  102. /maxframe/tensor/{base → misc}/ravel.py +0 -0
  103. /maxframe/tensor/{base/tests/test_base.py → misc/tests/test_misc.py} +0 -0
  104. /maxframe/tensor/{base → misc}/unique.py +0 -0
  105. /maxframe/tensor/{base → misc}/where.py +0 -0
  106. {maxframe-1.0.0rc2.dist-info → maxframe-1.0.0rc3.dist-info}/top_level.txt +0 -0
maxframe/tensor/__init__.py CHANGED
@@ -114,7 +114,6 @@ from .arithmetic import (
 )
 from .arithmetic import truediv as true_divide
 from .arithmetic import trunc
-from .base import broadcast_to, transpose, unique, where
 from .core import Tensor
 from .datasource import (
     arange,
@@ -143,7 +142,16 @@ from .indexing import (
     take,
     unravel_index,
 )
-from .merge import stack
+from .merge import concatenate, stack, vstack
+from .misc import (
+    atleast_1d,
+    atleast_2d,
+    atleast_3d,
+    broadcast_to,
+    transpose,
+    unique,
+    where,
+)
 from .rechunk import rechunk
 from .reduction import (
     all,
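
For orientation, every name touched by the two hunks above stays importable from the top-level maxframe.tensor namespace after the base → misc split. A minimal, illustrative session (shapes are made up for illustration and assume the package's usual lazy shape inference; not output copied from this release):

    >>> import maxframe.tensor as mt
    >>> from maxframe.tensor import atleast_2d, concatenate, transpose, vstack  # all re-exported at top level
    >>> mt.transpose(mt.ones((2, 3))).shape   # moved from .base to .misc, still public
    (3, 2)
    >>> mt.concatenate([mt.ones((2, 3)), mt.ones((4, 3))]).shape   # newly exported in rc3
    (6, 3)
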
maxframe/tensor/arithmetic/isclose.py CHANGED
@@ -23,6 +23,7 @@ from .core import TensorBinOp
 
 class TensorIsclose(TensorBinOp):
     _op_type_ = opcodes.ISCLOSE
+    _func_name = "isclose"
 
     rtol = Float64Field("rtol", default=None)
     atol = Float64Field("atol", default=None)
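
The added _func_name follows the convention exercised by the new tests in test_arithmetic.py further below: every concrete unary/binary tensor operator is expected to name the element-wise function it corresponds to. How the attribute is consumed is not shown in this diff; a purely hypothetical sketch of the kind of dispatch it enables:

    import numpy as np

    def execute_binary(op, lhs, rhs):
        # Hypothetical dispatch, not maxframe code: resolve the function
        # named by the operator's _func_name, e.g. "isclose" -> np.isclose.
        func = getattr(np, op._func_name)
        return func(lhs, rhs)
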
maxframe/tensor/arithmetic/tests/test_arithmetic.py CHANGED
@@ -17,26 +17,13 @@
 import numpy as np
 import pytest
 
+from maxframe.tensor.arithmetic.core import TensorBinOp, TensorUnaryOp
+from maxframe.utils import collect_leaf_operators
+
 from ....core import enter_mode
 from ...core import SparseTensor, Tensor
 from ...datasource import array, empty, ones, tensor
-from .. import (
-    TensorAdd,
-    TensorGreaterThan,
-    TensorIsclose,
-    TensorLog,
-    TensorSubtract,
-    add,
-    around,
-    cos,
-    frexp,
-    isclose,
-    isfinite,
-    log,
-    negative,
-    subtract,
-    truediv,
-)
+from .. import *  # noqa: F401
 
 
 def test_add():
@@ -412,3 +399,20 @@ def test_build_mode():
 
     with enter_mode(build=True):
         assert t1 != 2
+
+
+def test_unary_op_func_name():
+    # make sure all the unary op has defined the func name.
+
+    results = collect_leaf_operators(TensorUnaryOp)
+    for op_type in results:
+        assert hasattr(op_type, "_func_name")
+
+
+def test_binary_op_func_name():
+    # make sure all the binary op has defined the func name.
+
+    results = collect_leaf_operators(TensorBinOp)
+    for op_type in results:
+        if op_type not in (TensorSetImag, TensorSetReal):
+            assert hasattr(op_type, "_func_name")
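
The two tests above lean on collect_leaf_operators, imported from maxframe.utils; its implementation is not part of this section. Conceptually it walks the operator class hierarchy and returns the concrete (leaf) subclasses, so the tests can assert that every shipped unary/binary operator defines _func_name. A rough, hypothetical sketch of such a helper, for readers following along:

    def collect_leaf_operators(base_cls):
        # Hypothetical sketch, not the maxframe implementation: collect
        # subclasses of base_cls that have no subclasses of their own.
        leaves, stack = [], [base_cls]
        while stack:
            cls = stack.pop()
            subclasses = cls.__subclasses__()
            if not subclasses:
                leaves.append(cls)
            else:
                stack.extend(subclasses)
        return leaves
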
maxframe/tensor/core.py CHANGED
@@ -23,8 +23,6 @@ from typing import Any, Dict
 import numpy as np
 
 from ..core import (
-    Chunk,
-    ChunkData,
     HasShapeTileable,
     HasShapeTileableData,
     OutputType,
@@ -36,14 +34,9 @@ from ..core.entity.utils import refresh_tileable_shape
 from ..serialization.serializables import (
     AnyField,
     DataTypeField,
-    EnumField,
-    FieldTypes,
-    ListField,
     Serializable,
     StringField,
-    TupleField,
 )
-from ..utils import on_deserialize_shape, on_serialize_shape, skip_na_call
 from .utils import fetch_corner_data, get_chunk_slices
 
 logger = logging.getLogger(__name__)
@@ -56,134 +49,18 @@ class TensorOrder(Enum):
     F_ORDER = "F"
 
 
-class TensorChunkData(ChunkData):
-    __slots__ = ()
-    _no_copy_attrs_ = ChunkData._no_copy_attrs_ | {"dtype"}
-    type_name = "Tensor"
-
-    # required fields
-    _shape = TupleField(
-        "shape",
-        FieldTypes.int64,
-        on_serialize=on_serialize_shape,
-        on_deserialize=on_deserialize_shape,
-    )
-    _order = EnumField("order", TensorOrder, FieldTypes.string)
-    # optional fields
-    _dtype = DataTypeField("dtype")
-
-    def __init__(self, op=None, index=None, shape=None, dtype=None, order=None, **kw):
-        if isinstance(order, str):
-            order = getattr(TensorOrder, order)
-        super().__init__(
-            _op=op, _index=index, _shape=shape, _dtype=dtype, _order=order, **kw
-        )
-        if self.order is None and self.op is not None:
-            if len(self.inputs) == 0:
-                self._order = TensorOrder.C_ORDER
-            elif all(
-                hasattr(inp, "order") and inp.order == TensorOrder.F_ORDER
-                for inp in self.inputs
-            ):
-                self._order = TensorOrder.F_ORDER
-            else:
-                self._order = TensorOrder.C_ORDER
-
-    @property
-    def params(self) -> Dict[str, Any]:
-        # params return the properties which useful to rebuild a new chunk
-        return {
-            "shape": self.shape,
-            "dtype": self.dtype,
-            "order": self.order,
-            "index": self.index,
-        }
-
-    @params.setter
-    def params(self, new_params: Dict[str, Any]):
-        params = new_params.copy()
-        params.pop("index", None)  # index not needed to update
-        new_shape = params.pop("shape", None)
-        if new_shape is not None:
-            self._shape = new_shape
-        dtype = params.pop("dtype", None)
-        if dtype is not None:
-            self._dtype = dtype
-        order = params.pop("order", None)
-        if order is not None:
-            self._order = order
-        if params:  # pragma: no cover
-            raise TypeError(f"Unknown params: {list(params)}")
-
-    @classmethod
-    def get_params_from_data(cls, data: np.ndarray) -> Dict[str, Any]:
-        from .array_utils import is_cupy
-
-        if not is_cupy(data):
-            data = np.asarray(data)
-        order = (
-            TensorOrder.C_ORDER if data.flags["C_CONTIGUOUS"] else TensorOrder.F_ORDER
-        )
-        return {"shape": data.shape, "dtype": data.dtype, "order": order}
-
-    def __len__(self):
-        try:
-            return self.shape[0]
-        except IndexError:
-            if is_build_mode():
-                return 0
-            raise TypeError("len() of unsized object")
-
-    @property
-    def shape(self):
-        return getattr(self, "_shape", None)
-
-    @property
-    def ndim(self):
-        return len(self.shape)
-
-    @property
-    def size(self):
-        return np.prod(self.shape).item()
-
-    @property
-    def dtype(self):
-        return getattr(self, "_dtype", None) or self.op.dtype
-
-    @property
-    def order(self):
-        return getattr(self, "_order", None)
-
-    @property
-    def nbytes(self):
-        return np.prod(self.shape) * self.dtype.itemsize
-
-
-class TensorChunk(Chunk):
-    __slots__ = ()
-    _allow_data_type_ = (TensorChunkData,)
-    type_name = "Tensor"
-
-    def __len__(self):
-        return len(self._data)
-
-
 class TensorData(HasShapeTileableData, _ExecuteAndFetchMixin):
     __slots__ = ()
     type_name = "Tensor"
 
+    _legacy_deprecated_non_primitives = ["_chunks"]
+
     # required fields
     _order = StringField(
         "order", on_serialize=attrgetter("value"), on_deserialize=TensorOrder
     )
     # optional fields
     _dtype = DataTypeField("dtype")
-    _chunks = ListField(
-        "chunks",
-        FieldTypes.reference(TensorChunkData),
-        on_serialize=skip_na_call(lambda x: [it.data for it in x]),
-        on_deserialize=skip_na_call(lambda x: [TensorChunk(it) for it in x]),
-    )
 
     def __init__(
         self,
@@ -318,7 +195,7 @@ class TensorData(HasShapeTileableData, _ExecuteAndFetchMixin):
         return fromsparse(self, fill_value=fill_value)
 
     def transpose(self, *axes):
-        from .base import transpose
+        from .misc import transpose
 
         if len(axes) == 1 and isinstance(axes[0], Iterable):
             axes = axes[0]
@@ -346,11 +223,6 @@ class TensorData(HasShapeTileableData, _ExecuteAndFetchMixin):
 
         return reshape(self, shape, order=order)
 
-    def totiledb(self, uri, ctx=None, key=None, timestamp=None):
-        from .datastore import totiledb
-
-        return totiledb(uri, self, ctx=ctx, key=key, timestamp=timestamp)
-
     @staticmethod
     def from_dataframe(in_df):
         from .datasource import from_dataframe
@@ -526,9 +398,6 @@ class Tensor(HasShapeTileable):
         """
         return self._data.T
 
-    def totiledb(self, uri, ctx=None, key=None, timestamp=None):
-        return self._data.totiledb(uri, ctx=ctx, key=key, timestamp=timestamp)
-
     def copy(self, order="C"):
         return super().copy().astype(self.dtype, order=order, copy=False)
 
@@ -589,7 +458,7 @@ class Tensor(HasShapeTileable):
         array([('c', 1), ('a', 2)],
               dtype=[('x', '|S1'), ('y', '<i4')])
         """
-        from .base import sort
+        from .misc import sort
 
         self._data = sort(
             self,
@@ -651,7 +520,7 @@ class Tensor(HasShapeTileable):
         >>> a.execute()
         array([1, 2, 3, 4])
         """
-        from .base import partition
+        from .misc import partition
 
         self._data = partition(self, kth, axis=axis, kind=kind, order=order, **kw).data
 
maxframe/tensor/datasource/array.py CHANGED
@@ -53,6 +53,9 @@ class ArrayDataSource(TensorNoInput):
 
         super().__init__(data=data, dtype=dtype, gpu=gpu, **kw)
 
+    def get_data(self):
+        return self.data
+
 
 class CSRMatrixDataSource(TensorNoInput):
     """
maxframe/tensor/datasource/full.py CHANGED
@@ -89,7 +89,7 @@ def full(shape, fill_value, dtype=None, chunk_size=None, gpu=None, order="C"):
     """
     v = np.asarray(fill_value)
    if len(v.shape) > 0:
-        from ..base import broadcast_to
+        from ..misc import broadcast_to
 
         return broadcast_to(
             tensor(v, dtype=dtype, chunk_size=chunk_size, gpu=gpu, order=order), shape
maxframe/tensor/datasource/tests/test_datasource.py CHANGED
@@ -141,7 +141,7 @@ def test_zeros():
 
 
 def test_data_source():
-    from ...base.broadcast_to import TensorBroadcastTo
+    from ...misc.broadcast_to import TensorBroadcastTo
 
     data = np.random.random((10, 3))
     t = tensor(data, chunk_size=2)
maxframe/tensor/indexing/flatnonzero.py CHANGED
@@ -55,6 +55,6 @@ def flatnonzero(a):
     >>> x.ravel()[mt.flatnonzero(x)].execute() # TODO(jisheng): accomplish this after fancy indexing is supported
 
     """
-    from ..base import ravel
+    from ..misc import ravel
 
     return nonzero(ravel(a))[0]
maxframe/tensor/merge/__init__.py CHANGED
@@ -12,4 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from .concatenate import concatenate
 from .stack import stack
+from .vstack import vstack
maxframe/tensor/merge/concatenate.py ADDED
@@ -0,0 +1,98 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+
+from ... import opcodes
+from ...serialization.serializables import Int32Field
+from ..datasource import tensor as astensor
+from ..operators import TensorOperator, TensorOperatorMixin
+from ..utils import validate_axis
+
+
+class TensorConcatenate(TensorOperator, TensorOperatorMixin):
+    _op_type_ = opcodes.CONCATENATE
+
+    axis = Int32Field("axis", default=0)
+
+    def __call__(self, tensors):
+        if len(set(t.ndim for t in tensors)) != 1:
+            raise ValueError(
+                "all the input tensors must have same number of dimensions"
+            )
+
+        axis = self.axis
+        shapes = [t.shape[:axis] + t.shape[axis + 1 :] for t in tensors]
+        if len(set(shapes)) != 1:
+            raise ValueError(
+                "all the input tensor dimensions "
+                "except for the concatenation axis must match exactly"
+            )
+
+        shape = [
+            0 if i == axis else tensors[0].shape[i] for i in range(tensors[0].ndim)
+        ]
+        shape[axis] = sum(t.shape[axis] for t in tensors)
+
+        if any(np.isnan(s) for i, s in enumerate(shape) if i != axis):
+            raise ValueError("cannot concatenate tensor with unknown shape")
+
+        return self.new_tensor(tensors, shape=tuple(shape))
+
+
+def concatenate(tensors, axis=0):
+    """
+    Join a sequence of arrays along an existing axis.
+
+    Parameters
+    ----------
+    a1, a2, ... : sequence of array_like
+        The tensors must have the same shape, except in the dimension
+        corresponding to `axis` (the first, by default).
+    axis : int, optional
+        The axis along which the tensors will be joined. Default is 0.
+
+    Returns
+    -------
+    res : Tensor
+        The concatenated tensor.
+
+    See Also
+    --------
+    stack : Stack a sequence of tensors along a new axis.
+    vstack : Stack tensors in sequence vertically (row wise)
+
+    Examples
+    --------
+    >>> import maxframe.tensor as mt
+
+    >>> a = mt.array([[1, 2], [3, 4]])
+    >>> b = mt.array([[5, 6]])
+    >>> mt.concatenate((a, b), axis=0).execute()
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+    >>> mt.concatenate((a, b.T), axis=1).execute()
+    array([[1, 2, 5],
+           [3, 4, 6]])
+
+    """
+    if axis is None:
+        axis = 0
+    tensors = [astensor(t) for t in tensors]
+
+    axis = validate_axis(tensors[0].ndim, axis)
+    dtype = np.result_type(*(t.dtype for t in tensors))
+
+    op = TensorConcatenate(axis=axis, dtype=dtype)
+    return op(tensors)
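
As the docstring above shows, concatenate only records the operator and infers the result shape eagerly; execution happens later. A small illustration of the shape checks performed in TensorConcatenate.__call__ (the values mirror the new test in test_merge.py below):

    >>> import maxframe.tensor as mt
    >>> a = mt.ones((10, 20, 30), chunk_size=10)
    >>> b = mt.ones((20, 20, 30), chunk_size=20)
    >>> mt.concatenate([a, b]).shape   # sizes are summed along the concatenation axis
    (30, 20, 30)
    >>> mt.concatenate([a, mt.ones((10, 30, 30))])   # non-axis dimensions must match
    Traceback (most recent call last):
        ...
    ValueError: all the input tensor dimensions except for the concatenation axis must match exactly
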
maxframe/tensor/merge/tests/test_merge.py CHANGED
@@ -18,7 +18,36 @@ import numpy as np
 import pytest
 
 from ...datasource import empty, ones
-from .. import stack
+from .. import concatenate, stack
+
+
+def test_concatenate():
+    a = ones((10, 20, 30), chunk_size=10)
+    b = ones((20, 20, 30), chunk_size=20)
+
+    c = concatenate([a, b])
+    assert c.shape == (30, 20, 30)
+
+    a = ones((10, 20, 30), chunk_size=10)
+    b = ones((10, 20, 40), chunk_size=20)
+
+    c = concatenate([a, b], axis=-1)
+    assert c.shape == (10, 20, 70)
+
+    with pytest.raises(ValueError):
+        a = ones((10, 20, 30), chunk_size=10)
+        b = ones((20, 30, 30), chunk_size=20)
+
+        concatenate([a, b])
+
+    with pytest.raises(ValueError):
+        a = ones((10, 20, 30), chunk_size=10)
+        b = ones((20, 20), chunk_size=20)
+
+        concatenate([a, b])
+
+    a = ones((10, 20, 30), chunk_size=5)
+    b = ones((20, 20, 30), chunk_size=10)
 
 
 def test_stack():
maxframe/tensor/merge/vstack.py ADDED
@@ -0,0 +1,70 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ..misc import atleast_2d
+from .concatenate import concatenate
+
+
+def vstack(tup):
+    """
+    Stack tensors in sequence vertically (row wise).
+
+    This is equivalent to concatenation along the first axis after 1-D tensors
+    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds tensors divided by
+    `vsplit`.
+
+    This function makes most sense for tensors with up to 3 dimensions. For
+    instance, for pixel-data with a height (first axis), width (second axis),
+    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+    `block` provide more general stacking and concatenation operations.
+
+    Parameters
+    ----------
+    tup : sequence of tensors
+        The tensors must have the same shape along all but the first axis.
+        1-D tensors must have the same length.
+
+    Returns
+    -------
+    stacked : Tensor
+        The tensor formed by stacking the given tensors, will be at least 2-D.
+
+    See Also
+    --------
+    stack : Join a sequence of tensors along a new axis.
+    concatenate : Join a sequence of tensors along an existing axis.
+
+    Examples
+    --------
+    >>> import mars.tensor as mt
+
+    >>> a = mt.array([1, 2, 3])
+    >>> b = mt.array([2, 3, 4])
+    >>> mt.vstack((a,b)).execute()
+    array([[1, 2, 3],
+           [2, 3, 4]])
+
+    >>> a = mt.array([[1], [2], [3]])
+    >>> b = mt.array([[2], [3], [4]])
+    >>> mt.vstack((a,b)).execute()
+    array([[1],
+           [2],
+           [3],
+           [2],
+           [3],
+           [4]])
+
+    """
+    return concatenate([atleast_2d(t) for t in tup], axis=0)
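
Because vstack simply applies atleast_2d to each input and concatenates along axis 0, a 1-D input of length N is treated as a row of shape (1, N). A quick illustrative check of the inferred shapes (not output taken from this release):

    >>> import maxframe.tensor as mt
    >>> mt.vstack([mt.ones(4), mt.ones(4)]).shape        # two length-4 vectors -> 2 x 4
    (2, 4)
    >>> mt.vstack([mt.ones((3, 4)), mt.ones((2, 4))]).shape
    (5, 4)
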
maxframe/tensor/{base → misc}/__init__.py RENAMED
@@ -14,6 +14,8 @@
 
 from .astype import TensorAstype
 from .atleast_1d import atleast_1d
+from .atleast_2d import atleast_2d
+from .atleast_3d import atleast_3d
 from .broadcast_to import TensorBroadcastTo, broadcast_to
 from .ravel import ravel
 from .transpose import transpose
maxframe/tensor/{base → misc}/atleast_1d.py RENAMED
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 1999-2024 Alibaba Group Holding Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
maxframe/tensor/misc/atleast_2d.py ADDED
@@ -0,0 +1,70 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+from ...core import ExecutableTuple
+from ..datasource import tensor as astensor
+
+
+def atleast_2d(*tensors):
+    """
+    View inputs as tensors with at least two dimensions.
+
+    Parameters
+    ----------
+    tensors1, tensors2, ... : array_like
+        One or more array-like sequences. Non-tensor inputs are converted
+        to tensors. Tensors that already have two or more dimensions are
+        preserved.
+
+    Returns
+    -------
+    res, res2, ... : Tensor
+        A tensor, or list of tensors, each with ``a.ndim >= 2``.
+        Copies are avoided where possible, and views with two or more
+        dimensions are returned.
+
+    See Also
+    --------
+    atleast_1d, atleast_3d
+
+    Examples
+    --------
+    >>> import maxframe.tensor as mt
+
+    >>> mt.atleast_2d(3.0).execute()
+    array([[ 3.]])
+
+    >>> x = mt.arange(3.0)
+    >>> mt.atleast_2d(x).execute()
+    array([[ 0., 1., 2.]])
+
+    >>> mt.atleast_2d(1, [1, 2], [[1, 2]]).execute()
+    [array([[1]]), array([[1, 2]]), array([[1, 2]])]
+
+    """
+    new_tensors = []
+    for x in tensors:
+        x = astensor(x)
+        if x.ndim == 0:
+            x = x[np.newaxis, np.newaxis]
+        elif x.ndim == 1:
+            x = x[np.newaxis, :]
+
+        new_tensors.append(x)
+
+    if len(new_tensors) == 1:
+        return new_tensors[0]
+    return ExecutableTuple(new_tensors)
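
Following the promotion rules in the loop above (0-d inputs gain two new axes, 1-d inputs gain a leading axis) and the ExecutableTuple wrapper for multiple inputs, usage looks roughly like this (shapes are illustrative):

    >>> import maxframe.tensor as mt
    >>> mt.atleast_2d(mt.ones(5)).shape     # 1-D input becomes a single row
    (1, 5)
    >>> res = mt.atleast_2d(1, [1, 2])      # several inputs come back as an ExecutableTuple
    >>> [t.shape for t in res]
    [(1, 1), (1, 2)]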