maxframe 1.0.0rc2-cp39-cp39-macosx_10_9_universal2.whl → 1.0.0rc4-cp39-cp39-macosx_10_9_universal2.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Potentially problematic release: this version of maxframe might be problematic.

Files changed (134)
  1. maxframe/_utils.cpython-39-darwin.so +0 -0
  2. maxframe/codegen.py +4 -2
  3. maxframe/config/config.py +28 -9
  4. maxframe/config/validators.py +42 -12
  5. maxframe/conftest.py +56 -14
  6. maxframe/core/__init__.py +2 -13
  7. maxframe/core/entity/__init__.py +0 -4
  8. maxframe/core/entity/executable.py +1 -1
  9. maxframe/core/entity/objects.py +45 -2
  10. maxframe/core/entity/output_types.py +0 -3
  11. maxframe/core/entity/tests/test_objects.py +43 -0
  12. maxframe/core/entity/tileables.py +5 -78
  13. maxframe/core/graph/__init__.py +2 -2
  14. maxframe/core/graph/builder/__init__.py +0 -1
  15. maxframe/core/graph/builder/base.py +5 -4
  16. maxframe/core/graph/builder/tileable.py +4 -4
  17. maxframe/core/graph/builder/utils.py +4 -8
  18. maxframe/core/graph/core.cpython-39-darwin.so +0 -0
  19. maxframe/core/graph/entity.py +9 -33
  20. maxframe/core/operator/__init__.py +2 -9
  21. maxframe/core/operator/base.py +3 -5
  22. maxframe/core/operator/objects.py +0 -9
  23. maxframe/core/operator/utils.py +55 -0
  24. maxframe/dataframe/arithmetic/docstring.py +26 -2
  25. maxframe/dataframe/arithmetic/equal.py +4 -2
  26. maxframe/dataframe/arithmetic/greater.py +4 -2
  27. maxframe/dataframe/arithmetic/greater_equal.py +4 -2
  28. maxframe/dataframe/arithmetic/less.py +2 -2
  29. maxframe/dataframe/arithmetic/less_equal.py +4 -2
  30. maxframe/dataframe/arithmetic/not_equal.py +4 -2
  31. maxframe/dataframe/core.py +2 -0
  32. maxframe/dataframe/datasource/read_odps_query.py +67 -8
  33. maxframe/dataframe/datasource/read_odps_table.py +4 -2
  34. maxframe/dataframe/datasource/tests/test_datasource.py +35 -6
  35. maxframe/dataframe/datastore/to_odps.py +8 -1
  36. maxframe/dataframe/extensions/__init__.py +3 -0
  37. maxframe/dataframe/extensions/flatmap.py +326 -0
  38. maxframe/dataframe/extensions/tests/test_extensions.py +62 -1
  39. maxframe/dataframe/indexing/add_prefix_suffix.py +1 -1
  40. maxframe/dataframe/indexing/rename.py +11 -0
  41. maxframe/dataframe/initializer.py +11 -1
  42. maxframe/dataframe/misc/drop_duplicates.py +18 -1
  43. maxframe/dataframe/operators.py +1 -17
  44. maxframe/dataframe/reduction/core.py +2 -2
  45. maxframe/dataframe/tests/test_initializer.py +33 -2
  46. maxframe/io/objects/__init__.py +24 -0
  47. maxframe/io/objects/core.py +140 -0
  48. maxframe/io/objects/tensor.py +76 -0
  49. maxframe/io/objects/tests/__init__.py +13 -0
  50. maxframe/io/objects/tests/test_object_io.py +97 -0
  51. maxframe/{odpsio → io/odpsio}/__init__.py +2 -0
  52. maxframe/{odpsio → io/odpsio}/arrow.py +4 -4
  53. maxframe/{odpsio → io/odpsio}/schema.py +10 -8
  54. maxframe/{odpsio → io/odpsio}/tableio.py +50 -38
  55. maxframe/io/odpsio/tests/__init__.py +13 -0
  56. maxframe/{odpsio → io/odpsio}/tests/test_schema.py +3 -7
  57. maxframe/{odpsio → io/odpsio}/tests/test_tableio.py +3 -3
  58. maxframe/{odpsio → io/odpsio}/tests/test_volumeio.py +4 -6
  59. maxframe/io/odpsio/volumeio.py +63 -0
  60. maxframe/learn/contrib/__init__.py +2 -1
  61. maxframe/learn/contrib/graph/__init__.py +15 -0
  62. maxframe/learn/contrib/graph/connected_components.py +215 -0
  63. maxframe/learn/contrib/graph/tests/__init__.py +13 -0
  64. maxframe/learn/contrib/graph/tests/test_connected_components.py +53 -0
  65. maxframe/learn/contrib/xgboost/classifier.py +26 -2
  66. maxframe/learn/contrib/xgboost/core.py +87 -2
  67. maxframe/learn/contrib/xgboost/dmatrix.py +1 -4
  68. maxframe/learn/contrib/xgboost/predict.py +27 -44
  69. maxframe/learn/contrib/xgboost/regressor.py +3 -10
  70. maxframe/learn/contrib/xgboost/train.py +27 -16
  71. maxframe/{core/operator/fuse.py → learn/core.py} +7 -10
  72. maxframe/lib/mmh3.cpython-39-darwin.so +0 -0
  73. maxframe/opcodes.py +3 -0
  74. maxframe/protocol.py +7 -16
  75. maxframe/remote/core.py +4 -8
  76. maxframe/serialization/__init__.py +1 -0
  77. maxframe/serialization/core.cpython-39-darwin.so +0 -0
  78. maxframe/session.py +9 -2
  79. maxframe/tensor/__init__.py +10 -2
  80. maxframe/tensor/arithmetic/isclose.py +1 -0
  81. maxframe/tensor/arithmetic/tests/test_arithmetic.py +21 -17
  82. maxframe/tensor/core.py +5 -136
  83. maxframe/tensor/datasource/array.py +3 -0
  84. maxframe/tensor/datasource/full.py +1 -1
  85. maxframe/tensor/datasource/tests/test_datasource.py +1 -1
  86. maxframe/tensor/indexing/flatnonzero.py +1 -1
  87. maxframe/tensor/indexing/getitem.py +2 -0
  88. maxframe/tensor/merge/__init__.py +2 -0
  89. maxframe/tensor/merge/concatenate.py +101 -0
  90. maxframe/tensor/merge/tests/test_merge.py +30 -1
  91. maxframe/tensor/merge/vstack.py +74 -0
  92. maxframe/tensor/{base → misc}/__init__.py +2 -0
  93. maxframe/tensor/{base → misc}/atleast_1d.py +0 -2
  94. maxframe/tensor/misc/atleast_2d.py +70 -0
  95. maxframe/tensor/misc/atleast_3d.py +85 -0
  96. maxframe/tensor/misc/tests/__init__.py +13 -0
  97. maxframe/tensor/{base → misc}/transpose.py +22 -18
  98. maxframe/tensor/operators.py +1 -7
  99. maxframe/tensor/random/core.py +1 -1
  100. maxframe/tensor/reduction/count_nonzero.py +1 -0
  101. maxframe/tensor/reduction/mean.py +1 -0
  102. maxframe/tensor/reduction/nanmean.py +1 -0
  103. maxframe/tensor/reduction/nanvar.py +2 -0
  104. maxframe/tensor/reduction/tests/test_reduction.py +12 -1
  105. maxframe/tensor/reduction/var.py +2 -0
  106. maxframe/tensor/utils.py +2 -22
  107. maxframe/typing_.py +4 -1
  108. maxframe/udf.py +8 -9
  109. maxframe/utils.py +49 -73
  110. maxframe-1.0.0rc4.dist-info/METADATA +104 -0
  111. {maxframe-1.0.0rc2.dist-info → maxframe-1.0.0rc4.dist-info}/RECORD +129 -114
  112. {maxframe-1.0.0rc2.dist-info → maxframe-1.0.0rc4.dist-info}/WHEEL +1 -1
  113. maxframe_client/fetcher.py +33 -50
  114. maxframe_client/session/consts.py +3 -0
  115. maxframe_client/session/graph.py +8 -2
  116. maxframe_client/session/odps.py +134 -27
  117. maxframe_client/session/task.py +58 -20
  118. maxframe_client/tests/test_fetcher.py +1 -1
  119. maxframe_client/tests/test_session.py +27 -3
  120. maxframe/core/entity/chunks.py +0 -68
  121. maxframe/core/entity/fuse.py +0 -73
  122. maxframe/core/graph/builder/chunk.py +0 -430
  123. maxframe/odpsio/volumeio.py +0 -95
  124. maxframe-1.0.0rc2.dist-info/METADATA +0 -177
  125. /maxframe/{odpsio → core/entity}/tests/__init__.py +0 -0
  126. /maxframe/{tensor/base/tests → io}/__init__.py +0 -0
  127. /maxframe/{odpsio → io/odpsio}/tests/test_arrow.py +0 -0
  128. /maxframe/tensor/{base → misc}/astype.py +0 -0
  129. /maxframe/tensor/{base → misc}/broadcast_to.py +0 -0
  130. /maxframe/tensor/{base → misc}/ravel.py +0 -0
  131. /maxframe/tensor/{base/tests/test_base.py → misc/tests/test_misc.py} +0 -0
  132. /maxframe/tensor/{base → misc}/unique.py +0 -0
  133. /maxframe/tensor/{base → misc}/where.py +0 -0
  134. {maxframe-1.0.0rc2.dist-info → maxframe-1.0.0rc4.dist-info}/top_level.txt +0 -0
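
Note on the renames above: maxframe/tensor/base is now maxframe/tensor/misc, and the maxframe/odpsio package moved under maxframe/io/odpsio. Code that reaches into those private module paths needs its imports updated; a minimal sketch of the adjustment, assuming the functions themselves are unchanged and remain re-exported from the public maxframe.tensor namespace (the deep-import paths here are illustrative, not a documented API):

    # rc2 private path (no longer present in rc4):
    # from maxframe.tensor.base import broadcast_to, transpose
    # rc4 private path:
    from maxframe.tensor.misc import broadcast_to, transpose

    # Going through the public namespace works the same way in both versions.
    import maxframe.tensor as mt
    t = mt.transpose(mt.array([[1, 2, 3], [4, 5, 6]]))
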
maxframe/tensor/core.py CHANGED
@@ -23,8 +23,6 @@ from typing import Any, Dict
 import numpy as np
 
 from ..core import (
-    Chunk,
-    ChunkData,
     HasShapeTileable,
     HasShapeTileableData,
     OutputType,
@@ -36,14 +34,9 @@ from ..core.entity.utils import refresh_tileable_shape
 from ..serialization.serializables import (
     AnyField,
     DataTypeField,
-    EnumField,
-    FieldTypes,
-    ListField,
     Serializable,
     StringField,
-    TupleField,
 )
-from ..utils import on_deserialize_shape, on_serialize_shape, skip_na_call
 from .utils import fetch_corner_data, get_chunk_slices
 
 logger = logging.getLogger(__name__)
@@ -56,134 +49,18 @@ class TensorOrder(Enum):
     F_ORDER = "F"
 
 
-class TensorChunkData(ChunkData):
-    __slots__ = ()
-    _no_copy_attrs_ = ChunkData._no_copy_attrs_ | {"dtype"}
-    type_name = "Tensor"
-
-    # required fields
-    _shape = TupleField(
-        "shape",
-        FieldTypes.int64,
-        on_serialize=on_serialize_shape,
-        on_deserialize=on_deserialize_shape,
-    )
-    _order = EnumField("order", TensorOrder, FieldTypes.string)
-    # optional fields
-    _dtype = DataTypeField("dtype")
-
-    def __init__(self, op=None, index=None, shape=None, dtype=None, order=None, **kw):
-        if isinstance(order, str):
-            order = getattr(TensorOrder, order)
-        super().__init__(
-            _op=op, _index=index, _shape=shape, _dtype=dtype, _order=order, **kw
-        )
-        if self.order is None and self.op is not None:
-            if len(self.inputs) == 0:
-                self._order = TensorOrder.C_ORDER
-            elif all(
-                hasattr(inp, "order") and inp.order == TensorOrder.F_ORDER
-                for inp in self.inputs
-            ):
-                self._order = TensorOrder.F_ORDER
-            else:
-                self._order = TensorOrder.C_ORDER
-
-    @property
-    def params(self) -> Dict[str, Any]:
-        # params return the properties which useful to rebuild a new chunk
-        return {
-            "shape": self.shape,
-            "dtype": self.dtype,
-            "order": self.order,
-            "index": self.index,
-        }
-
-    @params.setter
-    def params(self, new_params: Dict[str, Any]):
-        params = new_params.copy()
-        params.pop("index", None)  # index not needed to update
-        new_shape = params.pop("shape", None)
-        if new_shape is not None:
-            self._shape = new_shape
-        dtype = params.pop("dtype", None)
-        if dtype is not None:
-            self._dtype = dtype
-        order = params.pop("order", None)
-        if order is not None:
-            self._order = order
-        if params:  # pragma: no cover
-            raise TypeError(f"Unknown params: {list(params)}")
-
-    @classmethod
-    def get_params_from_data(cls, data: np.ndarray) -> Dict[str, Any]:
-        from .array_utils import is_cupy
-
-        if not is_cupy(data):
-            data = np.asarray(data)
-        order = (
-            TensorOrder.C_ORDER if data.flags["C_CONTIGUOUS"] else TensorOrder.F_ORDER
-        )
-        return {"shape": data.shape, "dtype": data.dtype, "order": order}
-
-    def __len__(self):
-        try:
-            return self.shape[0]
-        except IndexError:
-            if is_build_mode():
-                return 0
-            raise TypeError("len() of unsized object")
-
-    @property
-    def shape(self):
-        return getattr(self, "_shape", None)
-
-    @property
-    def ndim(self):
-        return len(self.shape)
-
-    @property
-    def size(self):
-        return np.prod(self.shape).item()
-
-    @property
-    def dtype(self):
-        return getattr(self, "_dtype", None) or self.op.dtype
-
-    @property
-    def order(self):
-        return getattr(self, "_order", None)
-
-    @property
-    def nbytes(self):
-        return np.prod(self.shape) * self.dtype.itemsize
-
-
-class TensorChunk(Chunk):
-    __slots__ = ()
-    _allow_data_type_ = (TensorChunkData,)
-    type_name = "Tensor"
-
-    def __len__(self):
-        return len(self._data)
-
-
 class TensorData(HasShapeTileableData, _ExecuteAndFetchMixin):
     __slots__ = ()
     type_name = "Tensor"
 
+    _legacy_deprecated_non_primitives = ["_chunks"]
+
     # required fields
     _order = StringField(
         "order", on_serialize=attrgetter("value"), on_deserialize=TensorOrder
     )
     # optional fields
     _dtype = DataTypeField("dtype")
-    _chunks = ListField(
-        "chunks",
-        FieldTypes.reference(TensorChunkData),
-        on_serialize=skip_na_call(lambda x: [it.data for it in x]),
-        on_deserialize=skip_na_call(lambda x: [TensorChunk(it) for it in x]),
-    )
 
     def __init__(
         self,
@@ -318,7 +195,7 @@ class TensorData(HasShapeTileableData, _ExecuteAndFetchMixin):
         return fromsparse(self, fill_value=fill_value)
 
     def transpose(self, *axes):
-        from .base import transpose
+        from .misc import transpose
 
         if len(axes) == 1 and isinstance(axes[0], Iterable):
             axes = axes[0]
@@ -346,11 +223,6 @@ class TensorData(HasShapeTileableData, _ExecuteAndFetchMixin):
 
         return reshape(self, shape, order=order)
 
-    def totiledb(self, uri, ctx=None, key=None, timestamp=None):
-        from .datastore import totiledb
-
-        return totiledb(uri, self, ctx=ctx, key=key, timestamp=timestamp)
-
     @staticmethod
     def from_dataframe(in_df):
         from .datasource import from_dataframe
@@ -526,9 +398,6 @@ class Tensor(HasShapeTileable):
         """
         return self._data.T
 
-    def totiledb(self, uri, ctx=None, key=None, timestamp=None):
-        return self._data.totiledb(uri, ctx=ctx, key=key, timestamp=timestamp)
-
     def copy(self, order="C"):
         return super().copy().astype(self.dtype, order=order, copy=False)
 
@@ -589,7 +458,7 @@ class Tensor(HasShapeTileable):
         array([('c', 1), ('a', 2)],
               dtype=[('x', '|S1'), ('y', '<i4')])
         """
-        from .base import sort
+        from .misc import sort
 
         self._data = sort(
             self,
@@ -651,7 +520,7 @@ class Tensor(HasShapeTileable):
         >>> a.execute()
         array([1, 2, 3, 4])
         """
-        from .base import partition
+        from .misc import partition
 
         self._data = partition(self, kth, axis=axis, kind=kind, order=order, **kw).data
 
maxframe/tensor/datasource/array.py CHANGED
@@ -53,6 +53,9 @@ class ArrayDataSource(TensorNoInput):
 
         super().__init__(data=data, dtype=dtype, gpu=gpu, **kw)
 
+    def get_data(self):
+        return self.data
+
 
 class CSRMatrixDataSource(TensorNoInput):
     """
maxframe/tensor/datasource/full.py CHANGED
@@ -89,7 +89,7 @@ def full(shape, fill_value, dtype=None, chunk_size=None, gpu=None, order="C"):
     """
    v = np.asarray(fill_value)
    if len(v.shape) > 0:
-        from ..base import broadcast_to
+        from ..misc import broadcast_to
 
        return broadcast_to(
            tensor(v, dtype=dtype, chunk_size=chunk_size, gpu=gpu, order=order), shape
maxframe/tensor/datasource/tests/test_datasource.py CHANGED
@@ -141,7 +141,7 @@ def test_zeros():
 
 
 def test_data_source():
-    from ...base.broadcast_to import TensorBroadcastTo
+    from ...misc.broadcast_to import TensorBroadcastTo
 
     data = np.random.random((10, 3))
     t = tensor(data, chunk_size=2)
maxframe/tensor/indexing/flatnonzero.py CHANGED
@@ -55,6 +55,6 @@ def flatnonzero(a):
    >>> x.ravel()[mt.flatnonzero(x)].execute()  # TODO(jisheng): accomplish this after fancy indexing is supported
 
    """
-    from ..base import ravel
+    from ..misc import ravel
 
    return nonzero(ravel(a))[0]
maxframe/tensor/indexing/getitem.py CHANGED
@@ -130,6 +130,8 @@ def _calc_order(a, index):
             continue
         elif isinstance(ind, slice):
             shape = a.shape[in_axis]
+            if shape is np.nan:
+                return TensorOrder.C_ORDER
             slc = ind.indices(shape)
             if slc[0] == 0 and slc[1] == shape and slc[2] == 1:
                 continue
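
The two lines added above guard the case where the sliced dimension has an unknown extent (stored as NaN): slice.indices() requires an integer length, so _calc_order now falls back to C order instead of failing. A small illustration of the failure mode with plain Python and NumPy (not maxframe-specific code):

    import numpy as np

    # slice.indices() needs an integer length; an unknown (NaN) extent raises,
    # which is what the new shortcut to TensorOrder.C_ORDER avoids.
    try:
        slice(None).indices(np.nan)
    except TypeError as exc:
        print(exc)  # 'float' object cannot be interpreted as an integer
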
maxframe/tensor/merge/__init__.py CHANGED
@@ -12,4 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from .concatenate import concatenate
 from .stack import stack
+from .vstack import vstack
maxframe/tensor/merge/concatenate.py ADDED
@@ -0,0 +1,101 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+
+from ... import opcodes
+from ...serialization.serializables import Int32Field
+from ..datasource import tensor as astensor
+from ..operators import TensorOperator, TensorOperatorMixin
+from ..utils import validate_axis
+
+
+class TensorConcatenate(TensorOperator, TensorOperatorMixin):
+    _op_type_ = opcodes.CONCATENATE
+
+    axis = Int32Field("axis", default=0)
+
+    def __call__(self, tensors):
+        axis = self.axis
+        shape = _calc_concatenate_shape(tensors, axis)
+        shape[axis] = sum(t.shape[axis] for t in tensors)
+        return self.new_tensor(tensors, shape=tuple(shape))
+
+
+def concatenate(tensors, axis=0):
+    """
+    Join a sequence of arrays along an existing axis.
+
+    Parameters
+    ----------
+    a1, a2, ... : sequence of array_like
+        The tensors must have the same shape, except in the dimension
+        corresponding to `axis` (the first, by default).
+    axis : int, optional
+        The axis along which the tensors will be joined. Default is 0.
+
+    Returns
+    -------
+    res : Tensor
+        The concatenated tensor.
+
+    See Also
+    --------
+    stack : Stack a sequence of tensors along a new axis.
+    vstack : Stack tensors in sequence vertically (row wise)
+
+    Examples
+    --------
+    >>> import maxframe.tensor as mt
+
+    >>> a = mt.array([[1, 2], [3, 4]])
+    >>> b = mt.array([[5, 6]])
+    >>> mt.concatenate((a, b), axis=0).execute()
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+    >>> mt.concatenate((a, b.T), axis=1).execute()
+    array([[1, 2, 5],
+           [3, 4, 6]])
+
+    """
+    if axis is None:
+        axis = 0
+    tensors = [astensor(t) for t in tensors]
+    axis = validate_axis(tensors[0].ndim, axis)
+
+    if len(set(t.ndim for t in tensors)) != 1:
+        raise ValueError("all the input tensors must have same number of dimensions")
+
+    shapes = [t.shape[:axis] + t.shape[axis + 1 :] for t in tensors]
+    if len(set(shapes)) != 1:
+        raise ValueError(
+            "all the input tensor dimensions "
+            "except for the concatenation axis must match exactly"
+        )
+    shape = _calc_concatenate_shape(tensors, axis)
+    if any(np.isnan(s) for i, s in enumerate(shape) if i != axis):
+        raise ValueError("cannot concatenate tensor with unknown shape")
+
+    return _concatenate(tensors, axis)
+
+
+def _concatenate(tensors, axis=0):
+    dtype = np.result_type(*(t.dtype for t in tensors))
+
+    op = TensorConcatenate(axis=axis, dtype=dtype)
+    return op(tensors)
+
+
+def _calc_concatenate_shape(tensors, axis):
+    return [0 if i == axis else tensors[0].shape[i] for i in range(tensors[0].ndim)]
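
TensorConcatenate.__call__ above builds the output shape by keeping every non-axis dimension of the first input and summing the inputs' extents along the concatenation axis, matching NumPy's rule. A quick sanity check of that shape arithmetic (ordinary NumPy, not maxframe code):

    import numpy as np

    a = np.ones((10, 20, 30))
    b = np.ones((20, 20, 30))
    # Non-axis dimensions are preserved; the join axis becomes 10 + 20 = 30.
    assert np.concatenate([a, b], axis=0).shape == (30, 20, 30)
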
maxframe/tensor/merge/tests/test_merge.py CHANGED
@@ -18,7 +18,36 @@ import numpy as np
 import pytest
 
 from ...datasource import empty, ones
-from .. import stack
+from .. import concatenate, stack
+
+
+def test_concatenate():
+    a = ones((10, 20, 30), chunk_size=10)
+    b = ones((20, 20, 30), chunk_size=20)
+
+    c = concatenate([a, b])
+    assert c.shape == (30, 20, 30)
+
+    a = ones((10, 20, 30), chunk_size=10)
+    b = ones((10, 20, 40), chunk_size=20)
+
+    c = concatenate([a, b], axis=-1)
+    assert c.shape == (10, 20, 70)
+
+    with pytest.raises(ValueError):
+        a = ones((10, 20, 30), chunk_size=10)
+        b = ones((20, 30, 30), chunk_size=20)
+
+        concatenate([a, b])
+
+    with pytest.raises(ValueError):
+        a = ones((10, 20, 30), chunk_size=10)
+        b = ones((20, 20), chunk_size=20)
+
+        concatenate([a, b])
+
+    a = ones((10, 20, 30), chunk_size=5)
+    b = ones((20, 20, 30), chunk_size=10)
 
 
 def test_stack():
maxframe/tensor/merge/vstack.py ADDED
@@ -0,0 +1,74 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ..misc import atleast_2d
+from .concatenate import _concatenate, concatenate
+
+
+def vstack(tup):
+    """
+    Stack tensors in sequence vertically (row wise).
+
+    This is equivalent to concatenation along the first axis after 1-D tensors
+    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds tensors divided by
+    `vsplit`.
+
+    This function makes most sense for tensors with up to 3 dimensions. For
+    instance, for pixel-data with a height (first axis), width (second axis),
+    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+    `block` provide more general stacking and concatenation operations.
+
+    Parameters
+    ----------
+    tup : sequence of tensors
+        The tensors must have the same shape along all but the first axis.
+        1-D tensors must have the same length.
+
+    Returns
+    -------
+    stacked : Tensor
+        The tensor formed by stacking the given tensors, will be at least 2-D.
+
+    See Also
+    --------
+    stack : Join a sequence of tensors along a new axis.
+    concatenate : Join a sequence of tensors along an existing axis.
+
+    Examples
+    --------
+    >>> import mars.tensor as mt
+
+    >>> a = mt.array([1, 2, 3])
+    >>> b = mt.array([2, 3, 4])
+    >>> mt.vstack((a,b)).execute()
+    array([[1, 2, 3],
+           [2, 3, 4]])
+
+    >>> a = mt.array([[1], [2], [3]])
+    >>> b = mt.array([[2], [3], [4]])
+    >>> mt.vstack((a,b)).execute()
+    array([[1],
+           [2],
+           [3],
+           [2],
+           [3],
+           [4]])
+
+    """
+    return concatenate([atleast_2d(t) for t in tup], axis=0)
+
+
+def _vstack(tup):
+    return _concatenate([atleast_2d(t) for t in tup], axis=0)
maxframe/tensor/{base → misc}/__init__.py CHANGED
@@ -14,6 +14,8 @@
 
 from .astype import TensorAstype
 from .atleast_1d import atleast_1d
+from .atleast_2d import atleast_2d
+from .atleast_3d import atleast_3d
 from .broadcast_to import TensorBroadcastTo, broadcast_to
 from .ravel import ravel
 from .transpose import transpose
maxframe/tensor/{base → misc}/atleast_1d.py CHANGED
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 1999-2024 Alibaba Group Holding Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
maxframe/tensor/misc/atleast_2d.py ADDED
@@ -0,0 +1,70 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+from ...core import ExecutableTuple
+from ..datasource import tensor as astensor
+
+
+def atleast_2d(*tensors):
+    """
+    View inputs as tensors with at least two dimensions.
+
+    Parameters
+    ----------
+    tensors1, tensors2, ... : array_like
+        One or more array-like sequences. Non-tensor inputs are converted
+        to tensors. Tensors that already have two or more dimensions are
+        preserved.
+
+    Returns
+    -------
+    res, res2, ... : Tensor
+        A tensor, or list of tensors, each with ``a.ndim >= 2``.
+        Copies are avoided where possible, and views with two or more
+        dimensions are returned.
+
+    See Also
+    --------
+    atleast_1d, atleast_3d
+
+    Examples
+    --------
+    >>> import maxframe.tensor as mt
+
+    >>> mt.atleast_2d(3.0).execute()
+    array([[ 3.]])
+
+    >>> x = mt.arange(3.0)
+    >>> mt.atleast_2d(x).execute()
+    array([[ 0., 1., 2.]])
+
+    >>> mt.atleast_2d(1, [1, 2], [[1, 2]]).execute()
+    [array([[1]]), array([[1, 2]]), array([[1, 2]])]
+
+    """
+    new_tensors = []
+    for x in tensors:
+        x = astensor(x)
+        if x.ndim == 0:
+            x = x[np.newaxis, np.newaxis]
+        elif x.ndim == 1:
+            x = x[np.newaxis, :]
+
+        new_tensors.append(x)
+
+    if len(new_tensors) == 1:
+        return new_tensors[0]
+    return ExecutableTuple(new_tensors)
maxframe/tensor/misc/atleast_3d.py ADDED
@@ -0,0 +1,85 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import numpy as np
+
+from ...core import ExecutableTuple
+from ..datasource import tensor as astensor
+
+
+def atleast_3d(*tensors):
+    """
+    View inputs as tensors with at least three dimensions.
+
+    Parameters
+    ----------
+    tensors1, tensors2, ... : array_like
+        One or more tensor-like sequences. Non-tensor inputs are converted to
+        tensors. Tensors that already have three or more dimensions are
+        preserved.
+
+    Returns
+    -------
+    res1, res2, ... : Tensor
+        A tensor, or list of tensors, each with ``a.ndim >= 3``. Copies are
+        avoided where possible, and views with three or more dimensions are
+        returned. For example, a 1-D tensor of shape ``(N,)`` becomes a view
+        of shape ``(1, N, 1)``, and a 2-D tensor of shape ``(M, N)`` becomes a
+        view of shape ``(M, N, 1)``.
+
+    See Also
+    --------
+    atleast_1d, atleast_2d
+
+    Examples
+    --------
+    >>> import maxframe.tensor as mt
+
+    >>> mt.atleast_3d(3.0).execute()
+    array([[[ 3.]]])
+
+    >>> x = mt.arange(3.0)
+    >>> mt.atleast_3d(x).shape
+    (1, 3, 1)
+
+    >>> x = mt.arange(12.0).reshape(4,3)
+    >>> mt.atleast_3d(x).shape
+    (4, 3, 1)
+
+    >>> for arr in mt.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]).execute():
+    ...     print(arr, arr.shape)
+    ...
+    [[[1]
+      [2]]] (1, 2, 1)
+    [[[1]
+      [2]]] (1, 2, 1)
+    [[[1 2]]] (1, 1, 2)
+
+    """
+    new_tensors = []
+    for x in tensors:
+        x = astensor(x)
+        if x.ndim == 0:
+            x = x[np.newaxis, np.newaxis, np.newaxis]
+        elif x.ndim == 1:
+            x = x[np.newaxis, :, np.newaxis]
+        elif x.ndim == 2:
+            x = x[:, :, None]
+
+        new_tensors.append(x)
+
+    if len(new_tensors) == 1:
+        return new_tensors[0]
+    return ExecutableTuple(new_tensors)
maxframe/tensor/misc/tests/__init__.py ADDED
@@ -0,0 +1,13 @@
+# Copyright 1999-2024 Alibaba Group Holding Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.