mlx-cluster 0.0.2__tar.gz → 0.0.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/CMakeLists.txt +0 -1
  2. {mlx_cluster-0.0.2/mlx_cluster.egg-info → mlx_cluster-0.0.4}/PKG-INFO +6 -3
  3. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/mlx_cluster/_ext.cpython-311-darwin.so +0 -0
  4. mlx_cluster-0.0.4/mlx_cluster/libmlx.dylib +0 -0
  5. mlx_cluster-0.0.4/mlx_cluster/libmlx_cluster.dylib +0 -0
  6. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4/mlx_cluster.egg-info}/PKG-INFO +6 -3
  7. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/mlx_cluster.egg-info/SOURCES.txt +1 -0
  8. mlx_cluster-0.0.4/mlx_cluster.egg-info/requires.txt +9 -0
  9. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/pyproject.toml +12 -5
  10. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/random_walks/RandomWalk.cpp +2 -0
  11. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/setup.py +2 -2
  12. mlx_cluster-0.0.4/tests/test_random_walk.py +38 -0
  13. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/tests/test_rejection_sampling.py +1 -15
  14. mlx_cluster-0.0.2/mlx_cluster/libmlx_cluster.dylib +0 -0
  15. mlx_cluster-0.0.2/mlx_cluster.egg-info/requires.txt +0 -6
  16. mlx_cluster-0.0.2/tests/test_random_walk.py +0 -58
  17. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/LICENSE +0 -0
  18. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/MANIFEST.in +0 -0
  19. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/README.md +0 -0
  20. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/bindings.cpp +0 -0
  21. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/mlx_cluster/__init__.py +0 -0
  22. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/mlx_cluster/mlx_cluster.metallib +0 -0
  23. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/mlx_cluster.egg-info/dependency_links.txt +0 -0
  24. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/mlx_cluster.egg-info/not-zip-safe +0 -0
  25. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/mlx_cluster.egg-info/top_level.txt +0 -0
  26. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/random_walks/BiasedRandomWalk.cpp +0 -0
  27. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/random_walks/BiasedRandomWalk.h +0 -0
  28. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/random_walks/RandomWalk.h +0 -0
  29. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/random_walks/random_walk.metal +0 -0
  30. {mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/setup.cfg +0 -0
{mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/CMakeLists.txt
@@ -9,7 +9,6 @@ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
  option(BUILD_SHARED_LIBS "Build extensions as a shared library" ON)

  # ----- Dependencies required ----
- find_package(fmt REQUIRED)
  find_package(MLX CONFIG REQUIRED)
  find_package(Python 3.8 COMPONENTS Interpreter Development.Module REQUIRED)
  execute_process(
{mlx_cluster-0.0.2/mlx_cluster.egg-info → mlx_cluster-0.0.4}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mlx_cluster
- Version: 0.0.2
+ Version: 0.0.4
  Summary: C++ and Metal extensions for MLX CTC Loss
  Author-email: Vinay Pandya <vinayharshadpandya27@gmail.com>
  Project-URL: Homepage, https://github.com/vinayhpandya/mlx_cluster
@@ -15,8 +15,11 @@ Description-Content-Type: text/markdown
  License-File: LICENSE
  Provides-Extra: dev
  Provides-Extra: test
- Requires-Dist: torch_geometric; extra == "test"
- Requires-Dist: pytest; extra == "test"
+ Requires-Dist: mlx_graphs==0.0.7; extra == "test"
+ Requires-Dist: torch==2.2.0; extra == "test"
+ Requires-Dist: mlx>=0.17.0; extra == "test"
+ Requires-Dist: pytest==7.4.4; extra == "test"
+ Requires-Dist: scipy==1.12.0; extra == "test"

  # mlx_cluster

{mlx_cluster-0.0.2 → mlx_cluster-0.0.4/mlx_cluster.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: mlx_cluster
- Version: 0.0.2
+ Version: 0.0.4
  Summary: C++ and Metal extensions for MLX CTC Loss
  Author-email: Vinay Pandya <vinayharshadpandya27@gmail.com>
  Project-URL: Homepage, https://github.com/vinayhpandya/mlx_cluster
@@ -15,8 +15,11 @@ Description-Content-Type: text/markdown
  License-File: LICENSE
  Provides-Extra: dev
  Provides-Extra: test
- Requires-Dist: torch_geometric; extra == "test"
- Requires-Dist: pytest; extra == "test"
+ Requires-Dist: mlx_graphs==0.0.7; extra == "test"
+ Requires-Dist: torch==2.2.0; extra == "test"
+ Requires-Dist: mlx>=0.17.0; extra == "test"
+ Requires-Dist: pytest==7.4.4; extra == "test"
+ Requires-Dist: scipy==1.12.0; extra == "test"

  # mlx_cluster

{mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/mlx_cluster.egg-info/SOURCES.txt
@@ -7,6 +7,7 @@ pyproject.toml
  setup.py
  mlx_cluster/__init__.py
  mlx_cluster/_ext.cpython-311-darwin.so
+ mlx_cluster/libmlx.dylib
  mlx_cluster/libmlx_cluster.dylib
  mlx_cluster/mlx_cluster.metallib
  mlx_cluster.egg-info/PKG-INFO
mlx_cluster-0.0.4/mlx_cluster.egg-info/requires.txt
@@ -0,0 +1,9 @@
+
+ [dev]
+
+ [test]
+ mlx_graphs==0.0.7
+ torch==2.2.0
+ mlx>=0.17.0
+ pytest==7.4.4
+ scipy==1.12.0
{mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "mlx_cluster"
- version = "0.0.2"
+ version = "0.0.4"
  authors = [
  { name = "Vinay Pandya", email = "vinayharshadpandya27@gmail.com" },
  ]
@@ -17,7 +17,13 @@ classifiers = [

  [project.optional-dependencies]
  dev = []
-
+ test = [
+ "mlx_graphs==0.0.7",
+ "torch==2.2.0",
+ "mlx>=0.17.0",
+ "pytest==7.4.4",
+ "scipy==1.12.0",
+ ]
  [project.urls]
  Homepage = "https://github.com/vinayhpandya/mlx_cluster"
  Issues = "https://github.com/vinayhpandya/mlx_cluster/Issues"
@@ -27,8 +33,9 @@ Issues = "https://github.com/vinayhpandya/mlx_cluster/Issues"
  requires = [
  "setuptools>=42",
  "cmake>=3.24",
- "mlx==0.15.*",
+ "mlx==0.18.0",
  "nanobind@git+https://github.com/wjakob/nanobind.git@2f04eac452a6d9142dedb957701bdb20125561e4",
- "fmt"
  ]
- build-backend = "setuptools.build_meta"
+
+
+ build-backend = "setuptools.build_meta"
{mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/random_walks/RandomWalk.cpp
@@ -37,6 +37,7 @@ namespace mlx::core {
  auto* col_values = col.data<int64_t>();
  auto* rand_values = rand.data<float>();

+ std::cout<<"After evaluating outputs"<<std::endl;
  for (int64_t n = 0; n < numel; n++) {
  int64_t n_cur = start_values[n];
  n_out_ptr[n * (walk_length_ + 1)] = n_cur;
@@ -136,6 +137,7 @@ std::vector<std::vector<int>> RandomWalk::output_shapes(const std::vector<array>

  array random_walk(const array& rowptr, const array& col, const array& start, const array& rand, int walk_length, StreamOrDevice s)
  {
+ std::cout<<"Inside random walk"<<std::endl;
  int nodes = start.size();
  auto primitive = std::make_shared<RandomWalk>(walk_length, to_stream(s));
  return array::make_arrays({{nodes,walk_length+1},{nodes, walk_length}},
{mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/setup.py
@@ -4,13 +4,13 @@ from mlx import extension
  if __name__ == "__main__":
  setup(
  name="mlx_cluster",
- version="0.0.2",
+ version="0.0.4",
  description="Sample C++ and Metal extensions for MLX primitives.",
  ext_modules=[extension.CMakeExtension("mlx_cluster._ext")],
  cmdclass={"build_ext": extension.CMakeBuild},
  packages=["mlx_cluster"],
  package_data={"mlx_cluster": ["*.so", "*.dylib", "*.metallib"]},
- extras_require={"dev": [], "test": ["torch_geometric", "pytest"]},
+ extras_require={"dev": []},
  zip_safe=False,
  python_requires=">=3.8",
  )
mlx_cluster-0.0.4/tests/test_random_walk.py
@@ -0,0 +1,38 @@
+ import mlx.core as mx
+ import numpy as np
+ import time
+
+ # Torch dataset
+ import torch
+ from torch.utils.data import DataLoader
+
+ loader = DataLoader(range(2708), batch_size=2000)
+ start_indices = next(iter(loader))
+
+
+ from mlx_graphs.datasets import PlanetoidDataset
+ from mlx_graphs.utils.sorting import sort_edge_index
+ from torch.utils.data import DataLoader
+ from mlx_cluster import random_walk
+
+ cora_dataset = PlanetoidDataset(name="cora", base_dir="~")
+ # For some reason int_64t and int_32t are not compatible
+ edge_index = cora_dataset.graphs[0].edge_index.astype(mx.int64)
+
+ # Convert edge index into a CSR matrix
+ sorted_edge_index = sort_edge_index(edge_index=edge_index)
+ row_mlx = sorted_edge_index[0][0]
+ col_mlx = sorted_edge_index[0][1]
+ _, counts_mlx = np.unique(np.array(row_mlx, copy=False), return_counts=True)
+ cum_sum_mlx = counts_mlx.cumsum()
+ row_ptr_mlx = mx.concatenate([mx.array([0]), mx.array(cum_sum_mlx)])
+ start_indices = mx.array(start_indices.numpy())
+
+ rand_data = mx.random.uniform(shape=[start_indices.shape[0], 5])
+ start_time = time.time()
+
+ node_sequence = random_walk(
+ row_ptr_mlx, col_mlx, start_indices, rand_data, 5, stream=mx.cpu
+ )
+ print("Time taken to complete 1000 random walks : ", time.time() - start_time)
+ print("MLX random walks are", node_sequence)
{mlx_cluster-0.0.2 → mlx_cluster-0.0.4}/tests/test_rejection_sampling.py
@@ -4,17 +4,8 @@ import time

  # Torch dataset
  import torch
- import torch_geometric.datasets as pyg_datasets
- from torch_geometric.utils import sort_edge_index
- from torch_geometric.utils.num_nodes import maybe_num_nodes
- from torch_geometric.utils.sparse import index2ptr
  from torch.utils.data import DataLoader

- torch_planetoid = pyg_datasets.Planetoid(root="data/Cora", name="Cora")
- edge_index_torch = torch_planetoid.edge_index
- num_nodes = maybe_num_nodes(edge_index=edge_index_torch)
- row, col = sort_edge_index(edge_index=edge_index_torch, num_nodes=num_nodes)
- row_ptr, col = index2ptr(row, num_nodes), col
  loader = DataLoader(range(2708), batch_size=2000)
  start_indices = next(iter(loader))
  # random_walks = torch.ops.torch_cluster.random_walk(
@@ -35,15 +26,10 @@ _, counts_mlx = np.unique(np.array(row_mlx, copy=False), return_counts=True)
  cum_sum_mlx = counts_mlx.cumsum()
  row_ptr_mlx = mx.concatenate([mx.array([0]), mx.array(cum_sum_mlx)])
  start_indices = mx.array(start_indices.numpy())
- print("row pointer datatype", row_ptr_mlx.dtype)
- print("col datatype", col_mlx.dtype)
- print("start pointer datatype", start_indices.dtype)
- assert mx.array_equal(row_ptr_mlx, mx.array(row_ptr.numpy())), "Arrays not equal"
- assert mx.array_equal(col_mlx, mx.array(col.numpy())), "Col arrays are not equal"
  rand_data = mx.random.uniform(shape=[start_indices.shape[0], 5])
  start_time = time.time()
  node_sequence = rejection_sampling(
  row_ptr_mlx, col_mlx, start_indices, 5, 1.0, 3.0, stream=mx.cpu
  )
- # print("Time taken to complete 1000 random walks : ", time.time() - start_time)
+ print("Time taken to complete 1000 random walks : ", time.time() - start_time)
  print(node_sequence)
mlx_cluster-0.0.2/mlx_cluster.egg-info/requires.txt
@@ -1,6 +0,0 @@
-
- [dev]
-
- [test]
- torch_geometric
- pytest
mlx_cluster-0.0.2/tests/test_random_walk.py
@@ -1,58 +0,0 @@
- import mlx.core as mx
- import numpy as np
- import time
-
- # Torch dataset
- import torch
- import torch_geometric.datasets as pyg_datasets
- from torch_geometric.utils import sort_edge_index
- from torch_geometric.utils.num_nodes import maybe_num_nodes
- from torch_geometric.utils.sparse import index2ptr
- from torch.utils.data import DataLoader
-
- torch_planetoid = pyg_datasets.Planetoid(root="data/Cora", name="Cora")
- edge_index_torch = torch_planetoid.edge_index
- num_nodes = maybe_num_nodes(edge_index=edge_index_torch)
- row, col = sort_edge_index(edge_index=edge_index_torch, num_nodes=num_nodes)
- row_ptr, col = index2ptr(row, num_nodes), col
- loader = DataLoader(range(2708), batch_size=2000)
- start_indices = next(iter(loader))
- print(edge_index_torch.dtype)
- print(row_ptr.dtype)
- print(col.dtype)
- print(start_indices.dtype)
- random_walks = torch.ops.torch_cluster.random_walk(
- row_ptr, col, start_indices, 5, 1.0, 1.0
- )
-
- from mlx_graphs.datasets import PlanetoidDataset
- from mlx_graphs.utils.sorting import sort_edge_index
- from torch.utils.data import DataLoader
- from mlx_cluster import random_walk
-
- cora_dataset = PlanetoidDataset(name="cora", base_dir="~")
- edge_index = cora_dataset.graphs[0].edge_index.astype(mx.int64)
- sorted_edge_index = sort_edge_index(edge_index=edge_index)
- print(edge_index.dtype)
- row_mlx = sorted_edge_index[0][0]
- col_mlx = sorted_edge_index[0][1]
- _, counts_mlx = np.unique(np.array(row_mlx, copy=False), return_counts=True)
- cum_sum_mlx = counts_mlx.cumsum()
- row_ptr_mlx = mx.concatenate([mx.array([0]), mx.array(cum_sum_mlx)])
- start_indices = mx.array(start_indices.numpy())
- print("Start indices data type is ", start_indices.dtype)
- print("Col mlx data type is ", col_mlx.dtype)
- print("Row mlx data type is ", row_ptr_mlx.dtype)
- assert mx.array_equal(row_ptr_mlx, mx.array(row_ptr.numpy())), "Arrays not equal"
- assert mx.array_equal(col_mlx, mx.array(col.numpy())), "Col arrays are not equal"
- rand_data = mx.random.uniform(shape=[start_indices.shape[0], 5])
- start_time = time.time()
- print("Start indices data type is ", start_indices.dtype)
- print("Col mlx data type is ", col_mlx.dtype)
- print("Row mlx data type is ", row_ptr_mlx.dtype)
- node_sequence = random_walk(
- row_ptr_mlx, col_mlx, start_indices, rand_data, 5, stream=mx.gpu
- )
- # print("Time taken to complete 1000 random walks : ", time.time() - start_time)
- print("Torch random walks are", random_walks[0])
- print("MLX random walks are", node_sequence)