dummy-spatialdata 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dummy_spatialdata/__init__.py +19 -0
- dummy_spatialdata/examples/bird-color.png +0 -0
- dummy_spatialdata/examples/bird.png +0 -0
- dummy_spatialdata/examples/nuclei.tif +0 -0
- dummy_spatialdata/generate_dataset.py +88 -0
- dummy_spatialdata/generate_imagemodel.py +68 -0
- dummy_spatialdata/generate_labelmodel.py +39 -0
- dummy_spatialdata/generate_shapemodel.py +75 -0
- dummy_spatialdata/generate_tablemodel.py +30 -0
- dummy_spatialdata/generate_transformations.py +48 -0
- dummy_spatialdata-0.1.0.dist-info/METADATA +89 -0
- dummy_spatialdata-0.1.0.dist-info/RECORD +13 -0
- dummy_spatialdata-0.1.0.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""dummy_spatialdata: generate dummy SpatialData objects for testing purposes."""

from importlib.metadata import version

from .generate_dataset import generate_dataset
from .generate_imagemodel import generate_imagemodel
from .generate_labelmodel import generate_labelmodel
from .generate_shapemodel import generate_shapemodel
from .generate_tablemodel import generate_tablemodel
from .generate_transformations import generate_transformations

# Public API of the package.
__all__ = [
    "generate_dataset",
    "generate_imagemodel",
    "generate_labelmodel",
    "generate_shapemodel",
    "generate_tablemodel",
    "generate_transformations"
]

# Resolve the installed distribution's version at import time.
__version__ = version("dummy-spatialdata")
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import numpy as np
|
|
4
|
+
import spatialdata as sd
|
|
5
|
+
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from importlib.resources import files, as_file
|
|
8
|
+
from PIL import Image
|
|
9
|
+
from typing import Optional
|
|
10
|
+
from .generate_imagemodel import generate_imagemodel
|
|
11
|
+
from .generate_labelmodel import generate_labelmodel
|
|
12
|
+
from .generate_shapemodel import generate_shapemodel
|
|
13
|
+
from .generate_tablemodel import generate_tablemodel
|
|
14
|
+
from .generate_transformations import generate_transformations
|
|
15
|
+
from spatialdata.models import TableModel
|
|
16
|
+
|
|
17
|
+
def generate_dataset(
    images: Optional[list] = None,
    labels: Optional[list] = None,
    shapes: Optional[list] = None,
    tables: Optional[list] = None,
    SEED: Optional[int] = 42
) -> sd.SpatialData:
    """Generate a dummy SpatialData object with specified elements.

    Parameters
    ----------
    images : list of dict, optional
        One spec dict per image element, e.g.
        {"type": "rgb", "n_layers": 4} or {"type": "grayscale", "n_layers": 4}.
    labels : list of dict, optional
        One spec dict per label element (see generate_labelmodel).
    shapes : list of dict, optional
        One spec dict per shape element (see generate_shapemodel).
    tables : list of dict, optional
        One spec dict per table element (see generate_tablemodel).
    SEED : int, optional
        Seed forwarded to the shape generator for reproducibility, by default 42.

    Returns
    -------
    sd.SpatialData
        A SpatialData object populated with random data according to the specified parameters.
    """

    def _keyed(models, prefix):
        # Name each generated element "<prefix>_<i>" in construction order.
        return {f"{prefix}_{i}": m for i, m in enumerate(models)}

    # image model
    if images is None:
        images = {}
    else:
        images = _keyed([generate_imagemodel(img) for img in images], "image")

    # label model
    if labels is None:
        labels = {}
    else:
        labels = _keyed([generate_labelmodel(lbl) for lbl in labels], "label")

    # shape model
    if shapes is None:
        shapes = {}
    else:
        shapes = _keyed([generate_shapemodel(shp, SEED) for shp in shapes], "shape")

    # tables
    if tables is None:
        tables = {}
    else:
        tables = _keyed([generate_tablemodel(tbl) for tbl in tables], "table")

    # create a SpatialData object and add the image data
    sdata = sd.SpatialData(
        images=images,
        labels=labels,
        shapes=shapes,
        tables=tables,
    )

    # map shapes to tables: re-align each table's instance ids with the index
    # of the element it annotates.
    # BUG FIX: the original guard was `if tables is not {}`, an identity
    # comparison against a fresh dict literal that is always True; a
    # truthiness check is the intended guard.
    if tables:
        for tbl in tables.values():
            region = tbl.uns["spatialdata_attrs"]["region"]
            element_type = region.split("_")[0]
            instance_key = tbl.uns["spatialdata_attrs"]["instance_key"]
            if region in sdata._shared_keys:
                element = getattr(sdata, element_type + "s")[region]
                # use the declared instance_key instead of hard-coding it
                tbl.obs[instance_key] = element.index

    return sdata
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from importlib.resources import files, as_file
|
|
7
|
+
from PIL import Image
|
|
8
|
+
from typing import Optional
|
|
9
|
+
from spatialdata.models import Image2DModel
|
|
10
|
+
from .generate_transformations import generate_transformations
|
|
11
|
+
|
|
12
|
+
def generate_imagemodel(
    input: Optional[dict] = None,
) -> Image2DModel:
    """Generate a dummy Image2DModel object from a bundled example image.

    Parameters
    ----------
    input : dict, optional
        Specification of the image to build. Recognized keys:
        - "type": "rgb" (color bird PNG) or "grayscale" (nuclei TIFF); required.
        - "n_layers": number of multiscale levels (including the base level); required.
        - "shape": optional {"x": width, "y": height} to resize the image to.
        - "transformations": optional spec forwarded to generate_transformations.
        When None, nothing is built and None is returned.

    Returns
    -------
    Image2DModel
        An Image2DModel parsed from the example image, or None when no input was given.

    Raises
    ------
    ValueError
        If ``input["type"]`` is neither "rgb" nor "grayscale".
    """

    # nothing requested -> nothing built
    if input is None:
        return None

    # default to no extra transformations
    if "transformations" not in input:
        input["transformations"] = None

    # locate the example images packaged with dummy_spatialdata
    resource = files("dummy_spatialdata")

    # load the requested image flavor and bring it to channels-first layout
    if input["type"] == "rgb":
        with as_file(resource.joinpath("examples", "bird-color.png")) as path:
            img = Image.open(path)
            img = resize_image(img, input)
            img = np.array(img).astype(np.uint8)
            # PIL yields (y, x, c); Image2DModel expects (c, y, x)
            img = img.transpose((2, 0, 1))
    elif input["type"] == "grayscale":
        with as_file(resource.joinpath("examples", "nuclei.tif")) as path:
            img = Image.open(path)
            img = resize_image(img, input)
            img = np.array(img).astype(np.uint8)
            # add a singleton channel axis -> (1, y, x)
            img = img.reshape(1, *img.shape)
    else:
        raise ValueError("Please type either 'rgb' or 'grayscale' for the image type.")

    # n_layers includes the base level, so request n_layers - 1 factor-2 downscales
    imagemodel = Image2DModel.parse(data=img,
                                    scale_factors=(2,) * (input["n_layers"]-1),
                                    transformations=generate_transformations(input["transformations"]))

    return imagemodel
|
|
61
|
+
|
|
62
|
+
def resize_image(image: Image.Image, input: dict) -> Image.Image:
    """Resize *image* to the "shape" requested in *input*, if any.

    Parameters
    ----------
    image : PIL.Image.Image
        The image to (optionally) resize. (The original annotation named the
        PIL ``Image`` module rather than the ``Image.Image`` class.)
    input : dict
        Spec dict; when it contains "shape" with "x" (width) and "y" (height)
        entries the image is resized, otherwise it is returned unchanged.

    Returns
    -------
    PIL.Image.Image
        The resized image, or the original image when no "shape" was requested.
    """
    # no target shape -> passthrough
    if "shape" not in input:
        return image
    new_width = input["shape"]['x']
    new_height = input["shape"]['y']
    # LANCZOS resampling gives high-quality downscales for photographs
    resized = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    return resized
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
from importlib.resources import files, as_file
|
|
6
|
+
from typing import Optional
|
|
7
|
+
from spatialdata.models import Labels2DModel
|
|
8
|
+
from .generate_transformations import generate_transformations
|
|
9
|
+
|
|
10
|
+
def generate_labelmodel(
    input: Optional[dict] = None,
    SEED: Optional[int] = None,
) -> Labels2DModel:
    """Generate a dummy Labels2DModel with sparse random label values.

    Parameters
    ----------
    input : dict, optional
        Specification of the label image. Recognized keys:
        - "shape": {"x": ..., "y": ...} size of the label array; required.
        - "n_labels": exclusive upper bound for the random label ids; required.
        - "n_layers": number of multiscale levels (including the base level); required.
        - "transformations": optional spec forwarded to generate_transformations.
        When None, nothing is built and None is returned.
    SEED : int, optional
        Seed for the random generator; the default (None) keeps the previous
        non-deterministic behavior. Added for consistency with generate_shapemodel.

    Returns
    -------
    Labels2DModel
        A Labels2DModel parsed from the random array, or None when no input was given.
    """

    # return None if no input is provided
    if input is None:
        return None

    # check transformations
    if "transformations" not in input:
        input["transformations"] = None

    # generate labels: ~10% of the pixels get a non-zero label id
    # (the original comment claimed 5% while the probability was 0.1)
    # NOTE(review): "x" fills the row axis here — confirm against callers if
    # the array is meant to be (y, x).
    rows, cols = input["shape"]["x"], input["shape"]["y"]
    prob_nonzero = 0.1  # fraction of non-zero values

    rng = np.random.default_rng(SEED)
    arr = np.zeros((rows, cols), dtype=int)
    mask = rng.random((rows, cols)) < prob_nonzero
    arr[mask] = rng.integers(1, input["n_labels"], size=mask.sum())

    # label model
    labelmodel = Labels2DModel.parse(data=arr,
                                     scale_factors=(2,) * (input["n_layers"]-1),
                                     transformations=generate_transformations(input["transformations"]))

    return labelmodel
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
from importlib.resources import files, as_file
|
|
6
|
+
from typing import Optional
|
|
7
|
+
from spatialdata.models import ShapesModel
|
|
8
|
+
import geopandas as gpd
|
|
9
|
+
from shapely.geometry import Polygon, Point
|
|
10
|
+
from .generate_transformations import generate_transformations
|
|
11
|
+
|
|
12
|
+
def generate_shapemodel(
    input: Optional[dict] = None,
    SEED: Optional[int] = 42
) -> ShapesModel:
    """Generate a dummy ShapesModel of random non-overlapping polygons.

    Parameters
    ----------
    input : dict, optional
        Specification of the shapes. Recognized keys:
        - "shape": {"x": ..., "y": ...} extent of the canvas; required.
        - "n_shapes": number of polygons to place; required.
        - "transformations": optional spec forwarded to generate_transformations.
        When None, nothing is built and None is returned.
    SEED : int, optional
        Base seed for center placement and polygon outlines, by default 42.

    Returns
    -------
    ShapesModel
        The parsed shapes element (with transformations attached), or None
        when no input was given.
    """

    if input is None:
        return None

    # check transformations
    if "transformations" not in input:
        input["transformations"] = None

    # polygon radius and minimum spacing scale with the smaller canvas side
    RADIUS = 0.08 * min(input["shape"]["x"], input["shape"]["y"])
    MIN_GAP = 0.01 * min(input["shape"]["x"], input["shape"]["y"])

    centers = generate_non_overlapping_centers(input["shape"]["x"], input["shape"]["y"], RADIUS, input["n_shapes"], MIN_GAP, SEED)
    # one distinct seed per polygon so outlines differ but stay reproducible
    polygon_seeds = [SEED + i for i in range(input["n_shapes"])]
    polygons = [Polygon(border_polygon_points(c, RADIUS, 10, SEED = seed)) for c, seed in zip(centers, polygon_seeds)]
    gdf = gpd.GeoDataFrame(geometry=polygons)

    # shape model
    shapemodel = ShapesModel.parse(gdf,
                                   transformations=generate_transformations(input["transformations"]))

    # BUG FIX: the original returned the raw GeoDataFrame, discarding the
    # parsed model and the transformations attached by ShapesModel.parse.
    return shapemodel
|
|
39
|
+
|
|
40
|
+
def circles_overlap(c1, c2, radius, min_gap=0.0):
    """Return True when two equal-radius circles are closer than allowed.

    The circles (both of the given radius) count as overlapping when the
    distance between their centers is below ``2 * radius + min_gap``.
    """
    dx = c2[0] - c1[0]
    dy = c2[1] - c1[1]
    min_allowed = 2 * radius + min_gap
    return np.hypot(dx, dy) < min_allowed
|
|
44
|
+
|
|
45
|
+
def generate_non_overlapping_centers(width, height, radius, n_circles, min_gap=0.15, SEED=1, max_tries=10000):
    """Rejection-sample circle centers that fit inside the canvas.

    Candidate centers are drawn uniformly within the canvas (inset by the
    radius) and kept only when they overlap no previously accepted circle.

    Raises
    ------
    RuntimeError
        When max_tries attempts are exhausted before n_circles centers fit.
    """
    rng = np.random.default_rng(SEED)
    centers = []

    for _ in range(max_tries):
        if len(centers) >= n_circles:
            break
        # draw x then y, matching the original sampling order
        x = rng.uniform(radius, width - radius)
        y = rng.uniform(radius, height - radius)
        candidate = (x, y)

        is_clear = not any(circles_overlap(candidate, c, radius, min_gap=min_gap) for c in centers)
        if is_clear:
            centers.append(candidate)

    if len(centers) < n_circles:
        raise RuntimeError(f"Could only place {len(centers)} circles after {max_tries} attempts.")

    return np.array(centers)
|
|
63
|
+
|
|
64
|
+
def border_polygon_points(center, radius, n_points, SEED=1):
    """Sample n_points lying exactly on a circle's border, ordered by angle.

    Returns an (n_points, 2) array of x/y coordinates on the circle of the
    given radius around center; sorting the angles keeps the resulting
    polygon outline from self-intersecting.
    """
    rng = np.random.default_rng(SEED)

    # random, angularly ordered positions on the border
    theta = np.sort(rng.uniform(0, 2 * np.pi, n_points))

    # offsets from the center, stacked as (n_points, 2)
    offsets = radius * np.stack([np.cos(theta), np.sin(theta)], axis=1)

    return np.asarray(center) + offsets
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import numpy as np
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from importlib.resources import files, as_file
|
|
7
|
+
from PIL import Image
|
|
8
|
+
from typing import Optional
|
|
9
|
+
from spatialdata.models import TableModel
|
|
10
|
+
import spatialdata as sd
|
|
11
|
+
|
|
12
|
+
def generate_tablemodel(
    input: Optional[dict] = None,
    sdata: sd.SpatialData = None
) -> TableModel:
    """Annotate the provided AnnData table and parse it as a TableModel.

    The table in ``input["table"]`` is tagged in place with the region name
    "<element>_<element_index>", an instance-id column, and the spatialdata
    bookkeeping metadata, then parsed. ``sdata`` is currently unused.
    """
    if input is None:
        return None

    # region string, e.g. "shape_0"
    target_region = "_".join([input["element"], str(input["element_index"])])
    adata = input["table"]

    # attach metadata (mutates the caller's AnnData)
    adata.obs['instance_id'] = adata.obs.index
    adata.obs['region'] = target_region
    adata.uns["spatialdata_attrs"] = {
        "region": target_region,
        "region_key": "region",
        "instance_key": "instance_id",
    }

    return TableModel.parse(adata)
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
from spatialdata.transformations import (
|
|
2
|
+
Affine,
|
|
3
|
+
Scale,
|
|
4
|
+
Sequence,
|
|
5
|
+
Translation,
|
|
6
|
+
BaseTransformation,
|
|
7
|
+
Identity
|
|
8
|
+
)
|
|
9
|
+
|
|
10
|
+
def generate_transformations(
    trans: dict | None = None
) -> dict[str, BaseTransformation] | None:
    """Build a coordinate-system -> transformation mapping from a spec.

    BUG FIX: the original annotated ``trans`` with ``Optional``, which this
    module never imports (NameError at import time); PEP 604 unions need no
    import (the package requires Python >= 3.13). The return annotation was
    also wrong: the function returns a dict, not a list.

    Parameters
    ----------
    trans : dict, optional
        Mapping of one coordinate-system name to a list of transformation
        names ("translation", "scale", "affine"), e.g.
        {"trans_0": ["affine", "scale"]}. Only the first entry is used.
        When None, None is returned and callers fall back to model defaults.

    Returns
    -------
    dict[str, BaseTransformation] | None
        {"global": Identity(), <coord_system>: <transformation>} where the
        value is a single transformation or a Sequence of them, or None
        when no spec was given.

    Raises
    ------
    ValueError
        For an unrecognized transformation name.
    """

    if trans is None:
        return None

    # only the first coordinate system in the spec is honored
    coord_system, names = next(iter(trans.items()))

    # fixed demo parameters for each supported transformation kind
    alltrans = []
    for name in names:
        if name == "translation":
            tr = Translation([10, 20], axes = ("x", "y"))
        elif name == "scale":
            tr = Scale([0.5, 0.5], axes = ("x", "y"))
        elif name == "affine":
            tr = Affine(matrix = [
                [0.5, 0.2, 0],
                [0.1, 0.5, 0],
                [0, 0, 1],
            ],
            input_axes=("x", "y"), output_axes=("x", "y"))
        else:
            raise ValueError(f"Transformation type '{name}' not recognized. Please choose from 'translation', 'scale', or 'affine'.")
        alltrans.append(tr)

    # collapse to a single transformation (a Sequence when several were given)
    # NOTE: an empty name list raises IndexError here, as in the original.
    combined = Sequence(alltrans) if len(alltrans) > 1 else alltrans[0]

    # every element always also lives in "global" with an identity transform
    finaltrans = {"global": Identity()}
    finaltrans[coord_system] = combined

    return finaltrans
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: dummy-spatialdata
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A package for creating arbitrary spatialdata for testing purposes.
|
|
5
|
+
Project-URL: Documentation, https://dummy-spatialdata.readthedocs.io/
|
|
6
|
+
Project-URL: Homepage, https://github.com/Artur-man/dummy-spatialdata
|
|
7
|
+
Project-URL: Source, https://github.com/Artur-man/dummy-spatialdata
|
|
8
|
+
Author-email: Artür Manukyan <amanukyan.umms@gmail.com>
|
|
9
|
+
License: MIT
|
|
10
|
+
Requires-Python: >=3.13
|
|
11
|
+
Requires-Dist: dummy-anndata>=0.0.3
|
|
12
|
+
Requires-Dist: geopandas>=0.14
|
|
13
|
+
Requires-Dist: pillow>=12.1.1
|
|
14
|
+
Requires-Dist: requests>=2.31
|
|
15
|
+
Requires-Dist: shapely>=2.0.1
|
|
16
|
+
Requires-Dist: spatialdata-plot>=0.3.2
|
|
17
|
+
Requires-Dist: spatialdata>=0.7.2
|
|
18
|
+
Provides-Extra: dev
|
|
19
|
+
Description-Content-Type: text/markdown
|
|
20
|
+
|
|
21
|
+
# dummy-spatialdata
|
|
22
|
+
|
|
23
|
+
Allows generating dummy spatialdata objects, which can be useful for testing purposes.
|
|
24
|
+
|
|
25
|
+
## Installation
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
pip install dummy-spatialdata
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
## Example usage
|
|
32
|
+
```{python}
|
|
33
|
+
from dummy_spatialdata import generate_dataset
|
|
34
|
+
import dummy_anndata
|
|
35
|
+
import spatialdata_plot as sdp
|
|
36
|
+
import spatialdata as sd
|
|
37
|
+
import matplotlib.pyplot as plt
|
|
38
|
+
import anndata as ad
|
|
39
|
+
|
|
40
|
+
# generate dummy anndata
|
|
41
|
+
adata = dummy_anndata.generate_dataset(n_obs=12, n_vars=20)
|
|
42
|
+
|
|
43
|
+
# generate dummy spatialdata
|
|
44
|
+
sdata = generate_dataset(
|
|
45
|
+
images = [
|
|
46
|
+
{"type": "rgb", "n_layers": 4, "shape": {"x": 1000, "y": 1000}, "transformations": {"trans_0": ["affine"]}},
|
|
47
|
+
{"type": "grayscale", "n_layers": 1, "shape": {"x": 1000, "y": 1000}, "transformations": {"trans_0": ["affine"]}},
|
|
48
|
+
],
|
|
49
|
+
labels = [
|
|
50
|
+
{"n_labels": 12, "n_layers": 4, "shape": {"x": 1000, "y": 1000}},
|
|
51
|
+
{"n_labels": 12, "n_layers": 0, "shape": {"x": 100, "y": 100}},
|
|
52
|
+
],
|
|
53
|
+
shapes = [
|
|
54
|
+
{"n_shapes": 12, "shape": {"x": 1000, "y": 1000}},
|
|
55
|
+
{"n_shapes": 20, "shape": {"x": 1000, "y": 1000}},
|
|
56
|
+
],
|
|
57
|
+
tables = [
|
|
58
|
+
{"table": adata, "element": "shape", "element_index": 0}
|
|
59
|
+
],
|
|
60
|
+
SEED=13
|
|
61
|
+
)
|
|
62
|
+
sdata
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
```
|
|
66
|
+
SpatialData object
|
|
67
|
+
├── Images
|
|
68
|
+
│ ├── 'image_0': DataTree[cyx] (3, 1000, 1000), (3, 500, 500), (3, 250, 250), (3, 125, 125)
|
|
69
|
+
│ └── 'image_1': DataTree[cyx] (1, 1000, 1000)
|
|
70
|
+
├── Labels
|
|
71
|
+
│ ├── 'label_0': DataTree[yx] (1000, 1000), (500, 500), (250, 250), (125, 125)
|
|
72
|
+
│ └── 'label_1': DataTree[yx] (100, 100)
|
|
73
|
+
├── Shapes
|
|
74
|
+
│ ├── 'shape_0': GeoDataFrame shape: (12, 1) (2D shapes)
|
|
75
|
+
│ └── 'shape_1': GeoDataFrame shape: (20, 1) (2D shapes)
|
|
76
|
+
└── Tables
|
|
77
|
+
└── 'table_0': AnnData (12, 20)
|
|
78
|
+
with coordinate systems:
|
|
79
|
+
▸ 'global', with elements:
|
|
80
|
+
image_0 (Images), image_1 (Images), label_0 (Labels), label_1 (Labels), shape_0 (Shapes), shape_1 (Shapes)
|
|
81
|
+
▸ 'trans_0', with elements:
|
|
82
|
+
image_0 (Images), image_1 (Images)
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
You can plot the demo data now!
|
|
86
|
+
|
|
87
|
+
```{python}
|
|
88
|
+
sdata.pl.render_images("image_0", ).pl.render_shapes("shape_0", color="Gene001", table_name = "table_0", table_layer = "float_matrix").pl.show(coordinate_systems = "global")
|
|
89
|
+
```
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
dummy_spatialdata/examples/bird-color.png,sha256=SF1vaF18PAVYC1sZgXxa8nHmEbRLycYbOPJgnWFk4T4,556865
|
|
2
|
+
dummy_spatialdata/examples/bird.png,sha256=-h7s_e3eELQcj7-9g3lIvGo_EyfZDW517Y3o6sg6qqg,187719
|
|
3
|
+
dummy_spatialdata/examples/nuclei.tif,sha256=bgUQSKwjmG7JSXqhVW1nZKREEuoUaZWeYoEhKcJBd2w,472093
|
|
4
|
+
dummy_spatialdata/__init__.py,sha256=HVtZz6teEqXleTBm2xGZZHAfEEYh797o4X2uf3s6fpU,583
|
|
5
|
+
dummy_spatialdata/generate_dataset.py,sha256=ZLSY8tb5Fbb58pi6N2R6NP0Qys_ioeCPNXbgk-k_2jY,2833
|
|
6
|
+
dummy_spatialdata/generate_imagemodel.py,sha256=HpKAK00FdyNoG23QkAk9U3ZWMThpQS8o5gNrnW7RNgY,2135
|
|
7
|
+
dummy_spatialdata/generate_labelmodel.py,sha256=mGREgojWndkvke4OOWexRi7FiJVYVx_nsO6jLDBtJlM,1142
|
|
8
|
+
dummy_spatialdata/generate_shapemodel.py,sha256=pR0-KQ6soFhpGwoTHw1Uq5q9Y25L9-iJAFeMDi8LwuM,2468
|
|
9
|
+
dummy_spatialdata/generate_tablemodel.py,sha256=R3K5iNBNS-OR9s2XMA6phIUHPKBJMkDSlM66MUZ52yw,780
|
|
10
|
+
dummy_spatialdata/generate_transformations.py,sha256=ltRGJ8y5Uk_Gery5cboKXUKqvGOmZF5g56mXWmQQ5Oc,1225
|
|
11
|
+
dummy_spatialdata-0.1.0.dist-info/METADATA,sha256=jvHkn8fTnRyRJRs0BtyuCDaVCQapHWJZpM3GBlJEBaY,2988
|
|
12
|
+
dummy_spatialdata-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
|
|
13
|
+
dummy_spatialdata-0.1.0.dist-info/RECORD,,
|