spacr-0.0.70-py3-none-any.whl → spacr-0.0.71-py3-none-any.whl

This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
@@ -1,15 +1,14 @@
  spacr/__init__.py,sha256=64QJU2_IUd_40TTKQ2j239rF3PJP_gyciL_rolQOxuU,1144
  spacr/__main__.py,sha256=L3Dnk-YG3lULeaMxD1mS-_t89g4qWrJ7bnpBvNiQhUE,283
  spacr/alpha.py,sha256=Y95sLEfpK2OSYKRn3M8eUOU33JJeXfV8zhrC4KnwSTY,35244
- spacr/annotate_app.py,sha256=_KlDYbnaKr_VvghMWSr6gWbP_lByPghGLiCfEIJ48so,19500
+ spacr/annotate_app.py,sha256=U7UfXEbXVAIdOc3X5ILEX2-3ac01JcwD-mIENdTh1q4,19478
  spacr/chris.py,sha256=YlBjSgeZaY8HPy6jkrT_ISAnCMAKVfvCxF0I9eAZLFM,2418
  spacr/cli.py,sha256=507jfOOEV8BoL4eeUcblvH-iiDHdBrEVJLu1ghAAPSc,1800
- spacr/core.py,sha256=EIhsuSyJQPxq1UfH3VNh3ubt403W1Ahvnz44pX35BCc,166307
+ spacr/core.py,sha256=nZSOV1l72eO7ubJl1geTaQGONtdadhNK3GNY7BDaFWs,166179
  spacr/deep_spacr.py,sha256=ljIakns6q74an5QwDU7j0xoj6jRCAz-ejY0QHj9X0d8,33193
- spacr/foldseek.py,sha256=w7M7qRYvP5syJlPErQkHzPLOkiyLN77E0XyL3NiFZwI,33540
- spacr/get_alfafold_structures.py,sha256=n0g8gne-oyAV3Uo6qxZoJq5X1cUUyD8u0pOC_W2PX40,3541
+ spacr/foldseek.py,sha256=YIP1d4Ci6CeA9jSyiv-HTDbNmAmcSM9Y_DaOs7wYzLY,33546
+ spacr/get_alfafold_structures.py,sha256=ehx_MQgb12k3hFecP6cYVlm5TLO8iWjgevy8ESyS3cw,3544
  spacr/graph_learning.py,sha256=M7KW1J72LA4hLfVNVBOqxf_4z9tXi-UyoZfhaLJXqSE,11986
- spacr/graph_learning_lap.py,sha256=MyNRLb63gsjBlui-ByZ0anHugYulL6M-OsGm8rnGBmE,3385
  spacr/gui.py,sha256=zu-i8ezLJ03jNRACK7CRgNhkM8g8-pJFwZ-OSDFzsPg,6498
  spacr/gui_2.py,sha256=FPlmvGm1VIood_YBnG44IafgjjaVfagybTnjVEOs5Ig,3299
  spacr/gui_classify_app.py,sha256=LY33wott1mR7AFYwBI9ZQZYY16lBB-wuaY4pL_poaQ0,7884
@@ -17,23 +16,21 @@ spacr/gui_mask_app.py,sha256=WKkAH0jv-SnfaZdJ8MkC7mkUIVSSrNE8lUfH3QBvUak,9747
  spacr/gui_measure_app.py,sha256=5vjjds5NFaOcE8XeuWDug9k-NI4jbTrwp54sJ7DNaNI,9625
  spacr/gui_sim_app.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  spacr/gui_utils.py,sha256=JRWwmGEEVSPgs0UtZRukdNwIUJepbP675_Fvs5qocPk,49718
- spacr/io.py,sha256=8jpnsUOZMxfhGamduiq4BXgUtfh80kF4R_S-8N3EIrw,109305
+ spacr/io.py,sha256=q6KWOvoM5d9SLfu0KJA9MB2a-R_QhQ6GaprmlkV2SH8,108463
  spacr/logger.py,sha256=7Zqr3TuuOQLWT32gYr2q1qvv7x0a2JhLANmZcnBXAW8,670
  spacr/mask_app.py,sha256=jlKmj_evveIkkyH3PYEcAshcLXN0DOPWB1oc4hAwq9E,44201
  spacr/measure.py,sha256=KOBmrVE9nrKwIoXG16wV1sy5nSj9EOa_FhK7i431V-A,55325
  spacr/old_code.py,sha256=jw67DAGoLBd7mWofVzRJSEmCI1Qrff26zIo65SEkV00,13817
  spacr/plot.py,sha256=9tQEDDIHzex-T90VaS_PfcHwbWfYEYfMeKkgEdlK6ko,62045
  spacr/sequencing.py,sha256=OiRK6gpEkuEhKoUJcU-BXWDmz4RkDxKeJCE_C6w1zJc,50503
- spacr/sim.py,sha256=fpkOs-VBU40xBFuSoO97aTewQWPSYolg5C_PkjeQm_Y,71344
+ spacr/sim.py,sha256=FveaVgBi3eypO2oVB5Dx-v0CC1Ny7UPfXkJiiRRodAk,71212
  spacr/timelapse.py,sha256=5TNmkzR_urMxy0eVB4quGdjNj2QduyiwrLL2I-udlAg,39614
- spacr/train.py,sha256=rpOJBu3ho0Oec37I1mO4_3eQ480y_4bgFiKCOgiyN8s,31741
- spacr/umap.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- spacr/utils.py,sha256=zX6zhS7UmCOA2JzBw4uDb-p0SPq5igaeYjzJ56d7hG8,171627
+ spacr/utils.py,sha256=1RPGvCO2bl3a0LfiJ_8bZwJVWyXKI5hv51toahSZNZE,169827
  spacr/version.py,sha256=axH5tnGwtgSnJHb5IDhiu4Zjk5GhLyAEDRe-rnaoFOA,409
  spacr/models/cp/toxo_pv_lumen.CP_model,sha256=2y_CindYhmTvVwBH39SNILF3rI3x9SsRn6qrMxHy3l0,26562451
- spacr-0.0.70.dist-info/LICENSE,sha256=SR-2MeGc6SCM1UORJYyarSWY_A-JaOMFDj7ReSs9tRM,1083
- spacr-0.0.70.dist-info/METADATA,sha256=cE8tuT8BTSynUjv16YSLYPrcFq2Ngn17Hc7dxV5S2KE,4928
- spacr-0.0.70.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- spacr-0.0.70.dist-info/entry_points.txt,sha256=xncHsqD9MI5wj0_p4mgZlrB8dHm_g_qF0Ggo1c78LqY,315
- spacr-0.0.70.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
- spacr-0.0.70.dist-info/RECORD,,
+ spacr-0.0.71.dist-info/LICENSE,sha256=SR-2MeGc6SCM1UORJYyarSWY_A-JaOMFDj7ReSs9tRM,1083
+ spacr-0.0.71.dist-info/METADATA,sha256=GjQatvU0m5kUjGeE21ylp35mbauQHinUl2tqeGWHOpA,5121
+ spacr-0.0.71.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ spacr-0.0.71.dist-info/entry_points.txt,sha256=xncHsqD9MI5wj0_p4mgZlrB8dHm_g_qF0Ggo1c78LqY,315
+ spacr-0.0.71.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
+ spacr-0.0.71.dist-info/RECORD,,
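
The sha256= fields in the RECORD entries above are the standard wheel RECORD digests: the urlsafe, unpadded base64 encoding of each file's SHA-256 hash, followed by the file size in bytes. As a hedged illustration (the helper name and the example path below are mine, not part of spacr), a local copy of any listed file can be checked against its entry like this:

import base64
import hashlib
from pathlib import Path

def record_entry(path):
    # Build a RECORD-style line: path,sha256=<urlsafe unpadded base64 digest>,size
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# Compare the printed line against the corresponding entry in the diff above
# (point the path at your own site-packages copy of the module).
print(record_entry("spacr/__init__.py"))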
spacr/graph_learning_lap.py (removed)
@@ -1,84 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from torch.utils.data import Dataset, DataLoader, TensorDataset
-
- # Let's assume that the feature embedding part and the dataset loading part
- # has already been taken care of, and your data is already in the format
- # suitable for PyTorch (i.e., Tensors).
-
- class FeatureEmbedder(nn.Module):
-     def __init__(self, vocab_sizes, embedding_size):
-         super(FeatureEmbedder, self).__init__()
-         self.embeddings = nn.ModuleDict({
-             key: nn.Embedding(num_embeddings=vocab_size+1,
-                               embedding_dim=embedding_size,
-                               padding_idx=vocab_size)
-             for key, vocab_size in vocab_sizes.items()
-         })
-         # Adding the 'visit' embedding
-         self.embeddings['visit'] = nn.Parameter(torch.zeros(1, embedding_size))
-
-     def forward(self, feature_map, max_num_codes):
-         # Implementation will depend on how you want to handle sparse data
-         # This is just a placeholder
-         embeddings = {}
-         masks = {}
-         for key, tensor in feature_map.items():
-             embeddings[key] = self.embeddings[key](tensor.long())
-             mask = torch.ones_like(tensor, dtype=torch.float32)
-             masks[key] = mask.unsqueeze(-1)
-
-         # Batch size hardcoded for simplicity in example
-         batch_size = 1  # Replace with actual batch size
-         embeddings['visit'] = self.embeddings['visit'].expand(batch_size, -1, -1)
-         masks['visit'] = torch.ones(batch_size, 1)
-
-         return embeddings, masks
-
- class GraphConvolutionalTransformer(nn.Module):
-     def __init__(self, embedding_size=128, num_attention_heads=1, **kwargs):
-         super(GraphConvolutionalTransformer, self).__init__()
-         # Transformer Blocks
-         self.layers = nn.ModuleList([
-             nn.TransformerEncoderLayer(
-                 d_model=embedding_size,
-                 nhead=num_attention_heads,
-                 batch_first=True)
-             for _ in range(kwargs.get('num_transformer_stack', 3))
-         ])
-         # Output Layer for Classification
-         self.output_layer = nn.Linear(embedding_size, 1)
-
-     def feedforward(self, features, mask=None, training=None):
-         # Implement feedforward logic (placeholder)
-         pass
-
-     def forward(self, embeddings, masks, mask=None, training=False):
-         features = embeddings
-         attentions = []  # Storing attentions if needed
-
-         # Pass through each Transformer block
-         for layer in self.layers:
-             features = layer(features)  # Apply transformer encoding here
-
-         if mask is not None:
-             features = features * mask
-
-         logits = self.output_layer(features[:, 0, :])  # Using the 'visit' embedding for classification
-         return logits, attentions
-
- # Usage Example
- vocab_sizes = {'dx_ints':3249, 'proc_ints':2210}
- embedding_size = 128
- gct_params = {
-     'embedding_size': embedding_size,
-     'num_transformer_stack': 3,
-     'num_attention_heads': 1
- }
- feature_embedder = FeatureEmbedder(vocab_sizes, embedding_size)
- gct_model = GraphConvolutionalTransformer(**gct_params)
-
- # Assume `feature_map` is a dictionary of tensors, and `max_num_codes` is provided
- embeddings, masks = feature_embedder(feature_map, max_num_codes)
- logits, attentions = gct_model(embeddings, masks)
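
For context, the deleted graph_learning_lap.py was a non-functional placeholder: it assigns a bare nn.Parameter into an nn.ModuleDict (which PyTorch rejects), GraphConvolutionalTransformer.forward feeds a dict of embeddings straight into nn.TransformerEncoderLayer (which expects a single tensor), and the trailing usage example references an undefined feature_map. The sketch below is a minimal, hypothetical rework showing how such an embedder plus transformer-encoder stack could be wired end to end; the attribute names, shapes, and the two-visit batch are illustrative assumptions and are not part of either spacr release.

import torch
import torch.nn as nn

# Hypothetical, self-contained rework of the removed skeleton (illustration only).

class FeatureEmbedder(nn.Module):
    def __init__(self, vocab_sizes, embedding_size):
        super().__init__()
        self.embeddings = nn.ModuleDict({
            key: nn.Embedding(vocab_size + 1, embedding_size, padding_idx=vocab_size)
            for key, vocab_size in vocab_sizes.items()
        })
        # A bare Parameter cannot live inside nn.ModuleDict, so the learnable
        # 'visit' token is registered directly on the module instead.
        self.visit_embedding = nn.Parameter(torch.zeros(1, 1, embedding_size))

    def forward(self, feature_map):
        # Embed each code type and prepend the shared 'visit' token.
        batch_size = next(iter(feature_map.values())).shape[0]
        parts = [self.visit_embedding.expand(batch_size, -1, -1)]
        parts += [self.embeddings[key](codes.long()) for key, codes in feature_map.items()]
        return torch.cat(parts, dim=1)  # (batch, 1 + total codes, embedding_size)

class GraphConvolutionalTransformer(nn.Module):
    def __init__(self, embedding_size=128, num_attention_heads=1, num_transformer_stack=3):
        super().__init__()
        self.layers = nn.ModuleList([
            nn.TransformerEncoderLayer(d_model=embedding_size,
                                       nhead=num_attention_heads,
                                       batch_first=True)
            for _ in range(num_transformer_stack)
        ])
        self.output_layer = nn.Linear(embedding_size, 1)

    def forward(self, features):
        # Encode the whole sequence, then classify from the leading 'visit' position,
        # as the removed code intended.
        for layer in self.layers:
            features = layer(features)
        return self.output_layer(features[:, 0, :])

# Illustrative batch: 2 visits, 5 codes per feature type.
vocab_sizes = {'dx_ints': 3249, 'proc_ints': 2210}
feature_map = {key: torch.randint(0, size, (2, 5)) for key, size in vocab_sizes.items()}

embedder = FeatureEmbedder(vocab_sizes, embedding_size=128)
model = GraphConvolutionalTransformer(embedding_size=128)
logits = model(embedder(feature_map))
print(logits.shape)  # torch.Size([2, 1])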