scmcp-shared 0.1.0__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24) hide show
  1. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/PKG-INFO +2 -2
  2. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/pyproject.toml +1 -1
  3. scmcp_shared-0.2.0/src/scmcp_shared/__init__.py +3 -0
  4. scmcp_shared-0.2.0/src/scmcp_shared/schema/__init__.py +1 -0
  5. scmcp_shared-0.2.0/src/scmcp_shared/schema/base.py +11 -0
  6. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/src/scmcp_shared/schema/io.py +5 -8
  7. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/src/scmcp_shared/schema/pl.py +3 -3
  8. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/src/scmcp_shared/schema/pp.py +14 -70
  9. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/src/scmcp_shared/schema/tl.py +69 -18
  10. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/src/scmcp_shared/schema/util.py +18 -11
  11. scmcp_shared-0.2.0/src/scmcp_shared/server/__init__.py +51 -0
  12. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/src/scmcp_shared/server/io.py +19 -25
  13. scmcp_shared-0.2.0/src/scmcp_shared/server/pl.py +321 -0
  14. scmcp_shared-0.2.0/src/scmcp_shared/server/pp.py +363 -0
  15. scmcp_shared-0.2.0/src/scmcp_shared/server/tl.py +407 -0
  16. scmcp_shared-0.2.0/src/scmcp_shared/server/util.py +250 -0
  17. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/src/scmcp_shared/util.py +70 -47
  18. scmcp_shared-0.1.0/src/scmcp_shared/__init__.py +0 -3
  19. scmcp_shared-0.1.0/src/scmcp_shared/schema/__init__.py +0 -1
  20. scmcp_shared-0.1.0/src/scmcp_shared/server/__init__.py +0 -1
  21. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/.github/workflows/publish.yml +0 -0
  22. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/LICENSE +0 -0
  23. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/README.md +0 -0
  24. {scmcp_shared-0.1.0 → scmcp_shared-0.2.0}/src/scmcp_shared/logging_config.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: scmcp_shared
3
- Version: 0.1.0
3
+ Version: 0.2.0
4
4
  Summary: A shared function library for scmcphub
5
5
  Author-email: shuang <hsh-me@outlook.com>
6
6
  License: BSD 3-Clause License
@@ -33,7 +33,7 @@ License: BSD 3-Clause License
33
33
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
34
  License-File: LICENSE
35
35
  Requires-Python: >=3.10
36
- Requires-Dist: fastmcp>=2.3.0
36
+ Requires-Dist: fastmcp>=2.3.4
37
37
  Requires-Dist: mcp>=1.8.0
38
38
  Requires-Dist: pydantic
39
39
  Requires-Dist: scanpy
@@ -11,7 +11,7 @@ requires-python = ">=3.10"
11
11
  dependencies = [
12
12
  "scanpy",
13
13
  "mcp>=1.8.0",
14
- "fastmcp>=2.3.0",
14
+ "fastmcp>=2.3.4",
15
15
  "pydantic",
16
16
  ]
17
17
 
@@ -0,0 +1,3 @@
1
+
2
+ __version__ = "0.2.0"
3
+
@@ -0,0 +1 @@
1
+ from .base import AdataModel
@@ -0,0 +1,11 @@
1
from typing import Optional

from pydantic import Field, BaseModel, ConfigDict


class AdataModel(BaseModel):
    """Base input schema shared by all adata tools.

    Carries the sample identifier and the data-type slot used to look up
    the AnnData object; subclasses add tool-specific fields.
    """
    # Default is None, so the annotation must be Optional[str] — a plain
    # `str` annotation with default=None is an invalid pydantic v2 schema.
    sampleid: Optional[str] = Field(default=None, description="adata sampleid")
    adtype: str = Field(default="exp", description="adata.X data type")

    # Silently drop unknown keys instead of raising a validation error.
    model_config = ConfigDict(
        extra="ignore"
    )
@@ -1,16 +1,13 @@
1
1
  from pydantic import (
2
2
  Field,
3
- ValidationInfo,
4
- computed_field,
5
3
  field_validator,
6
4
  model_validator,
7
- BaseModel
8
5
  )
9
- from typing import Optional, Union, Literal, Any, Sequence, Dict
6
+ from typing import Optional, Literal
7
+ from .base import AdataModel
10
8
 
11
9
 
12
-
13
- class ReadModel(BaseModel):
10
+ class ReadModel(AdataModel):
14
11
  """Input schema for the read tool."""
15
12
  filename: str = Field(
16
13
  ...,
@@ -89,7 +86,7 @@ class ReadModel(BaseModel):
89
86
  return v
90
87
 
91
88
 
92
- class WriteModel(BaseModel):
89
+ class WriteModel(AdataModel):
93
90
  """Input schema for the write tool."""
94
91
  filename: str = Field(
95
92
  description="Path to save the file. If no extension is provided, the default format will be used."
@@ -113,7 +110,7 @@ class WriteModel(BaseModel):
113
110
  return v
114
111
 
115
112
  @model_validator(mode='after')
116
- def validate_extension_compression(self) -> 'WriteInput':
113
+ def validate_extension_compression(self) -> 'WriteModel':
117
114
  # If ext is provided and not h5, compression should be None
118
115
  if self.ext is not None and self.ext != 'h5' and self.compression is not None:
119
116
  raise ValueError("Compression can only be used with h5 files")
@@ -7,7 +7,7 @@ from pydantic import (
7
7
  model_validator,
8
8
  BaseModel
9
9
  )
10
-
10
+ from .base import AdataModel
11
11
 
12
12
  # 创建 Mixin 类处理特定功能
13
13
  class LegendMixin:
@@ -73,7 +73,7 @@ class FigureSizeMixin:
73
73
 
74
74
 
75
75
  # 基础可视化模型,包含所有可视化工具共享的字段
76
- class BaseVisualizationModel(BaseModel, LegendMixin, ColorMappingMixin, FigureSizeMixin):
76
+ class BaseVisualizationModel(AdataModel, LegendMixin, ColorMappingMixin, FigureSizeMixin):
77
77
  """基础可视化模型,包含所有可视化工具共享的字段"""
78
78
  pass
79
79
 
@@ -632,7 +632,7 @@ class RankGenesGroupsModel(BaseVisualizationModel):
632
632
 
633
633
 
634
634
  # 重构 ClusterMapModel
635
- class ClusterMapModel(BaseModel):
635
+ class ClusterMapModel(AdataModel):
636
636
  """Input schema for the clustermap plotting tool."""
637
637
 
638
638
  obs_keys: Optional[str] = Field(
@@ -4,14 +4,14 @@ from pydantic import (
4
4
  computed_field,
5
5
  field_validator,
6
6
  model_validator,
7
- BaseModel
8
7
  )
8
+ from .base import AdataModel
9
9
  from typing import Optional, Union, List, Dict, Any
10
10
  from typing import Literal
11
11
  import numpy as np
12
12
 
13
13
 
14
- class FilterCells(BaseModel):
14
+ class FilterCells(AdataModel):
15
15
  """Input schema for the filter_cells preprocessing tool."""
16
16
 
17
17
  min_counts: Optional[int] = Field(
@@ -42,7 +42,7 @@ class FilterCells(BaseModel):
42
42
  return v
43
43
 
44
44
 
45
- class FilterGenes(BaseModel):
45
+ class FilterGenes(AdataModel):
46
46
  """Input schema for the filter_genes preprocessing tool."""
47
47
 
48
48
  min_counts: Optional[int] = Field(
@@ -73,7 +73,7 @@ class FilterGenes(BaseModel):
73
73
  return v
74
74
 
75
75
 
76
- class SubsetCellModel(BaseModel):
76
+ class SubsetCellModel(AdataModel):
77
77
  """Input schema for subsetting AnnData objects based on various criteria."""
78
78
  obs_key: Optional[str] = Field(
79
79
  default=None,
@@ -109,7 +109,7 @@ class SubsetCellModel(BaseModel):
109
109
  )
110
110
 
111
111
 
112
- class SubsetGeneModel(BaseModel):
112
+ class SubsetGeneModel(AdataModel):
113
113
  """Input schema for subsetting AnnData objects based on various criteria."""
114
114
  min_counts: Optional[int] = Field(
115
115
  default=None,
@@ -145,7 +145,7 @@ class SubsetGeneModel(BaseModel):
145
145
  )
146
146
 
147
147
 
148
- class CalculateQCMetrics(BaseModel):
148
+ class CalculateQCMetrics(AdataModel):
149
149
  """Input schema for the calculate_qc_metrics preprocessing tool."""
150
150
 
151
151
  expr_type: str = Field(
@@ -196,7 +196,7 @@ class CalculateQCMetrics(BaseModel):
196
196
 
197
197
 
198
198
 
199
- class Log1PModel(BaseModel):
199
+ class Log1PModel(AdataModel):
200
200
  """Input schema for the log1p preprocessing tool."""
201
201
 
202
202
  base: Optional[Union[int, float]] = Field(
@@ -232,64 +232,8 @@ class Log1PModel(BaseModel):
232
232
  return v
233
233
 
234
234
 
235
- class PCAModel(BaseModel):
236
- """Input schema for the PCA preprocessing tool."""
237
-
238
- n_comps: Optional[int] = Field(
239
- default=None,
240
- description="Number of principal components to compute. Defaults to 50 or 1 - minimum dimension size.",
241
- gt=0
242
- )
243
-
244
- layer: Optional[str] = Field(
245
- default=None,
246
- description="If provided, which element of layers to use for PCA."
247
- )
248
-
249
- zero_center: Optional[bool] = Field(
250
- default=True,
251
- description="If True, compute standard PCA from covariance matrix."
252
- )
253
-
254
- svd_solver: Optional[Literal["arpack", "randomized", "auto", "lobpcg", "tsqr"]] = Field(
255
- default=None,
256
- description="SVD solver to use."
257
- )
258
- mask_var: Optional[Union[str, bool]] = Field(
259
- default=None,
260
- description="Boolean mask or string referring to var column for subsetting genes."
261
- )
262
- dtype: str = Field(
263
- default="float32",
264
- description="Numpy data type string for the result."
265
- )
266
- chunked: bool = Field(
267
- default=False,
268
- description="If True, perform an incremental PCA on segments."
269
- )
270
-
271
- chunk_size: Optional[int] = Field(
272
- default=None,
273
- description="Number of observations to include in each chunk.",
274
- gt=0
275
- )
276
-
277
- @field_validator('n_comps', 'chunk_size')
278
- def validate_positive_integers(cls, v: Optional[int]) -> Optional[int]:
279
- """Validate positive integers"""
280
- if v is not None and v <= 0:
281
- raise ValueError("must be a positive integer")
282
- return v
283
-
284
- @field_validator('dtype')
285
- def validate_dtype(cls, v: str) -> str:
286
- """Validate numpy dtype"""
287
- if v not in ["float32", "float64"]:
288
- raise ValueError("dtype must be either 'float32' or 'float64'")
289
- return v
290
-
291
235
 
292
- class HighlyVariableGenesModel(BaseModel):
236
+ class HighlyVariableGenesModel(AdataModel):
293
237
  """Input schema for the highly_variable_genes preprocessing tool."""
294
238
 
295
239
  layer: Optional[str] = Field(
@@ -363,7 +307,7 @@ class HighlyVariableGenesModel(BaseModel):
363
307
  return v
364
308
 
365
309
 
366
- class RegressOutModel(BaseModel):
310
+ class RegressOutModel(AdataModel):
367
311
  """Input schema for the regress_out preprocessing tool."""
368
312
 
369
313
  keys: Union[str, List[str]] = Field(
@@ -396,7 +340,7 @@ class RegressOutModel(BaseModel):
396
340
  raise ValueError("keys must be a string or list of strings")
397
341
 
398
342
 
399
- class ScaleModel(BaseModel):
343
+ class ScaleModel(AdataModel):
400
344
  """Input schema for the scale preprocessing tool."""
401
345
 
402
346
  zero_center: bool = Field(
@@ -432,7 +376,7 @@ class ScaleModel(BaseModel):
432
376
  return v
433
377
 
434
378
 
435
- class CombatModel(BaseModel):
379
+ class CombatModel(AdataModel):
436
380
  """Input schema for the combat batch effect correction tool."""
437
381
 
438
382
  key: str = Field(
@@ -461,7 +405,7 @@ class CombatModel(BaseModel):
461
405
  return v
462
406
 
463
407
 
464
- class ScrubletModel(BaseModel):
408
+ class ScrubletModel(AdataModel):
465
409
  """Input schema for the scrublet doublet prediction tool."""
466
410
 
467
411
  adata_sim: Optional[str] = Field(
@@ -567,7 +511,7 @@ class ScrubletModel(BaseModel):
567
511
  return v.lower()
568
512
 
569
513
 
570
- class NeighborsModel(BaseModel):
514
+ class NeighborsModel(AdataModel):
571
515
  """Input schema for the neighbors graph construction tool."""
572
516
 
573
517
  n_neighbors: int = Field(
@@ -645,7 +589,7 @@ class NeighborsModel(BaseModel):
645
589
  return v
646
590
 
647
591
 
648
- class NormalizeTotalModel(BaseModel):
592
+ class NormalizeTotalModel(AdataModel):
649
593
  """Input schema for the normalize_total preprocessing tool."""
650
594
 
651
595
  target_sum: Optional[float] = Field(
@@ -1,9 +1,9 @@
1
- from pydantic import BaseModel, Field, field_validator, ValidationInfo
1
+ from pydantic import Field, field_validator, ValidationInfo
2
2
  from typing import Optional, Union, List, Dict, Any, Tuple, Literal, Mapping
3
3
 
4
+ from .base import AdataModel
4
5
 
5
-
6
- class TSNEModel(BaseModel):
6
+ class TSNEModel(AdataModel):
7
7
  """Input schema for the t-SNE dimensionality reduction tool."""
8
8
  n_pcs: Optional[int] = Field(
9
9
  default=None,
@@ -60,7 +60,7 @@ class TSNEModel(BaseModel):
60
60
  return v.lower()
61
61
 
62
62
 
63
- class UMAPModel(BaseModel):
63
+ class UMAPModel(AdataModel):
64
64
  """Input schema for the UMAP dimensionality reduction tool."""
65
65
 
66
66
  min_dist: Optional[float] = Field(
@@ -146,7 +146,7 @@ class UMAPModel(BaseModel):
146
146
  return v.lower()
147
147
 
148
148
 
149
- class DrawGraphModel(BaseModel):
149
+ class DrawGraphModel(AdataModel):
150
150
  """Input schema for the force-directed graph drawing tool."""
151
151
 
152
152
  layout: str = Field(
@@ -200,7 +200,7 @@ class DrawGraphModel(BaseModel):
200
200
  return v
201
201
 
202
202
 
203
- class DiffMapModel(BaseModel):
203
+ class DiffMapModel(AdataModel):
204
204
  """Input schema for the Diffusion Maps dimensionality reduction tool."""
205
205
 
206
206
  n_comps: int = Field(
@@ -230,7 +230,7 @@ class DiffMapModel(BaseModel):
230
230
  return v
231
231
 
232
232
 
233
- class EmbeddingDensityModel(BaseModel):
233
+ class EmbeddingDensityModel(AdataModel):
234
234
  """Input schema for the embedding density calculation tool."""
235
235
 
236
236
  basis: str = Field(
@@ -258,7 +258,7 @@ class EmbeddingDensityModel(BaseModel):
258
258
  return v
259
259
 
260
260
 
261
- class LeidenModel(BaseModel):
261
+ class LeidenModel(AdataModel):
262
262
  """Input schema for the Leiden clustering algorithm."""
263
263
 
264
264
  resolution: Optional[float] = Field(
@@ -330,7 +330,7 @@ class LeidenModel(BaseModel):
330
330
  return v
331
331
 
332
332
 
333
- class LouvainModel(BaseModel):
333
+ class LouvainModel(AdataModel):
334
334
  """Input schema for the Louvain clustering algorithm."""
335
335
 
336
336
  resolution: Optional[float] = Field(
@@ -402,7 +402,7 @@ class LouvainModel(BaseModel):
402
402
  return v
403
403
 
404
404
 
405
- class DendrogramModel(BaseModel):
405
+ class DendrogramModel(AdataModel):
406
406
  """Input schema for the hierarchical clustering dendrogram tool."""
407
407
 
408
408
  groupby: str = Field(
@@ -467,7 +467,7 @@ class DendrogramModel(BaseModel):
467
467
  return v
468
468
 
469
469
 
470
- class DPTModel(BaseModel):
470
+ class DPTModel(AdataModel):
471
471
  """Input schema for the Diffusion Pseudotime (DPT) tool."""
472
472
 
473
473
  n_dcs: int = Field(
@@ -516,7 +516,7 @@ class DPTModel(BaseModel):
516
516
  raise ValueError("min_group_size must be between 0 and 1")
517
517
  return v
518
518
 
519
- class PAGAModel(BaseModel):
519
+ class PAGAModel(AdataModel):
520
520
  """Input schema for the Partition-based Graph Abstraction (PAGA) tool."""
521
521
 
522
522
  groups: Optional[str] = Field(
@@ -544,7 +544,7 @@ class PAGAModel(BaseModel):
544
544
  return v
545
545
 
546
546
 
547
- class IngestModel(BaseModel):
547
+ class IngestModel(AdataModel):
548
548
  """Input schema for the ingest tool that maps labels and embeddings from reference data to new data."""
549
549
 
550
550
  obs: Optional[Union[str, List[str]]] = Field(
@@ -593,7 +593,7 @@ class IngestModel(BaseModel):
593
593
  return v.lower()
594
594
 
595
595
 
596
- class RankGenesGroupsModel(BaseModel):
596
+ class RankGenesGroupsModel(AdataModel):
597
597
  """Input schema for the rank_genes_groups tool."""
598
598
 
599
599
  groupby: str = Field(
@@ -675,7 +675,7 @@ class RankGenesGroupsModel(BaseModel):
675
675
  return v
676
676
 
677
677
 
678
- class FilterRankGenesGroupsModel(BaseModel):
678
+ class FilterRankGenesGroupsModel(AdataModel):
679
679
  """Input schema for filtering ranked genes groups."""
680
680
 
681
681
  key: Optional[str] = Field(
@@ -738,7 +738,7 @@ class FilterRankGenesGroupsModel(BaseModel):
738
738
  return v
739
739
 
740
740
 
741
- class MarkerGeneOverlapModel(BaseModel):
741
+ class MarkerGeneOverlapModel(AdataModel):
742
742
  """Input schema for the marker gene overlap tool."""
743
743
 
744
744
  key: str = Field(
@@ -809,7 +809,7 @@ class MarkerGeneOverlapModel(BaseModel):
809
809
  return v
810
810
 
811
811
 
812
- class ScoreGenesModel(BaseModel):
812
+ class ScoreGenesModel(AdataModel):
813
813
  """Input schema for the score_genes tool that calculates gene scores based on average expression."""
814
814
 
815
815
  ctrl_size: int = Field(
@@ -852,7 +852,7 @@ class ScoreGenesModel(BaseModel):
852
852
  return v
853
853
 
854
854
 
855
- class ScoreGenesCellCycleModel(BaseModel):
855
+ class ScoreGenesCellCycleModel(AdataModel):
856
856
  """Input schema for the score_genes_cell_cycle tool that scores cell cycle genes."""
857
857
 
858
858
  s_genes: List[str] = Field(
@@ -900,3 +900,54 @@ class ScoreGenesCellCycleModel(BaseModel):
900
900
  return v
901
901
 
902
902
 
903
+
904
+
905
+ class PCAModel(AdataModel):
906
+ """Input schema for the PCA preprocessing tool."""
907
+
908
+ n_comps: Optional[int] = Field(
909
+ default=None,
910
+ description="Number of principal components to compute. Defaults to 50 or 1 - minimum dimension size.",
911
+ gt=0
912
+ )
913
+
914
+ layer: Optional[str] = Field(
915
+ default=None,
916
+ description="If provided, which element of layers to use for PCA."
917
+ )
918
+
919
+ zero_center: Optional[bool] = Field(
920
+ default=True,
921
+ description="If True, compute standard PCA from covariance matrix."
922
+ )
923
+
924
+ svd_solver: Optional[Literal["arpack", "randomized", "auto", "lobpcg", "tsqr"]] = Field(
925
+ default=None,
926
+ description="SVD solver to use."
927
+ )
928
+ mask_var: Optional[Union[str, bool]] = Field(
929
+ default=None,
930
+ description="Boolean mask or string referring to var column for subsetting genes."
931
+ )
932
+ # dtype: str = Field(
933
+ # default="float32",
934
+ # description="Numpy data type string for the result."
935
+ # )
936
+ chunked: bool = Field(
937
+ default=False,
938
+ description="If True, perform an incremental PCA on segments."
939
+ )
940
+
941
+ chunk_size: Optional[int] = Field(
942
+ default=None,
943
+ description="Number of observations to include in each chunk.",
944
+ gt=0
945
+ )
946
+
947
+ @field_validator('n_comps', 'chunk_size')
948
+ def validate_positive_integers(cls, v: Optional[int]) -> Optional[int]:
949
+ """Validate positive integers"""
950
+ if v is not None and v <= 0:
951
+ raise ValueError("must be a positive integer")
952
+ return v
953
+
@@ -1,6 +1,5 @@
1
1
 
2
2
  from pydantic import (
3
- BaseModel,
4
3
  Field,
5
4
  ValidationInfo,
6
5
  computed_field,
@@ -8,10 +7,10 @@ from pydantic import (
8
7
  model_validator,
9
8
  )
10
9
  from typing import Optional, Union, List, Dict, Any, Callable, Collection, Literal
10
+ from .base import AdataModel
11
11
 
12
12
 
13
-
14
- class MarkVarModel(BaseModel):
13
+ class MarkVarModel(AdataModel):
15
14
  """Determine or mark if each gene meets specific conditions and store results in adata.var as boolean values"""
16
15
 
17
16
  var_name: str = Field(
@@ -33,15 +32,15 @@ class MarkVarModel(BaseModel):
33
32
  )
34
33
 
35
34
 
36
- class ListVarModel(BaseModel):
35
+ class ListVarModel(AdataModel):
37
36
  """ListVarModel"""
38
37
  pass
39
38
 
40
- class ListObsModel(BaseModel):
39
+ class ListObsModel(AdataModel):
41
40
  """ListObsModel"""
42
41
  pass
43
42
 
44
- class VarNamesModel(BaseModel):
43
+ class VarNamesModel(AdataModel):
45
44
  """ListObsModel"""
46
45
  var_names: List[str] = Field(
47
46
  default=None,
@@ -49,7 +48,7 @@ class VarNamesModel(BaseModel):
49
48
  )
50
49
 
51
50
 
52
- class ConcatAdataModel(BaseModel):
51
+ class ConcatAdataModel(AdataModel):
53
52
  """Model for concatenating AnnData objects"""
54
53
 
55
54
  axis: Literal['obs', 0, 'var', 1] = Field(
@@ -90,7 +89,7 @@ class ConcatAdataModel(BaseModel):
90
89
  )
91
90
 
92
91
 
93
- class DPTIROOTModel(BaseModel):
92
+ class DPTIROOTModel(AdataModel):
94
93
  """Input schema for setting the root cell for diffusion pseudotime."""
95
94
  diffmap_key: str = Field(
96
95
  default="X_diffmap",
@@ -104,7 +103,7 @@ class DPTIROOTModel(BaseModel):
104
103
  )
105
104
 
106
105
 
107
- class CelltypeMapCellTypeModel(BaseModel):
106
+ class CelltypeMapCellTypeModel(AdataModel):
108
107
  """Input schema for mapping cluster IDs to cell type names."""
109
108
  cluster_key: str = Field(
110
109
  description="Key in adata.obs containing cluster IDs."
@@ -123,9 +122,17 @@ class CelltypeMapCellTypeModel(BaseModel):
123
122
 
124
123
 
125
124
 
126
- class AddLayerModel(BaseModel):
125
+ class AddLayerModel(AdataModel):
127
126
  """Input schema for adding a layer to AnnData object."""
128
127
  layer_name: str = Field(
129
128
  description="Name of the layer to add to adata.layers."
130
129
  )
131
-
130
+
131
+
132
+ class QueryOpLogModel(AdataModel):
133
+ """QueryOpLogModel"""
134
+ n: int = Field(
135
+ default=10,
136
+ description="Number of operations to return."
137
+ )
138
+
@@ -0,0 +1,51 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Iterable
4
+ from typing import Any
5
+
6
+ from .io import io_mcp
7
+ from .util import ul_mcp
8
+ from .pl import pl_mcp
9
+ from .pp import pp_mcp
10
+ from .tl import tl_mcp
11
+
12
+
13
+
14
class AdataState:
    """In-memory registry of AnnData objects, keyed by data type then sample id.

    ``adata_dic`` maps an adtype (e.g. "exp", "activity") to a dict of
    sampleid -> AnnData.  ``active_id`` names the sample used when callers
    do not pass an explicit sampleid.
    """

    def __init__(self, add_adtypes=None):
        """Create the registry.

        Parameters
        ----------
        add_adtypes : str | Iterable[str] | None
            Extra data-type bucket(s) to register beyond the defaults.
        """
        self.adata_dic = {"exp": {}, "activity": {}, "cnv": {}, "splicing": {}}
        if isinstance(add_adtypes, str):
            self.adata_dic[add_adtypes] = {}
        elif isinstance(add_adtypes, Iterable):
            self.adata_dic.update({adtype: {} for adtype in add_adtypes})
        self.active_id = None
        # Fixed typo: attribute was misspelled "metadatWa" in 0.2.0.
        self.metadata = {}
        self.cr_kernel = {}
        self.cr_estimator = {}

    def get_adata(self, sampleid=None, adtype="exp", request=None):
        """Return the AnnData stored under (sampleid, adtype).

        If *request* is given, ``sampleid``/``adtype`` are taken from its
        dumped fields instead of the explicit arguments.  Returns None when
        no sample has been activated yet; raises KeyError (chained to the
        original lookup failure) for an unknown sampleid or adtype.
        """
        if request is not None:
            kwargs = request.model_dump()
            sampleid = kwargs.get("sampleid", None)
            adtype = kwargs.get("adtype", "exp")
        if self.active_id is None:
            return None
        sampleid = sampleid or self.active_id
        # Keep the try-body minimal: only the lookup can raise KeyError.
        # Unexpected errors now propagate unchanged instead of being
        # rewrapped in a generic (and previously profane) Exception.
        try:
            return self.adata_dic[adtype][sampleid]
        except KeyError as e:
            raise KeyError(
                f"Key {e} not found in adata_dic[{adtype}]. "
                "Please check the sampleid or adtype."
            ) from e

    def set_adata(self, adata, sampleid=None, sdtype="exp", request=None):
        """Store *adata* under (sampleid, sdtype), creating the bucket if needed.

        If *request* is given, ``sampleid``/``sdtype`` are taken from its
        dumped fields; a missing sampleid falls back to ``active_id``.
        """
        if request is not None:
            kwargs = request.model_dump()
            sampleid = kwargs.get("sampleid", None)
            sdtype = kwargs.get("adtype", "exp")
        sampleid = sampleid or self.active_id
        # setdefault creates the bucket without clobbering an existing one.
        self.adata_dic.setdefault(sdtype, {})[sampleid] = adata