algorecell-types 0.90-py3-none-any.whl → 1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- algorecell_types/__init__.py +147 -28
- {algorecell_types-0.90.dist-info → algorecell_types-1.1.dist-info}/METADATA +10 -10
- algorecell_types-1.1.dist-info/RECORD +5 -0
- {algorecell_types-0.90.dist-info → algorecell_types-1.1.dist-info}/WHEEL +1 -1
- algorecell_types-0.90.dist-info/RECORD +0 -5
- {algorecell_types-0.90.dist-info → algorecell_types-1.1.dist-info}/top_level.txt +0 -0
algorecell_types/__init__.py
CHANGED
@@ -1,7 +1,7 @@
 """
 This module implements generic types for representing predictions for
 the control of attractors in Boolean and multivalued networks, with
-various
+various visualizations.
 
 It accounts for instantaneous, temporary, and permanent perturbations, as well
 as sequential reprogramming strategies.
@@ -9,6 +9,14 @@ as sequential reprogramming strategies.
 Typically, a method computing reprogramming strategies returns an object of
 class :py:class:`.ReprogrammingStrategies`, from which can be extracted and
 visualized the set of identified strategies.
+
+Exemples of projects using the ``algorecell_types`` module:
+
+* `ActoNet <https://github.com/algorecell/pyActoNet>`_
+* `CABEAN-python <https://github.com/algorecell/cabean-python>`_
+* `Caspo-control <https://github.com/algorecell/caspo-control>`_
+* `StableMotifs-python <https://github.com/algorecell/StableMotifs-python>`_
+
 """
 
 import pandas as pd
@@ -73,6 +81,10 @@ class _Perturbation(_SymbolicType):
 class PermanentPerturbation(_Perturbation):
     """
     A permanent perturbation locks the specified components forever (mutation).
+
+    Example:
+
+    >>> p = PermanentPerturbation({"a": 1, "b": 0})
     """
     pass
 
@@ -80,6 +92,10 @@ class TemporaryPerturbation(_Perturbation):
     """
     A temporary perturbation locks the specified components until having reached
     an attractor, or until a :py:class:`.ReleasePerturbation`.
+
+    Example:
+
+    >>> p = TemporaryPerturbation({"a": 1, "b": 0})
     """
     pass
 
@@ -87,6 +103,10 @@ class ReleasePerturbation(_Perturbation):
     """
     A release perturbation unlocks given components subject to a prior
     :py:class:`.TemporaryPerturbation`.
+
+    Example:
+
+    >>> p = ReleasePerturbation({"a","b"})
     """
     pass
 
@@ -94,6 +114,10 @@ class InstantaneousPerturbation(_Perturbation):
     """
     An instantaneous perturbation modifies the states of the components and is
     immediatly released.
+
+    Example:
+
+    >>> p = InstantaneousPerturbation({"a": 1, "b": 0})
     """
     pass
 
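The doctests added above show one constructor call per perturbation type. As an informal aside (not part of the diff), the sketch below puts the four types side by side: the permanent, temporary, and instantaneous perturbations take a mapping from component names to values, whereas ReleasePerturbation takes the set of component names to unlock; the component names used here are placeholders.

# Illustrative sketch, not part of the package diff: the four perturbation
# types documented in version 1.1. Components "a" and "b" are placeholders.
from algorecell_types import (InstantaneousPerturbation, PermanentPerturbation,
                              ReleasePerturbation, TemporaryPerturbation)

mutation = PermanentPerturbation({"a": 1, "b": 0})    # locked forever
transient = TemporaryPerturbation({"a": 1, "b": 0})   # locked until an attractor or a release
flip = InstantaneousPerturbation({"a": 1, "b": 0})    # applied once, released immediately
unlock = ReleasePerturbation({"a", "b"})              # unlocks a prior temporary perturbation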
@@ -118,6 +142,11 @@ class _Strategy(_SymbolicType):
         return start
 
     def perturbation_sequence(self):
+        """
+        Returns the sequence of perturbations encoded by the strategy
+
+        :rtype: tuple of :py:class:`._Perturbation` objects
+        """
         ps = (self.perturbation(),)
         s = self.next()
         if s:
@@ -125,6 +154,16 @@ class _Strategy(_SymbolicType):
         return ps
 
 class FromAny(_Strategy):
+    """
+    Reprogramming strategy that can be applied in any state of the network
+    """
+    def __init__(self, perturbation, *seq):
+        """
+        :param ._Perturbation perturbation: perturbation object
+        :keyword ._Strategy seq: optional the next strategy to apply (sequential reprogramming)
+        """
+        assert len(seq) <= 1
+        super().__init__(perturbation, *seq)
     def make_start_node(self):
         n = pydot.Node("any", label="")
         n.set_tooltip(self.__class__.__name__[4:])
@@ -138,6 +177,22 @@ class FromAny(_Strategy):
         return self.args[1]
 
 class FromState(_Strategy):
+    """
+    Reprograming strategy that should be applied in the specified state
+    """
+    alias_template = 's{}'
+    def __init__(self, state, perturbation, *seq):
+        """
+        :param str state: alias of the state
+        :param ._Perturbation perturbation: perturbation object
+        :keyword ._Strategy seq: optional the next strategy to apply (sequential reprogramming)
+        """
+        assert len(seq) <= 1
+        super().__init__(state, perturbation, *seq)
+    def key(self):
+        return self.args[0]
+    def replace_key(self, key):
+        self.args[0] = key
     def make_start_node(self):
         n = pydot.Node(self.args[0])
         n.set_tooltip(self.__class__.__name__[4:])
@@ -149,9 +204,17 @@ class FromState(_Strategy):
         return self.args[2]
 
 class FromCondition(FromState):
-
+    """
+    Reprogramming strategy that should be applied only with the given condition
+    """
+    alias_template = 'c{}'
 
 class FromSteadyState(FromState):
+    """
+    Reprogramming strategy that should be applied in the given steady state
+    (fixed point).
+    """
+    alias_template = 'a{}'
     def make_start_node(self):
         n = super().make_start_node()
         n.set_style("filled")
@@ -159,6 +222,11 @@ class FromSteadyState(FromState):
         return n
 
 class FromOneInLimitCycle(FromState):
+    """
+    Reprogramming strategy that should be applied in one state of the given
+    cyclic attractor.
+    """
+    alias_template = 'a{}'
     def make_start_node(self):
         n = super().make_start_node()
         n.set_style("dashed")
@@ -166,37 +234,25 @@ class FromOneInLimitCycle(FromState):
 
 
 class ReprogrammingStrategies(object):
+    """
+    Stores a list of reprogramming strategies and offers various visualization
+    methods, including IPython representation.
+    """
     def __init__(self):
+        """
+        """
         self.__d = []
         self.__aliases = {}
         self.__autoaliases = {}
 
-    @property
-    def aliases(self):
-        return pd.DataFrame(self.__aliases).T
-
-    def autoalias(self, pattern, state):
-        h = tuple(sorted(state.items()))
-        reg = self.__autoaliases.get(pattern)
-        if not reg:
-            reg = self.__known_alias[pattern] = {}
-        a = reg.get(h)
-        if not a:
-            a = pattern.format(len(reg))
-            reg[h] = a
-            self.register_alias(a, state)
-        return a
-
-    def register_alias(self, name, state):
-        self.__aliases[name] = state
-
-    def add(self, s, **props):
-        self.__d.append((s, props))
-
-    def _repr_pretty_(self, p, cycle):
-        p.pretty([a[0] for a in self.__d])
-
     def as_graph(self, compact=False):
+        """
+        Returns a directed graph representation of the strategies
+        Edge labels indicate the type and specification of perturbations.
+
+        :keyword bool compact: draw compact edge labels
+        :rtype: `pydot.Dot <https://github.com/pydot/pydot>`_ graph
+        """
         g = pydot.Dot("")
         g.set_rankdir("LR")
         target = pydot.Node("target")
@@ -210,6 +266,18 @@ class ReprogrammingStrategies(object):
         return g
 
     def as_table(self):
+        """
+        Returns a `pandas.DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
+        where each row corresponds to a reprogramming stategy, and columns
+        indicate which nodes are perturbated, in which direction.
+
+        Note that this representation hides the type of perturbation.
+        Moreover sequential reproogramming strategies are flatten.
+
+        Red cells indicate a forced activation, green cells a forced inhibtion.
+        Yellow cells indicate a sequential reprogramming strategy in which the
+        node is first activated and then later inhibited, or conversely.
+        """
         #TODO: support multi-valued
         l = set()
         for a in self.__d:
@@ -253,15 +321,66 @@ class ReprogrammingStrategies(object):
             if val == "*":
                 return "color: black; background-color: yellow"
             return ""
-        df = df.
+        df = df.map(colorize)
         return df
 
     def perturbations(self):
+        """
+        Returns the set of :py:meth:`._Strategy.perturbation_sequence` of
+        registered reprogramming strategies.
+
+        :rtype: set(tuple(._Perturbation))
+        """
         ps = set()
         for a in self.__d:
            ps.add(a[0].perturbation_sequence())
         return ps
 
+    @property
+    def aliases(self):
+        """
+        Returns a `pandas.DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
+        listing the state aliases used by the reprogramming strategies.
+        """
+        return pd.DataFrame(self.__aliases).T
+
+    def autoalias(self, pattern, state):
+        h = tuple(sorted(state.items()))
+        reg = self.__autoaliases.get(pattern)
+        if not reg:
+            reg = self.__known_alias[pattern] = {}
+        a = reg.get(h)
+        if not a:
+            a = pattern.format(len(reg))
+            reg[h] = a
+            self.register_alias(a, state)
+        return a
+
+    def register_alias(self, name, state):
+        """
+        Register `name` as being an alias of `state`.
+
+        :param str name:
+        :param dict[str,int] state:
+        """
+        self.__aliases[name] = state
+
+    def add(self, s, **props):
+        """
+        Add a reprogramming strategy `s`, with optional properties `props`.
+        """
+        self.__d.append((s, props))
+
+    def __iter__(self):
+        """
+        Iterator over registered strategies
+        """
+        return iter(self.__d)
+
+    def _repr_pretty_(self, p, cycle):
+        p.pretty([a[0] for a in self.__d])
+
+
 if IN_IPYTHON:
     try:
         ip = get_ipython()
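Taken together, the new docstrings outline a small workflow: a reprogramming tool wraps perturbations into strategy objects (FromAny, FromState, ...), registers them in a ReprogrammingStrategies container, and the caller inspects or visualizes them. The following is an informal usage sketch based only on the names and signatures visible in this diff, not an excerpt from the package's documentation; the states and components are invented for illustration.

# Informal usage sketch of the 1.1 API shown above; class and method names are
# taken from the diff, the concrete states and components are made up.
from algorecell_types import (FromAny, FromState, PermanentPerturbation,
                              TemporaryPerturbation, ReprogrammingStrategies)

strategies = ReprogrammingStrategies()

# Applicable from any state: permanently force a=1, b=0.
strategies.add(FromAny(PermanentPerturbation({"a": 1, "b": 0})))

# Applied from a named state, followed by a second step (sequential reprogramming).
strategies.register_alias("s0", {"a": 0, "b": 1, "c": 0})
strategies.add(FromState("s0", TemporaryPerturbation({"c": 1}),
                         FromAny(PermanentPerturbation({"b": 0}))))

# __iter__, added in 1.1, yields the registered (strategy, properties) pairs.
for strategy, props in strategies:
    print(strategy, props)

print(strategies.perturbations())           # set of perturbation sequences
print(strategies.aliases)                   # DataFrame of registered state aliases
table = strategies.as_table()               # per-node activation/inhibition table
graph = strategies.as_graph(compact=True)   # pydot.Dot graph of the strategies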
{algorecell_types-0.90.dist-info → algorecell_types-1.1.dist-info}/METADATA
CHANGED
@@ -1,18 +1,18 @@
-Metadata-Version: 2.
-Name:
-Version:
+Metadata-Version: 2.4
+Name: algorecell_types
+Version: 1.1
 Summary: Generic types for reprogramming predictions from logical models
 Home-page: https://github.com/algorecell/algorecell_types
 Author: Loïc Paulevé
 Author-email: loic.pauleve@labri.fr
-License: UNKNOWN
-Platform: UNKNOWN
 Classifier: Intended Audience :: Science/Research
 Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
-Requires-Dist:
+Requires-Dist: colomoto_jupyter
 Requires-Dist: pandas
 Requires-Dist: pydot
-
-
-
-
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: home-page
+Dynamic: requires-dist
+Dynamic: summary
algorecell_types-1.1.dist-info/RECORD
ADDED
@@ -0,0 +1,5 @@
+algorecell_types/__init__.py,sha256=P4P6NkCeR7hKqilgODQys_aF8p3xDdfnrCoI8LJumQo,12456
+algorecell_types-1.1.dist-info/METADATA,sha256=laqEdXcdi3it_9B_p2kim6Teb9GBPdf6B9uhTyaMyGw,553
+algorecell_types-1.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+algorecell_types-1.1.dist-info/top_level.txt,sha256=09Tdfvl67VmpJ0S33H6oZLd8RcFSUPHVLIWIoZfYySY,17
+algorecell_types-1.1.dist-info/RECORD,,
algorecell_types-0.90.dist-info/RECORD
DELETED
@@ -1,5 +0,0 @@
-algorecell_types/__init__.py,sha256=peWkkP78NlyEM5seNkcoQXhZSZRsqeqa5Ho15Nb7kD8,8477
-algorecell_types-0.90.dist-info/METADATA,sha256=6o7Eu4qvBhKpjjp9RwDXwtRyA2TRAeDyiYAD7q4kYgo,483
-algorecell_types-0.90.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92
-algorecell_types-0.90.dist-info/top_level.txt,sha256=09Tdfvl67VmpJ0S33H6oZLd8RcFSUPHVLIWIoZfYySY,17
-algorecell_types-0.90.dist-info/RECORD,,
{algorecell_types-0.90.dist-info → algorecell_types-1.1.dist-info}/top_level.txt
File without changes