sapiopycommons 2024.3.19a157__py3-none-any.whl → 2025.1.17a402__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sapiopycommons might be problematic. Click here for more details.

Files changed (52)
  1. sapiopycommons/callbacks/__init__.py +0 -0
  2. sapiopycommons/callbacks/callback_util.py +2041 -0
  3. sapiopycommons/callbacks/field_builder.py +545 -0
  4. sapiopycommons/chem/IndigoMolecules.py +46 -1
  5. sapiopycommons/chem/Molecules.py +100 -21
  6. sapiopycommons/customreport/__init__.py +0 -0
  7. sapiopycommons/customreport/column_builder.py +60 -0
  8. sapiopycommons/customreport/custom_report_builder.py +137 -0
  9. sapiopycommons/customreport/term_builder.py +315 -0
  10. sapiopycommons/datatype/attachment_util.py +14 -15
  11. sapiopycommons/datatype/data_fields.py +61 -0
  12. sapiopycommons/datatype/pseudo_data_types.py +440 -0
  13. sapiopycommons/eln/experiment_handler.py +355 -91
  14. sapiopycommons/eln/experiment_report_util.py +649 -0
  15. sapiopycommons/eln/plate_designer.py +152 -0
  16. sapiopycommons/files/complex_data_loader.py +31 -0
  17. sapiopycommons/files/file_bridge.py +149 -25
  18. sapiopycommons/files/file_bridge_handler.py +555 -0
  19. sapiopycommons/files/file_data_handler.py +633 -0
  20. sapiopycommons/files/file_util.py +263 -163
  21. sapiopycommons/files/file_validator.py +569 -0
  22. sapiopycommons/files/file_writer.py +377 -0
  23. sapiopycommons/flowcyto/flow_cyto.py +77 -0
  24. sapiopycommons/flowcyto/flowcyto_data.py +75 -0
  25. sapiopycommons/general/accession_service.py +375 -0
  26. sapiopycommons/general/aliases.py +250 -15
  27. sapiopycommons/general/audit_log.py +185 -0
  28. sapiopycommons/general/custom_report_util.py +251 -31
  29. sapiopycommons/general/directive_util.py +86 -0
  30. sapiopycommons/general/exceptions.py +69 -7
  31. sapiopycommons/general/popup_util.py +59 -7
  32. sapiopycommons/general/sapio_links.py +50 -0
  33. sapiopycommons/general/storage_util.py +148 -0
  34. sapiopycommons/general/time_util.py +91 -7
  35. sapiopycommons/multimodal/multimodal.py +146 -0
  36. sapiopycommons/multimodal/multimodal_data.py +490 -0
  37. sapiopycommons/processtracking/__init__.py +0 -0
  38. sapiopycommons/processtracking/custom_workflow_handler.py +406 -0
  39. sapiopycommons/processtracking/endpoints.py +192 -0
  40. sapiopycommons/recordmodel/record_handler.py +621 -148
  41. sapiopycommons/rules/eln_rule_handler.py +87 -8
  42. sapiopycommons/rules/on_save_rule_handler.py +87 -12
  43. sapiopycommons/sftpconnect/__init__.py +0 -0
  44. sapiopycommons/sftpconnect/sftp_builder.py +70 -0
  45. sapiopycommons/webhook/webhook_context.py +39 -0
  46. sapiopycommons/webhook/webhook_handlers.py +614 -71
  47. sapiopycommons/webhook/webservice_handlers.py +317 -0
  48. {sapiopycommons-2024.3.19a157.dist-info → sapiopycommons-2025.1.17a402.dist-info}/METADATA +5 -4
  49. sapiopycommons-2025.1.17a402.dist-info/RECORD +60 -0
  50. {sapiopycommons-2024.3.19a157.dist-info → sapiopycommons-2025.1.17a402.dist-info}/WHEEL +1 -1
  51. sapiopycommons-2024.3.19a157.dist-info/RECORD +0 -28
  52. {sapiopycommons-2024.3.19a157.dist-info → sapiopycommons-2025.1.17a402.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,377 @@
1
+ from __future__ import annotations
2
+
3
+ import warnings
4
+ from abc import abstractmethod
5
+ from enum import Enum
6
+ from typing import Any
7
+
8
+ from sapiopycommons.general.aliases import SapioRecord, AliasUtil
9
+ from sapiopycommons.general.exceptions import SapioException
10
+ from sapiopycommons.general.time_util import TimeUtil
11
+
12
+
13
class FileWriter:
    """
    This class helps with the creation of character separated value files (e.g. CSVs, TSVs). You can make use of
    FileUtil.csv_to_xlsx to convert these files to xlsx files.
    """
    headers: list[str]
    body: list[list[Any]]
    delimiter: str
    line_break: str
    column_definitions: dict[str, ColumnDef]

    def __init__(self, headers: list[str], delimiter: str = ",", line_break: str = "\r\n"):
        """
        :param headers: The headers to display at the top of the file in the order in which they should appear.
        :param delimiter: The delimiter character(s) to use between cells in the file.
        :param line_break: The character(s) to use as a line break at the end of rows.
        """
        self.headers = headers
        self.delimiter = delimiter
        self.line_break = line_break
        self.body = []
        self.column_definitions = {}

    def add_row_list(self, row: list[Any]) -> None:
        """
        Add a row of values to the file from a list. The length of the given list must be equal to the number of
        headers defined for this FileWriter.

        To be used when you just want to put together a simple file and don't want to deal with ColumnDefinitions and
        RowBundles.

        :param row: A row of values to add to the end of the file.
        :raises SapioException: If the number of row elements doesn't equal the number of headers.
        """
        row_count: int = len(row)
        header_count: int = len(self.headers)
        if row_count != header_count:
            raise SapioException(f"The given list has {row_count} elements but this FileWriter has {header_count} "
                                 f"headers. The number of row elements must equal the number of headers.")
        self.body.append(row)

    def add_row_dict(self, row: dict[str, Any]) -> None:
        """
        Add a row of values to the file from a dict. The dict is expected to contain keys that match the headers of
        the file. For any header that exists for the file that doesn't have a matching key in the given dict, an empty
        string is printed.

        To be used when you just want to put together a simple file and don't want to deal with ColumnDefinitions and
        RowBundles.

        :param row: A row of values to add to the end of the file.
        """
        # Headers with no matching key in the dict default to an empty cell.
        new_row: list[Any] = [row.get(header, "") for header in self.headers]
        self.body.append(new_row)

    def add_column_definition(self, header: str, column_def: ColumnDef) -> None:
        """
        Add a new column definition to this FileWriter for a specific header.

        ColumnDefs are only used if the build_file function is provided with a list of RowBundles. Every header must
        have a column definition if this is the case.

        Custom column definitions can be created by defining a class that extends ColumnDef and implements the print
        method.

        :param header: The header that this column definition is for. If a header is provided that isn't in the
            headers list, the header is appended to the end of the list.
        :param column_def: A column definition to be used to construct the file when build_file is called.
        """
        if header not in self.headers:
            self.headers.append(header)
        self.column_definitions[header] = column_def

    def add_column_definitions(self, column_defs: dict[str, ColumnDef]) -> None:
        """
        Add new column definitions to this FileWriter.

        ColumnDefs are only used if the build_file function is provided with a list of RowBundles. Every header must
        have a column definition if this is the case.

        Custom column definitions can be created by defining a class that extends ColumnDef and implements the print
        method.

        :param column_defs: A dictionary of header names to column definitions to be used to construct the file when
            build_file is called.
        """
        # For backwards compatibility purposes, if column definitions are provided as a list,
        # add them in order of appearance of the headers. This will only work if the headers are defined first.
        if isinstance(column_defs, list):
            warnings.warn("Adding column definitions is no longer expected as a list. Continuing to provide a list to "
                          "this function may result in undesirable behavior.", UserWarning)
            if not self.headers:
                raise SapioException("No headers provided to FileWriter before the column definitions were added.")
            for header, column_def in zip(self.headers, column_defs):
                self.column_definitions[header] = column_def
            # Bug fix: previously this branch fell through to column_defs.items() below, which raises an
            # AttributeError when column_defs is a list.
            return
        for header, column_def in column_defs.items():
            self.add_column_definition(header, column_def)

    def build_file(self, rows: list[RowBundle] | None = None, sorter=None, reverse: bool = False) -> str:
        """
        Build the file according to the information that has been given to this FileWriter. If any add_row calls were
        made, those will be listed first before any rows constructed using any given RowBundles (assuming you aren't
        sorting the rows). RowBundles can only be used if this FileWriter was also provided with ColumnDefs for
        mapping the bundles to the file.

        If ever a None value is encountered, instead prints an empty string.

        :param rows: A list of information used to populate rows in the file. If this parameter is provided, then this
            FileWriter must have also been given column definitions to map the row information to the file with.
        :param sorter: Some function to sort the rows by before they are printed.
            See https://docs.python.org/3.10/howto/sorting.html#sortinghowto for details on the type of functions that
            can be provided.
        :param reverse: Whether the above sorter should be run in reverse.
        :return: A string of the created file. Call string.encode() to turn this into a byte array for client
            callbacks.
        :raises SapioException: If column definitions were provided but don't cover every header, or if RowBundles
            were provided without any column definitions.
        """
        # If any column definitions have been provided, the number of column definitions and headers must be equal.
        if self.column_definitions:
            for header in self.headers:
                if header not in self.column_definitions:
                    raise SapioException(f"FileWriter has no column definition for the header {header}. If any column "
                                         f"definitions are provided, then all headers must have a column definition.")
        # If any RowBundles have been provided, there must be column definitions for mapping them to the file.
        elif rows:
            raise SapioException(f"FileWriter was given RowBundles but contains no column definitions for mapping "
                                 f"them to file contents. Either add ColumnDefs to map the RowBundles with, or use the "
                                 f"simple add_row functions.")

        self.__build_rows(rows)
        if sorter is not None:
            # Bug fix: this previously called sorted(self.body, ...) and discarded the result, so the sorter
            # parameter had no effect on the output.
            self.body.sort(key=sorter, reverse=reverse)
        # Join once at the end instead of concatenating in a loop to avoid quadratic string building.
        lines: list[str] = [self.delimiter.join(self.headers)]
        lines.extend(self.delimiter.join([self.__str(x) for x in row]) for row in self.body)
        return self.line_break.join(lines) + self.line_break

    def __build_rows(self, rows: list[RowBundle] | None = None) -> None:
        """
        Populate the FileWriter's body using the RowBundles and ColumnDefs.

        :param rows: A list of RowBundles to populate the file body with.
        """
        if not rows:
            return
        # Sort a copy so the caller's list isn't reordered as a side effect.
        for row in sorted(rows, key=lambda x: x.index):
            new_row: list[Any] = []
            for header in self.headers:
                column = self.column_definitions[header]
                # Skippable columns print nothing for skippable bundles.
                if column.may_skip and row.may_skip:
                    new_row.append("")
                else:
                    new_row.append(column.print(row))
            self.body.append(new_row)

    def __str(self, value: Any) -> str:
        """
        :param value: Some value to convert to a string.
        :return: The input value as a string, or an empty string for None. If the string of the input value contains
            the delimiter character, then the returned value is surrounded by quotation marks.
        """
        if value is None:
            return ""
        ret: str = str(value)
        if self.delimiter in ret:
            ret = "\"" + ret + "\""
        return ret
181
+
182
+
183
class RowBundle:
    """
    A RowBundle is a collection of information that the FileWriter may use to print a single row in a file.
    """
    index: int
    record: SapioRecord
    records: dict[str, SapioRecord]
    fields: dict[str, Any]
    may_skip: bool

    def __init__(self, index: int | None = None,
                 record: SapioRecord | None = None,
                 records: dict[str, SapioRecord] | None = None,
                 fields: dict[str, Any] | None = None,
                 may_skip: bool | None = None):
        """
        :param index: An index for this RowBundle. RowBundles are sorted by index before they are printed by the
            FileWriter. The FileWriter.build_file's sorter parameter is run after this sorting.
        :param record: A singular record for column definitions to pull information from.
        :param records: A dictionary of records for column definitions to pull information from. Each record is keyed
            by some name that column definitions can use to determine which record to get certain information from.
        :param fields: A list of "fields" specific to this bundle which aren't tied to a record.
        :param may_skip: If true, this RowBundle will return an empty string for ColumnDefs where may_skip is true.
        """
        self.index = 0 if index is None else index
        self.record = record
        self.records = {} if records is None else records
        self.fields = {} if fields is None else fields
        # When not specified, bundles built around a records dict default to being skippable.
        self.may_skip = bool(records) if may_skip is None else may_skip
212
+
213
+
214
class ColumnDef:
    """
    Base class for all column definitions. Each column definition controls how a value is produced for one column
    of the output file from a given RowBundle.
    """
    # If true, this ColumnDef will return an empty string for RowBundles where may_skip is true.
    may_skip: bool

    @abstractmethod
    def print(self, row: RowBundle) -> Any:
        """
        :param row: The RowBundle to print some information for.
        :return: The printed value for the given RowBundle.
        """
        ...
229
+
230
+
231
class StaticColumn(ColumnDef):
    """
    A column definition that prints one fixed value for every row, regardless of the input RowBundle.
    """
    value: Any

    def __init__(self, value: Any, may_skip: bool = False):
        """
        :param value: The value printed for every row of this column.
        :param may_skip: If true, this ColumnDef will return an empty string for RowBundles where may_skip is true.
        """
        self.may_skip = may_skip
        self.value = value

    def print(self, row: RowBundle) -> Any:
        # The same value is returned no matter which bundle is being printed.
        return self.value
247
+
248
+
249
class EmptyColumn(StaticColumn):
    """
    A static column whose printed value is always the empty string.
    """
    def __init__(self):
        super().__init__("")
255
+
256
+
257
class FieldSearchOrder(Enum):
    """
    Specifies where, and in what order, a FieldColumn looks for a field when printing from a RowBundle.
    """
    RECORD_FIRST = 0
    """Search the record's fields first, falling back to the bundle's fields."""
    BUNDLE_FIRST = 1
    """Search the bundle's fields first, falling back to the record's fields."""
    RECORD_ONLY = 2
    """Search the record's fields and nothing else."""
    BUNDLE_ONLY = 3
    """Search the bundle's fields and nothing else."""
269
+
270
+
271
class FieldColumn(ColumnDef):
    """
    A field column prints the value of a given field from the input RowBundle. This field may come from a record in the
    RowBundle or from the RowBundle itself.
    """
    field_name: str
    record_key: str | None
    search_order: FieldSearchOrder
    skip_none_values: bool

    def __init__(self, field_name: str, record_key: str | None = None,
                 search_order: FieldSearchOrder = FieldSearchOrder.RECORD_FIRST,
                 skip_none_values: bool = False,
                 may_skip: bool = False):
        """
        :param field_name: The name of the field in the RowBundle to get the value of.
        :param record_key: If a record key is given, looks in the RowBundle's record dict for the record with that key.
            If no record key is given, looks at the RowBundle's singular record.
        :param search_order: An enum that specifies the order in which fields should be searched for in the RowBundle.
        :param skip_none_values: If true and search_order is RECORD_FIRST or BUNDLE_FIRST, use the value of the second
            location's field if the first location's value is None.
        :param may_skip: If true, this ColumnDef will return an empty string for RowBundles where may_skip is true.
        """
        self.field_name = field_name
        self.record_key = record_key
        self.search_order = search_order
        self.skip_none_values = skip_none_values
        self.may_skip = may_skip

    def print(self, row: RowBundle) -> Any:
        return self._get_field(row)

    def _get_field(self, row: RowBundle) -> Any:
        """
        Locate and return the field value for this column from the given bundle, honoring the search order.

        :param row: The RowBundle to search for the field in.
        :return: The field value, or None if it couldn't be found in the searched location(s).
        """
        # Resolve which record to read from: a keyed record from the bundle's records dict, or the singular record.
        record: SapioRecord = row.records.get(self.record_key) if self.record_key else row.record
        if self.search_order == FieldSearchOrder.RECORD_ONLY:
            return record.get_field_value(self.field_name) if record else None
        elif self.search_order == FieldSearchOrder.BUNDLE_ONLY:
            return row.fields.get(self.field_name)
        elif self.search_order == FieldSearchOrder.RECORD_FIRST:
            # NOTE(review): this branch reads the record through AliasUtil.to_field_map_lists, while RECORD_ONLY and
            # BUNDLE_FIRST call record.get_field_value directly — presumably equivalent; confirm against AliasUtil.
            fields: dict[str, Any] = AliasUtil.to_field_map_lists([record])[0] if record else {}
            # Fall back to the bundle's fields when the record lacks the field (or its value is None and
            # skip_none_values is set).
            if self.field_name not in fields or (self.skip_none_values and fields.get(self.field_name) is None):
                return row.fields.get(self.field_name)
            return fields.get(self.field_name)
        elif self.search_order == FieldSearchOrder.BUNDLE_FIRST:
            # Fall back to the record's field when the bundle lacks the field (or its value is None and
            # skip_none_values is set).
            if self.field_name not in row.fields or (self.skip_none_values and row.fields.get(self.field_name) is None):
                return record.get_field_value(self.field_name) if record else None
            return row.fields.get(self.field_name)
318
+
319
+
320
class DateColumn(FieldColumn):
    """
    A date column takes a field value which is an integer timestamp since the epoch and converts it to a
    human-readable date/time string.
    """
    time_format: str
    timezone: str | None = None

    def __init__(self, field_name: str, time_format: str, timezone: str | None = None, record_key: str | None = None,
                 search_order: FieldSearchOrder = FieldSearchOrder.RECORD_FIRST,
                 skip_none_values: bool = False, may_skip: bool = False):
        """
        :param field_name: The name of the field in the RowBundle to get the value of.
        :param time_format: The format to put the date in. See TimeUtil for more details on date formats.
        :param timezone: The timezone to convert the date to. If not specified, uses the default timezone of
            TimeUtil. See TimeUtil for more details on timezones.
        :param record_key: If a record key is given, looks in the RowBundle's record dict for the record with that key.
            If no record key is given, looks at the RowBundle's singular record.
        :param search_order: An enum that specifies the order in which fields should be searched for in the RowBundle.
        :param skip_none_values: If true and search_order is RECORD_FIRST or BUNDLE_FIRST, use the value of the second
            location's field if the first location's value is None.
        :param may_skip: If true, this ColumnDef will return an empty string for RowBundles where may_skip is true.
        """
        super().__init__(field_name, record_key, search_order, skip_none_values, may_skip)
        self.time_format = time_format
        self.timezone = timezone

    def print(self, row: RowBundle) -> str:
        # Assumes the located field holds a millisecond epoch timestamp — TODO confirm; a None or non-int value
        # would be passed straight through to TimeUtil.millis_to_format.
        field: int = self._get_field(row)
        return TimeUtil.millis_to_format(field, self.time_format, self.timezone)
350
+
351
+
352
class ListColumn(ColumnDef):
    """
    A list column is like a static column, except it may print a different value for each row. If there are more rows
    than the length of this list, then all subsequent rows will print a blank value.

    Note that using the FileWriter.build_file's sorter parameter may result in these values not being in the expected
    order in the output file.
    """
    column_list: list[Any]
    list_size: int
    index: int = 0

    def __init__(self, column_list: list[Any]):
        """
        :param column_list: The list of values to print for this column, one per row.
        """
        self.may_skip = False
        self.column_list = column_list
        self.list_size = len(column_list)

    def print(self, row: RowBundle) -> Any:
        # Each call consumes the next value in the list; once exhausted, every further row prints blank.
        if self.index < self.list_size:
            next_value: Any = self.column_list[self.index]
            self.index += 1
            return next_value
        return ""
@@ -0,0 +1,77 @@
1
+ from __future__ import annotations
2
+
3
+ from weakref import WeakValueDictionary
4
+
5
+ from sapiopylib.rest.User import SapioUser
6
+ from databind.json import dumps
7
+
8
+ from sapiopycommons.flowcyto.flowcyto_data import FlowJoWorkspaceInputJson, UploadFCSInputJson, \
9
+ ComputeFlowStatisticsInputJson
10
+
11
+
12
class FlowCytoManager:
    """
    This manager includes flow cytometry analysis tools that would require FlowCyto license to use.

    One manager instance is shared per SapioUser (see __new__). Instances are held in a WeakValueDictionary, so a
    cached manager is discarded once no external references to it remain.
    """
    # The user whose credentials are used for every webservice request made by this manager.
    _user: SapioUser

    # Per-user cache backing the singleton pattern; weak values so cached managers can be garbage collected.
    __instances: WeakValueDictionary[SapioUser, FlowCytoManager] = WeakValueDictionary()
    # Set to True once __init__ has run for this instance; guards against re-initialization (see __init__).
    __initialized: bool

    def __new__(cls, user: SapioUser):
        """
        Observes singleton pattern per record model manager object.

        :param user: The user that will make the webservice request to the application.
        """
        obj = cls.__instances.get(user)
        if not obj:
            obj = object.__new__(cls)
            obj.__initialized = False
            cls.__instances[user] = obj
        return obj

    def __init__(self, user: SapioUser):
        # __init__ runs on every FlowCytoManager(user) call, even when __new__ returned a cached instance;
        # skip re-initialization in that case.
        if self.__initialized:
            return
        self._user = user
        self.__initialized = True

    def create_flowjo_workspace(self, workspace_input: FlowJoWorkspaceInputJson) -> int:
        """
        Create FlowJo Workspace and return the workspace record ID of workspace root record,
        after successful creation.

        :param workspace_input: The request data payload.
        :return: The new workspace record ID.
        """
        # Serialize the input to JSON text and POST it as a plain-text payload; the endpoint responds with the
        # new record ID.
        payload = dumps(workspace_input, FlowJoWorkspaceInputJson)
        response = self._user.plugin_post("flowcyto/workspace", payload=payload, is_payload_plain_text=True)
        self._user.raise_for_status(response)
        return int(response.json())

    def upload_fcs_for_sample(self, upload_input: UploadFCSInputJson) -> int:
        """
        Upload FCS file as root of the sample FCS.

        :param upload_input: The request data payload.
        :return: The record ID of the root FCS file uploaded under the sample.
        """
        payload = dumps(upload_input, UploadFCSInputJson)
        response = self._user.plugin_post("flowcyto/fcs", payload=payload, is_payload_plain_text=True)
        self._user.raise_for_status(response)
        return int(response.json())

    def compute_statistics(self, stat_compute_input: ComputeFlowStatisticsInputJson) -> list[int]:
        """
        Requests to compute flow cytometry statistics.
        The children are of type FCSStatistic.
        If the FCS files have not been evaluated yet,
        then the lazy evaluation will be performed immediately prior to computing statistics, which can take longer.
        If any new statistics are computed as children of FCS, they will be returned in the result record id list.
        Note: if input has multiple FCS files, the client should try to get parent FCS file from each record to
        figure out which one is for which FCS.

        :param stat_compute_input: The request data payload listing FCS file record IDs and statistics to compute.
        :return: The record IDs of any newly computed statistic records.
        """
        payload = dumps(stat_compute_input, ComputeFlowStatisticsInputJson)
        response = self._user.plugin_post("flowcyto/statistics", payload=payload, is_payload_plain_text=True)
        self._user.raise_for_status(response)
        return list(response.json())
@@ -0,0 +1,75 @@
1
+ import base64
2
+ from enum import Enum
3
+
4
+ from databind.core.dataclasses import dataclass
5
+
6
+
7
class ChannelStatisticType(Enum):
    """
    All supported channel statistics types. The member's value doubles as its human-readable display name.
    """
    MEAN = "(Mean) MFI"
    MEDIAN = "(Median) MFI"
    STD_EV = "Std. Dev."
    # Correctly-spelled alias for the STD_EV member above. Enum members sharing a value become aliases, so
    # ChannelStatisticType.STD_DEV is ChannelStatisticType.STD_EV; STD_EV is kept for backwards compatibility.
    STD_DEV = "Std. Dev."
    COEFFICIENT_OF_VARIATION = "CV"

    # The human-readable name of the statistic, identical to the member's value.
    display_name: str

    def __init__(self, display_name: str):
        """
        :param display_name: The display name for this statistic (the member's assigned value).
        """
        self.display_name = display_name
20
+
21
+
22
@dataclass
class ChannelStatisticsParameterJSON:
    """
    One statistics request: compute a single statistic type across a set of channels.

    Field names are camelCase to match the JSON wire format — presumably serialized by databind; do not rename.
    """
    # The names of the channels to compute the statistic for.
    channelNameList: list[str]
    # The type of statistic to compute for each listed channel.
    statisticsType: ChannelStatisticType

    def __init__(self, channel_name_list: list[str], stat_type: ChannelStatisticType):
        """
        :param channel_name_list: The names of the channels to compute the statistic for.
        :param stat_type: The type of statistic to compute.
        """
        self.channelNameList = channel_name_list
        self.statisticsType = stat_type
30
+
31
+
32
@dataclass
class ComputeFlowStatisticsInputJson:
    """
    Request payload for FlowCytoManager.compute_statistics.

    Field names are camelCase to match the JSON wire format — presumably serialized by databind; do not rename.
    """
    # Record IDs of the FCS files to compute statistics for.
    fcsFileRecordIdList: list[int]
    # The statistics to compute for each FCS file.
    statisticsParameterList: list[ChannelStatisticsParameterJSON]

    def __init__(self, fcs_file_record_id_list: list[int], statistics_parameter_list: list[ChannelStatisticsParameterJSON]):
        """
        :param fcs_file_record_id_list: Record IDs of the FCS files to compute statistics for.
        :param statistics_parameter_list: The statistics to compute for each FCS file.
        """
        self.fcsFileRecordIdList = fcs_file_record_id_list
        self.statisticsParameterList = statistics_parameter_list
40
+
41
+
42
@dataclass
class FlowJoWorkspaceInputJson:
    """
    Request payload for FlowCytoManager.create_flowjo_workspace.

    Field names are camelCase to match the JSON wire format — presumably serialized by databind; do not rename.
    """
    # The path/name of the FlowJo workspace file.
    filePath: str
    # The workspace file contents, base64-encoded (see __init__).
    base64Data: str

    def __init__(self, filePath: str, file_data: bytes):
        """
        :param filePath: The path/name of the FlowJo workspace file.
            NOTE(review): this parameter is camelCase unlike the snake_case parameters of the sibling classes;
            renaming would break keyword callers, so it is left as-is.
        :param file_data: The raw workspace file bytes; stored base64-encoded as UTF-8 text.
        """
        self.filePath = filePath
        self.base64Data = base64.b64encode(file_data).decode('utf-8')
50
+
51
+
52
@dataclass
class UploadFCSInputJson:
    """
    Request to upload new FCS file.

    Field names are camelCase to match the JSON wire format — presumably serialized by databind; do not rename.

    Attributes:
        filePath: The file name of the FCS file to be uploaded. For FlowJo workspace, this is important to match
            the file in group (via file names).
        attachmentDataType: the attachment data type that contains already-uploaded FCS data.
        attachmentRecordId: the attachment record ID that contains already-uploaded FCS data.
        associatedRecordDataType: the "parent" association for the FCS. Can either be a workspace or a sample record.
        associatedRecordId: the "parent" association for the FCS. Can either be a workspace or a sample record.
    """
    filePath: str
    attachmentDataType: str
    attachmentRecordId: int
    associatedRecordDataType: str
    associatedRecordId: int

    def __init__(self, associated_record_data_type: str, associated_record_id: int,
                 file_path: str, attachment_data_type: str, attachment_record_id: int):
        """
        :param associated_record_data_type: The data type of the "parent" association (workspace or sample) record.
        :param associated_record_id: The record ID of the "parent" association (workspace or sample) record.
        :param file_path: The file name of the FCS file to be uploaded.
        :param attachment_data_type: The data type of the attachment containing the already-uploaded FCS data.
        :param attachment_record_id: The record ID of the attachment containing the already-uploaded FCS data.
        """
        # Note: the parameter order intentionally differs from the field declaration order above.
        self.filePath = file_path
        self.attachmentDataType = attachment_data_type
        self.attachmentRecordId = attachment_record_id
        self.associatedRecordDataType = associated_record_data_type
        self.associatedRecordId = associated_record_id