guts-base 2.0.0b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,595 @@
+ """This module was developed in the PollinERA project to handle time-of-death
+ notation and to add another import format.
+ 
+ TODO: Suppress warnings for sheet names that are too long
+ TODO: Skip files whose postprocessing files are locked and give a warning
+ TODO: Give a status message indicating which file is being processed
+ TODO: Apply these changes also to openguts.py
+ TODO: Write tests for the imports and produce templates. See test_data_import.py
+ """
+
+
+ from typing import List
+ import os
+ import warnings
+ from datetime import timedelta
+ from guts_base.data.utils import datalad_locked_file_warning
+
+ import click
+ import pandas as pd
+ import numpy as np
+
+ # default columns
+ DEFAULT_COLUMNS_whitespace = dict(
+     id_subject = ["id subject", "subject id", "id", "id bee"],
+     id_treatment = ["id treatment", "treatment id", "treatment"],
+     id_replicate = ["id replicate", "replicate id", "replicate name", "replicate"],
+     n = ["individuals", "n", "number_replicates", "n_individuals", "replicates"],
+     censored = ["cenzus", "censoring", "escaped"],
+     time_start_experiment = ["date of start", "experiment start", "start experiment"],
+     time_end_experiment = ["date of end", "experiment end", "end experiment"],
+     time_start_exposure = ["time of exposure start", "start exposure", "exposure start"],
+     time_end_exposure = ["time of exposure end", "end exposure", "exposure end"],
+     time_death = ["time of death", "survival time", "date of death"],
+ )
+
+ DEFAULT_COLUMNS_underscore = {
+     k: [v_.replace(" ", "_") for v_ in v]
+     for k, v in DEFAULT_COLUMNS_whitespace.items()
+ }
+
+ DEFAULT_COLUMNS = {
+     k: list(set(DEFAULT_COLUMNS_whitespace[k] + DEFAULT_COLUMNS_underscore[k]))
+     for k in DEFAULT_COLUMNS_whitespace.keys()
+ }
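+
+ # Illustrative note (example aliases taken from the mapping above): DEFAULT_COLUMNS maps
+ # each standardized name to all accepted spellings, e.g. "treatment", "treatment id" and
+ # "treatment_id" are all recognized as "id_treatment" by standardize_column_names() below.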
+
+ # whether each standardized column must be present (True) or is optional (False)
+ REQUIRED_COLUMNS = dict(
+     id_subject = True,
+     id_treatment = True,
+     id_replicate = False,
+     censored = False,
+     n = False,
+     time_start_experiment = True,
+     time_end_experiment = False,
+     time_start_exposure = True,
+     time_end_exposure = True,
+     time_death = True,
+ )
+
+
+
+ def clean_column_names(columns: List[str]):
+     """Convert column names to lowercase snake_case and drop bracket/slash characters."""
+     cleaned_columns = []
+     for c in columns:
+         c = c.lower()   # convert to lowercase
+         c = c.strip()   # strip leading and trailing whitespace
+         c = c.replace(" ", "_")
+         c = c.replace("[", "")
+         c = c.replace("]", "")
+         c = c.replace("/", "_")
+
+         cleaned_columns.append(c)
+
+     return cleaned_columns
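+
+ # Illustrative example (assumed input, not from the package):
+ # clean_column_names(["Time of Death [h]", " Treatment ID "])
+ # returns ["time_of_death_h", "treatment_id"].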
+
+ def standardize_column_names(
+     columns: List[str],
+     raise_error=True,
+     ignore_columns=[]
+ ):
+     """Map known column name aliases onto the standardized names in DEFAULT_COLUMNS."""
+     column_mapper = invert_dict_of_lists(DEFAULT_COLUMNS)
+     standardized_columns = []
+     for c in columns:
+         c = column_mapper.get(c, c)   # try to get a standard value for the column
+
+         standardized_columns.append(c)
+
+     missing_columns = [
+         k for k in DEFAULT_COLUMNS.keys()
+         if k not in standardized_columns and k not in ignore_columns
+     ]
+     if len(missing_columns) > 0 and raise_error:
+         raise KeyError(
+             f"Not all necessary columns could be found. {missing_columns} "
+             "could not be identified. Rename the columns or add the corresponding "
+             "aliases to the mapper."
+         )
+
+     return standardized_columns
+
+
+ def invert_dict_of_lists(original_dict):
+     """Invert a dict of lists so that every list entry maps back to its key."""
+     inverted_dict = {}
+     for key, value_list in original_dict.items():
+         for value in value_list:
+             inverted_dict[value] = key
+     return inverted_dict
+
+ def long_to_wide(df_long, id_columns, time_column, observation_column):
+     """Pivot a long table to wide format with one column per '__'-joined ID combination."""
+     df_long["id"] = df_long[id_columns].apply(
+         lambda x: '__'.join(x.astype(str)), axis=1
+     )
+
+     df_wide = df_long.pivot(
+         # data=df_long.reset_index(),
+         index=time_column,
+         values=observation_column,
+         columns="id",
+     )
+
+     return df_wide
+
+ def wide_to_long(df_wide, id_columns, time_column, observation_column):
+     """Melt a wide table back to long format and split the '__'-joined ID column."""
+     df_long = pd.melt(
+         frame=df_wide.reset_index(),
+         value_vars=df_wide.columns,
+         id_vars=time_column,
+         var_name="id",
+         value_name=observation_column
+     )
+
+     df_long[id_columns] = df_long.id.str.split("__", n=1, expand=True)
+     df_long = df_long.drop(columns="id")
+     return df_long[id_columns + [time_column, observation_column]]
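+
+ # Illustrative example (assumed data, not from the package): a long table with columns
+ # ["id_treatment", "id_replicate", "time [d]", "survival"] becomes a wide table indexed
+ # by "time [d]" with one column per "id_treatment__id_replicate" combination
+ # (e.g. "01__01"); wide_to_long() reverses this.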
+
+
+ def get_unique_value(series, action_if_not_unique="mean"):
+     """Return the single unique value of a series, or aggregate it if it is not unique."""
+     if series.nunique() == 1:
+         return series.drop_duplicates().iloc[0]
+     else:
+         if action_if_not_unique == "mean":
+             return series.mean()
+         elif action_if_not_unique == "max":
+             return series.max()
+         elif action_if_not_unique == "min":
+             return series.min()
+         elif action_if_not_unique == "error":
+             raise ValueError("Series contains non-unique values.")
+         else:
+             raise NotImplementedError("Aggregation action is not implemented.")
+
+
+ def make_openguts_intervention_table(
+     df: pd.DataFrame,
+     intervention: str,
+     intervention_time_unit: str = "d",
+     rect_interpolate=True,
+ ) -> pd.DataFrame:
+     """Create a wide openGUTS exposure table for one intervention (exposure) column."""
+     # create exposure tables
+     id_columns = ["id_treatment", "id_replicate"]
+     time_column = f"time [{intervention_time_unit}]"
+
+     df_long = []
+     for (tid, rid), group in df.groupby(id_columns):
+         # the exposure level must be unique within a treatment/replicate group
+         intervention_value = float(group[intervention].unique().item())
+         if f"time_start_exposure_{intervention}" in group:
+             intervention_start = get_unique_value(group[f"time_start_exposure_{intervention}"])
+         else:
+             intervention_start = get_unique_value(group["time_start_exposure"])
+         if f"time_end_exposure_{intervention}" in group:
+             intervention_end = get_unique_value(group[f"time_end_exposure_{intervention}"])
+         else:
+             intervention_end = get_unique_value(group["time_end_exposure"])
+
+         experiment_start = get_unique_value(group["time_start_experiment"])
+         experiment_end = get_unique_value(group["time_end_experiment"])
+
+         time = np.array([
+             experiment_start,
+             intervention_start,
+             intervention_end,
+             experiment_end
+         ])
+
+         value = np.array([0, intervention_value, 0, 0])
+
+         m = pd.DataFrame(
+             data=np.column_stack([
+                 np.repeat(tid, len(time)),
+                 np.repeat(rid, len(time)),
+                 time, value
+             ]),
+             columns=list(group[id_columns].columns) + [time_column, intervention]
+         )
+         # this throws the first value out if the time of exposure start is
+         # identical to the exposure end
+         m = m.drop_duplicates(subset=id_columns + [time_column], keep="last")
+
+         df_long.append(m)
+
+     df_long = pd.concat(df_long)
+     df_wide = long_to_wide(df_long, id_columns, time_column, intervention).reset_index()
+     # NOTE: experiment_start of the last group is used as the time reference;
+     # this assumes that all groups share the same experiment start.
+     df_wide[time_column] = (df_wide[time_column] - experiment_start)
+     df_wide = df_wide.set_index(time_column)
+
+     if rect_interpolate:
+         # insert a point 1 second before each time step and forward-fill, so that the
+         # exposure profile becomes rectangular (step-wise) instead of linearly interpolated
+         df_wide = df_wide.reindex(np.unique(np.concatenate([
+             np.array(list(df_wide.index)),
+             np.array(list(df_wide.index - pd.Timedelta(1, "s")))[1:]
+         ])))
+         df_wide = df_wide.ffill()
+
+     df_wide.index = df_wide.index / pd.Timedelta(1, "d")
+     return df_wide
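+
+ # Illustrative result (assumed data, not from the package): the returned table is indexed
+ # by "time [d]" in days since the experiment start and has one column per
+ # "id_treatment__id_replicate" combination; values are 0 outside the exposure window and
+ # equal to the exposure level between exposure start and end.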
+
+
+
+ def make_openguts_observation_table(
+     df: pd.DataFrame,
+     observation="censored",
+     observation_schedule: str = "d",
+ ) -> pd.DataFrame:
+     """Return a wide table of counts per observation time (censored or dead individuals)."""
+     df = df.copy()
+
+     experiment_start = get_unique_value(df["time_start_experiment"])
+     experiment_end = get_unique_value(df["time_end_experiment"])
+
+     times_nominal = pd.date_range(experiment_start, experiment_end, freq=observation_schedule)
+     timecol_name = f"time [{observation_schedule.lower()}]"
+
+     id_columns = ["id_treatment", "id_replicate"]
+
+     # calculate survival time
+     df[timecol_name] = df["time_death"] - df["time_start_experiment"]
+
+     # this seems to have been necessary, because reindexing removed times smaller than
+     # the observation_schedule interval. This is now resolved by concatenating true
+     # times and nominal times
+     # TODO: remove this commented block when no more errors appear
+     # time_remainder = df[timecol_name] % pd.Timedelta(1, observation_schedule)
+     # if (time_remainder > pd.Timedelta(0)).any():
+     #     raise ValueError(
+     #         "Observations should be entered at the same time as the experiment start. "
+     #         "df['time_death'] - df['time_start_experiment'] should be a multiple of "
+     #         f"the time resolution of the observation schedule. Here: 1{observation_schedule}"
+     #     )
+
+     if observation == "censored":
+         # sum IDs that were marked as censored at time t
+         df_long = df.groupby(id_columns + [timecol_name])["censored"].sum()
+
+     elif observation == "lethality":
+         # count IDs that died at time t
+         df_long = df.groupby(id_columns + [timecol_name])["time_death"].count()
+
+     else:
+         raise NotImplementedError(f"observation {observation} is not implemented.")
+
+     df_long = df_long.rename(observation)
+
+     # long to wide frame
+     df_wide = long_to_wide(df_long.reset_index(), id_columns, timecol_name, observation)
+
+     # get a time vector that contains all nominal observation times and also the
+     # actually observed times
+     observation_times = np.unique(np.concatenate([df_wide.index, times_nominal - experiment_start]))
+
+     # reindex wide dataframe on time
+     df_wide = df_wide.reindex(index=observation_times, method=None)
+     df_wide.index = df_wide.index.set_names(timecol_name)
+     df_wide = df_wide.fillna(0)
+
+     return df_wide
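+
+ # Illustrative result (assumed data, not from the package): the returned table is indexed
+ # by e.g. "time [d]" (timedeltas since the experiment start), has one column per
+ # "id_treatment__id_replicate" combination, and contains the number of censored or dead
+ # individuals at each nominal or actually observed time (0 where nothing was recorded).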
+
+
+ # write to excel file
+ def excel_writer(df: pd.DataFrame, file, sheet):
+     """Write a dataframe to a sheet, creating the file or replacing the sheet as needed."""
+     with warnings.catch_warnings():
+         warnings.simplefilter(action="ignore")
+         if not os.path.exists(file):
+             with pd.ExcelWriter(file, mode="w") as writer:
+                 df.to_excel(writer, sheet_name=sheet)
+
+         else:
+             with pd.ExcelWriter(file, if_sheet_exists="replace", mode="a") as writer:
+                 df.to_excel(writer, sheet_name=sheet)
+
+ def write_data_template(
+     notation="time_of_death",
+     time_start_experiment=()
+ ):
+     # TODO: not implemented yet
+     pass
+
+ def time_to_fraction(data, column, experiment_start):
+     # TODO: not implemented yet
+     pass
+
+
+ class TimeOfDeathIO:
+     def __init__(
+         self,
+         file,
+         intervention_columns: List[str],
+         sheet: str = "time-of-death",
+     ):
+         self._file = file
+         # NOTE: from_file() is not yet defined in this module
+         self.data = self.from_file()
+
+ def main(file: str, sheet: str, out: str, intervention_columns: List[str],
+          extra_observation_columns: List[str] = [],
+          observation_schedule="d", rect_interpolate=False):
+     intervention_columns = clean_column_names(list(intervention_columns))
+     extra_observation_columns = clean_column_names(list(extra_observation_columns))
+     processed_file = f"{out}/openguts_{os.path.basename(file)}"
+
+     print("\n")
+     print(f"Processing File: {file}")
+     print("Converting from time-of-death to openguts")
+     print("-----------------------------------------")
+
+     # skip files whose postprocessed output exists but is locked (e.g. by DataLad)
+     if os.access(processed_file, os.F_OK):
+         if not os.access(processed_file, os.W_OK):
+             datalad_locked_file_warning(processed_file)
+             return
+     else:
+         directory = os.path.dirname(processed_file)
+         if not os.access(directory, os.F_OK):
+             os.makedirs(directory)
+
+     # read datafile
+     data = pd.read_excel(io=file, sheet_name=sheet)
+     data.columns = clean_column_names(data.columns)
+
+     # Assumptions
+     # -----------
+     # the exposure duration should not be too small; Bürger and Focks (2025) assume a
+     # topical exposure duration of 1 hour. If the exposure duration is too small, it
+     # will result in problems with k_d
+     exposure_duration = timedelta(seconds=3600)
+     exposure_start_delay = timedelta(hours=0)
+     id_zfill = 2   # width to zero-pad ID column values to
+
+     # standardize columns
+     data.columns = standardize_column_names(data.columns, raise_error=False)
+     data["id_treatment"] = data["id_treatment"].astype(str).str.zfill(id_zfill)
+
+     # add optional columns to the dataframe if they are not present
+     experiment_start = get_unique_value(data["time_start_experiment"])
+     if "time_start_exposure" not in data:
+         warnings.warn(
+             "No column: 'time_start_exposure'. "
+             "Assuming time_start_exposure = time_start_experiment "
+             f"({experiment_start}) + {exposure_start_delay}",
+             category=UserWarning
+         )
+         exposure_start = experiment_start + exposure_start_delay
+         data["time_start_exposure"] = exposure_start
+
+     if "time_end_exposure" in data:
+         if (data["time_start_experiment"] == data["time_end_exposure"]).all():
+             warnings.warn(
+                 "'time_end_exposure' equals 'time_start_experiment'. "
+                 "Removing column 'time_end_exposure'."
+             )
+             data = data.drop("time_end_exposure", axis=1)
+
+     if "time_end_exposure" not in data:
+         exposure_start = data["time_start_exposure"]
+         warnings.warn(
+             "No column: 'time_end_exposure'. "
+             f"Assuming time_end_exposure = time_start_exposure + {exposure_duration}",
+             category=UserWarning
+         )
+         exposure_end = exposure_start + exposure_duration
+         data["time_end_exposure"] = exposure_end
+
+     if "time_end_experiment" not in data:
+         experiment_end = data.time_death.max()
+         warnings.warn(
+             "No column: 'time_end_experiment'. "
+             f"Using the time of the last observation: {experiment_end}",
+             category=UserWarning
+         )
+         data["time_end_experiment"] = experiment_end
+
+     if "id_replicate" not in data:
+         warnings.warn(
+             "No column: 'id_replicate'. "
+             "Assuming all treatments were only carried out with 1 replicate "
+             "(containing n individuals).",
+             category=UserWarning
+         )
+         data["id_replicate"] = 0
+
+         # check for replicates
+         id_columns = ["id_treatment"]
+         for rid, (_, group) in enumerate(data[id_columns + intervention_columns]
+                                          .groupby(id_columns)):
+             data.loc[group.index, "id_replicate"] = rid + 1
+
+         data["id_replicate"] = data["id_replicate"].astype(str).str.zfill(id_zfill)
+         id_columns = ["id_treatment", "id_replicate"]
+
+     elif data["id_replicate"].isna().all():
+         warnings.warn(
+             "column: 'id_replicate' contained only NaN values. "
+             "Assuming all treatments were only carried out with 1 replicate "
+             "(containing n individuals).",
+             category=UserWarning
+         )
+         data["id_replicate"] = 0
+
+         # check for replicates
+         id_columns = ["id_treatment"]
+         for rid, (_, group) in enumerate(data[id_columns + intervention_columns]
+                                          .groupby(id_columns)):
+             data.loc[group.index, "id_replicate"] = rid + 1
+
+         data["id_replicate"] = data["id_replicate"].astype(str).str.zfill(id_zfill)
+         id_columns = ["id_treatment", "id_replicate"]
+     else:
+         data["id_replicate"] = data["id_replicate"].astype(str)
+         data["id_treatment"] = data["id_treatment"].astype(str)
+         id_columns = ["id_treatment", "id_replicate"]
+
+     if "censored" not in data:
+         warnings.warn(
+             "No column: 'censored'. "
+             "Assuming all observations are uncensored, meaning each "
+             "'time of death' entry comes from an individual that was "
+             "observed dead at that time (as opposed to escaped or removed from "
+             "the experiment).",
+             category=UserWarning
+         )
+         data["censored"] = 0
+
+     data.columns = standardize_column_names(data.columns, raise_error=True, ignore_columns=["n"])
+
+
+     interventions = []
+     for iv in intervention_columns:
+         iv_wide = make_openguts_intervention_table(
+             data,
+             intervention=iv,
+             intervention_time_unit="d",
+             rect_interpolate=rect_interpolate,
+         )
+         interventions.append(iv_wide)
+         excel_writer(iv_wide, file=processed_file, sheet=iv)
+
+     censored = make_openguts_observation_table(
+         data,
+         observation="censored",
+         observation_schedule=observation_schedule,
+     )
+
+     lethality = make_openguts_observation_table(
+         data,
+         observation="lethality",
+         observation_schedule=observation_schedule,
+     )
+
+     _extra_observations = []
+     for eob in extra_observation_columns:
+         ob_wide = make_openguts_observation_table(
+             data,
+             observation=eob,
+             observation_schedule=observation_schedule
+         )
+         _extra_observations.append(ob_wide)
+         excel_writer(ob_wide, file=processed_file, sheet=eob)
+
+     deaths = lethality - censored
+
+     # excel export
+     excel_writer(censored, file=processed_file, sheet="censored")
+     excel_writer(lethality, file=processed_file, sheet="lethality (uncensored)")
+     excel_writer(deaths, file=processed_file, sheet="lethality (censored)")
+
+     cens_long = wide_to_long(censored, id_columns, f"time [{observation_schedule}]", "censored")
+     leth_long = wide_to_long(lethality, id_columns, f"time [{observation_schedule}]", "lethality")
+
+     if "n" in data:
+         if data["n"].isna().all():
+             warnings.warn(
+                 "column: 'n' contained only NaN values. "
+                 "Removed (so it can be recreated from scratch).",
+                 category=UserWarning
+             )
+             data = data.drop("n", axis=1)
+
+     if "n" not in data:
+         warnings.warn(
+             "No column: 'n'. "
+             "Inferring the number of individuals at the beginning of the "
+             "experiment from the uncensored number of dead organisms "
+             "(including those escaped and alive at the end of the experiment).",
+             category=UserWarning
+         )
+         n = leth_long.groupby(id_columns)["lethality"].sum().rename("n")
+         data = pd.merge(data, n.reset_index(), on=id_columns, how="left")
+
+
+     # calculate survival
+     n = data.groupby(id_columns)["n"].agg("unique").astype(int)
+     survival = pd.merge(leth_long, n, on=id_columns, how="left")
+     mortality = survival.groupby(id_columns)["lethality"].cumsum()
+     survival["survival"] = survival["n"] - mortality
+     survival_wide = long_to_wide(survival, id_columns, f"time [{observation_schedule}]", "survival")
+     excel_writer(survival_wide, file=processed_file, sheet="survival")
+
+     # Calculate the number of present organisms just after censoring
+     # n_observed_after_censoring = survival_wide.copy()
+     # n_observed_after_censoring[survival_wide.columns] = np.vstack([
+     #     survival_wide.iloc[0].values,
+     #     survival_wide.iloc[:-1].values - censored.iloc[1:].values
+     # ])
+     # excel_writer(n_observed_after_censoring, file=processed_file,
+     #              sheet="n_observed_after_censoring")
+
+     # Calculate the number of organisms alive after the last observation
+     n_observed_after_last_observation = survival_wide.copy()
+     n_observed_after_last_observation[survival_wide.columns] = np.vstack([
+         np.full_like(survival_wide.iloc[0].values, np.nan, dtype=float),
+         survival_wide.iloc[:-1].values
+     ])
+     excel_writer(n_observed_after_last_observation, file=processed_file,
+                  sheet="n_observed_after_last_observation")
+
+
+     data.columns = standardize_column_names(data.columns)
+     data_minimal = data[list(DEFAULT_COLUMNS.keys()) + intervention_columns]
+     excel_writer(data_minimal.set_index("id_subject"), file=processed_file,
+                  sheet="time_of_death")
+
+     if "meta" in pd.ExcelFile(file).sheet_names:
+         excel_writer(
+             df=pd.read_excel(file, sheet_name="meta").set_index("Metadata"),
+             file=processed_file,
+             sheet="meta"
+         )
+     elif "Info" in pd.ExcelFile(file).sheet_names:
+         metadata = pd.read_excel(io=file, sheet_name="Info")
+         metadata = metadata.set_index("Experiment information")
+         metadata.columns = ["value", "description"]
+         metadata.loc["interventions", "value"] = ", ".join(intervention_columns)
+         metadata.loc["observations", "value"] = ", ".join(["survival", "censored"])
+         excel_writer(metadata, file=processed_file, sheet="meta")
+     else:
+         warnings.warn("No metadata found in sheets 'meta' or 'Info'.")
+
+     return processed_file
+
+ @click.command()
+ @click.option("--file", "-f", help="Path to the xlsx file")
+ @click.option("--sheet", "-s", help="Name of the excel sheet")
+ @click.option("--out", "-o", help="Output directory", default="processed_data")
+ @click.option("--observation_schedule", help="Schedule of the observations: d - daily, h - hourly", default="d")
+ @click.option("--intervention_columns", "-c", multiple=True, type=str, help="Names of the columns that carry the exposure information")
+ @click.option("--extra_observation_columns", "-e", multiple=True, type=str, default=[], help="Names of the columns that carry additional observations besides time-of-death and censoring")
+ def time_of_death_to_openguts(file, sheet, out, observation_schedule, intervention_columns, extra_observation_columns):
+     _ = main(
+         file=file,
+         sheet=sheet,
+         out=out,
+         intervention_columns=intervention_columns,
+         extra_observation_columns=extra_observation_columns,
+         observation_schedule=observation_schedule
+     )
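+
+ # Illustrative CLI usage (script name assumed, not from the package):
+ #   python time_of_death.py -f raw_data/experiment.xlsx -s time-of-death \
+ #       -o processed_data -c "Substance A" -c "Substance B" --observation_schedule d
+ # The -c/-e options can be repeated; column names are cleaned and standardized on import.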
+
+
+ if __name__ == "__main__":
+
+     # when not started from the data directory, switch to it and run the
+     # conversion on the test template
+     if os.path.basename(os.getcwd()) != "data":
+         os.chdir("case_studies/tktd-osmia/data")
+         # call the underlying function
+         ctx = click.Context(time_of_death_to_openguts)
+         ctx.forward(
+             time_of_death_to_openguts,
+             file="test/template_time_of_death.xlsx",
+             sheet="time-of-death",
+             intervention_columns=["Substance_A", "Substance B"],
+         )
+     else:
+         time_of_death_to_openguts()
@@ -0,0 +1,8 @@
+ import warnings
+
+ def datalad_locked_file_warning(file):
+     """Warn that a file is write-protected and show the DataLad command to unlock it."""
+     warnings.warn(
+         f"The file '{file}' is not writable. "
+         "To unlock the file, use DataLad with the command: "
+         f"datalad unlock '{file}'."
+     )