konfai 1.1.3-py3-none-any.whl → 1.1.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of konfai might be problematic.

konfai/data/augmentation.py CHANGED
@@ -134,7 +134,6 @@ class EulerTransform(DataAugmentation):
 
 class Translate(EulerTransform):
 
-    @config("Translate")
     def __init__(self, t_min: float = -10, t_max = 10, is_int: bool = False):
         super().__init__()
         self.t_min = t_min
@@ -152,7 +151,6 @@ class Translate(EulerTransform):
 
 class Rotate(EulerTransform):
 
-    @config("Rotate")
     def __init__(self, a_min: float = 0, a_max: float = 360, is_quarter: bool = False):
         super().__init__()
         self.a_min = a_min
@@ -174,7 +172,6 @@ class Rotate(EulerTransform):
 
 class Scale(EulerTransform):
 
-    @config("Scale")
     def __init__(self, s_std: float = 0.2):
         super().__init__()
         self.s_std = s_std
@@ -187,7 +184,6 @@ class Scale(EulerTransform):
 
 class Flip(DataAugmentation):
 
-    @config("Flip")
     def __init__(self, f_prob: Union[list[float], None] = [0.33, 0.33 ,0.33]) -> None:
         super().__init__()
         self.f_prob = f_prob
@@ -211,7 +207,6 @@ class Flip(DataAugmentation):
 
 class ColorTransform(DataAugmentation):
 
-    @config("ColorTransform")
     def __init__(self, groups: Union[list[str], None] = None) -> None:
         super().__init__(groups)
         self.matrix: dict[int, list[torch.Tensor]] = {}
@@ -236,7 +231,6 @@ class ColorTransform(DataAugmentation):
 
 class Brightness(ColorTransform):
 
-    @config("Brightness")
     def __init__(self, b_std: float, groups: Union[list[str], None] = None) -> None:
         super().__init__(groups)
         self.b_std = b_std
@@ -248,7 +242,6 @@ class Brightness(ColorTransform):
 
 class Contrast(ColorTransform):
 
-    @config("Contrast")
     def __init__(self, c_std: float, groups: Union[list[str], None] = None) -> None:
         super().__init__(groups)
         self.c_std = c_std
@@ -260,7 +253,6 @@ class Contrast(ColorTransform):
 
 class LumaFlip(ColorTransform):
 
-    @config("LumaFlip")
     def __init__(self, groups: Union[list[str], None] = None) -> None:
         super().__init__(groups)
         self.v = torch.tensor([1, 1, 1, 0])/torch.sqrt(torch.tensor(3))
@@ -285,7 +277,6 @@ class HUE(ColorTransform):
 
 class Saturation(ColorTransform):
 
-    @config("Saturation")
     def __init__(self, s_std: float, groups: Union[list[str], None] = None) -> None:
         super().__init__(groups)
         self.s_std = s_std
@@ -372,7 +363,6 @@ class Saturation(ColorTransform):
 
 class Noise(DataAugmentation):
 
-    @config("Noise")
     def __init__(self, n_std: float, noise_step: int=1000, beta_start: float = 1e-4, beta_end: float = 0.02, groups: Union[list[str], None] = None) -> None:
         super().__init__(groups)
         self.n_std = n_std
@@ -427,7 +417,6 @@ class Noise(DataAugmentation):
 
 class CutOUT(DataAugmentation):
 
-    @config("CutOUT")
     def __init__(self, c_prob: float, cutout_size: int, value: float, groups: Union[list[str], None] = None) -> None:
         super().__init__(groups)
         self.c_prob = c_prob
@@ -458,7 +447,6 @@ class CutOUT(DataAugmentation):
 
 class Elastix(DataAugmentation):
 
-    @config("Elastix")
     def __init__(self, grid_spacing: int = 16, max_displacement: int = 16) -> None:
         super().__init__()
         self.grid_spacing = grid_spacing
@@ -529,7 +517,6 @@ class Elastix(DataAugmentation):
 
 class Permute(DataAugmentation):
 
-    @config("Permute")
     def __init__(self, prob_permute: Union[list[float], None] = [0.5 ,0.5]) -> None:
         super().__init__()
         self._permute_dims = torch.tensor([[0, 2, 1, 3], [0, 3, 1, 2]])
@@ -567,7 +554,6 @@ class Permute(DataAugmentation):
 
 class Mask(DataAugmentation):
 
-    @config("Mask")
     def __init__(self, mask: str, value: float, groups: Union[list[str], None] = None) -> None:
         super().__init__(groups)
         if mask is not None:
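Note on the hunks above: 1.1.4 removes the per-class @config("...") decorators from the augmentation constructors, while the constructor signatures themselves are unchanged, so outside of a YAML configuration these classes can simply be instantiated directly. A minimal sketch based on the signatures shown above (import path taken from the file header, keyword values arbitrary):

    from konfai.data.augmentation import Translate, Rotate, Flip  # path assumed from this diff

    # Constructors exactly as shown in the 1.1.4 hunks above.
    translate = Translate(t_min=-5, t_max=5, is_int=True)
    rotate = Rotate(a_min=0, a_max=90, is_quarter=True)
    flip = Flip(f_prob=[0.5, 0.25, 0.25])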
konfai/data/data_manager.py CHANGED
@@ -217,7 +217,6 @@ class Subset():
         else:
             names_filtred = names
         size = len(names_filtred)
-
         index = []
         if self.subset is None:
             index = list(range(0, size))
@@ -226,15 +225,24 @@ class Subset():
             r = np.clip(np.asarray([int(self.subset.split(":")[0]), int(self.subset.split(":")[1])]), 0, size)
             index = list(range(r[0], r[1]))
         elif os.path.exists(self.subset):
-            validation_names = []
+            train_names = []
             with open(self.subset, "r") as f:
                 for name in f:
-                    validation_names.append(name.strip())
+                    train_names.append(name.strip())
             index = []
             for i, name in enumerate(names_filtred):
-                if name in validation_names:
+                if name in train_names:
                     index.append(i)
-
+        elif self.subset.startswith("~") and os.path.exists(self.subset[1:]):
+            exclude_names = []
+            with open(self.subset[1:], "r") as f:
+                for name in f:
+                    exclude_names.append(name.strip())
+            index = []
+            for i, name in enumerate(names_filtred):
+                if name not in exclude_names:
+                    index.append(i)
+
         elif isinstance(self.subset, list):
             if len(self.subset) > 0:
                 if isinstance(self.subset[0], int):
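Reading of the Subset change above: alongside the existing "start:stop" range strings and plain name-list files, a subset value starting with "~" now points to a file whose names are excluded rather than kept. A minimal sketch of the accepted forms (file paths are hypothetical, not part of konfai):

    # Each form drives the index-selection logic shown in the hunk above.
    subset = None                   # keep every entry
    subset = "10:50"                # keep entries 10..49 (clipped to the dataset size)
    subset = "splits/train.txt"     # keep only the names listed in this file
    subset = "~splits/test.txt"     # new in 1.1.4: keep everything EXCEPT the names in this file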
konfai/evaluator.py CHANGED
@@ -102,7 +102,7 @@ class Evaluator(DistributedObject):
         result = {}
         for output_group in self.metrics:
             for target_group in self.metrics[output_group]:
-                targets = [data_dict[group][0].to(0) if torch.cuda.is_available() else data_dict[group][0] for group in target_group.split("/") if group in data_dict]
+                targets = [data_dict[group][0].to(0) if torch.cuda.is_available() else data_dict[group][0] for group in target_group.split(";") if group in data_dict]
                 name = data_dict[output_group][1][0]
                 for metric in self.metrics[output_group][target_group]:
                     result["{}:{}:{}".format(output_group, target_group, metric.__class__.__name__)] = metric(data_dict[output_group][0].to(0) if torch.cuda.is_available() else data_dict[output_group][0], *targets).item()
@@ -135,8 +135,11 @@ class Evaluator(DistributedObject):
                 f"Available groups: {sorted(groupsDest)}"
             )
 
-        target_groups = {target for targets in self.metrics.values() for target in targets}
-        missing_targets = target_groups - set(groupsDest)
+        target_groups = []
+        for i in {target for targets in self.metrics.values() for target in targets}:
+            for u in i.split(";"):
+                target_groups.append(u)
+        missing_targets = set(target_groups) - set(groupsDest)
         if missing_targets:
             raise EvaluatorError(
                 f"The following metric target groups are missing from 'groupsDest': {sorted(missing_targets)}. ",
konfai/models/segmentation/NestedUNet.py CHANGED
@@ -31,7 +31,6 @@ class NestedUNet(network.Network):
         self.add_module("Softmax", torch.nn.Softmax(dim=1))
         self.add_module("Argmax", blocks.ArgMax(dim=1))
 
-    @config("NestedUNet")
     def __init__( self,
                   optimizer : network.OptimizerLoader = network.OptimizerLoader(),
                   schedulers : network.LRSchedulersLoader = network.LRSchedulersLoader(),
konfai/models/segmentation/UNet.py CHANGED
@@ -38,7 +38,6 @@ class UNetBlock(network.ModuleArgsDict):
 
 class UNet(network.Network):
 
-    @config("UNet")
     def __init__( self,
                   optimizer : network.OptimizerLoader = network.OptimizerLoader(),
                   schedulers : network.LRSchedulersLoader = network.LRSchedulersLoader(),
konfai/network/network.py CHANGED
@@ -157,7 +157,7 @@ class Measure():
         outputs_group_rename = {}
 
         modules = []
-        for i,_ in model.named_modules():
+        for i,_,_ in model.named_ModuleArgsDict():
            modules.append(i)
 
         for output_group in self.outputsCriterions.keys():
@@ -167,11 +167,12 @@ class Measure():
                    "Please check that the name matches exactly a submodule or output of your model architecture."
                )
            for target_group in self.outputsCriterions[output_group]:
-                if target_group not in group_dest:
-                    raise MeasureError(
-                        f"The target_group '{target_group}' defined in 'outputsCriterions.{output_group}.targetsCriterions' was not found in the available destination groups.",
-                        "This target_group is expected for loss or metric computation, but was not loaded in 'group_dest'.",
-                        f"Please make sure that the group '{target_group}' is defined in 'Dataset:groups_src:...:groups_dest:'{target_group}'' and correctly loaded from the dataset.")
+                for target_group_tmp in target_group.split(";"):
+                    if target_group_tmp not in group_dest:
+                        raise MeasureError(
+                            f"The target_group '{target_group_tmp}' defined in 'outputsCriterions.{output_group}.targetsCriterions' was not found in the available destination groups.",
+                            "This target_group is expected for loss or metric computation, but was not loaded in 'group_dest'.",
+                            f"Please make sure that the group '{target_group_tmp}' is defined in 'Dataset:groups_src:...:groups_dest:'{target_group_tmp}'' and correctly loaded from the dataset.")
                for criterion in self.outputsCriterions[output_group][target_group]:
                    if not self.outputsCriterions[output_group][target_group][criterion].isTorchCriterion:
                        outputs_group_rename[output_group] = criterion.init(model, output_group, target_group)
@@ -189,7 +190,7 @@ class Measure():
 
     def update(self, output_group: str, output : torch.Tensor, data_dict: dict[str, torch.Tensor], it: int, nb_patch: int, training: bool) -> None:
        for target_group in self.outputsCriterions[output_group]:
-            target = [data_dict[group].to(output[0].device).detach() for group in target_group.split("/") if group in data_dict]
+            target = [data_dict[group].to(output[0].device).detach() for group in target_group.split(";") if group in data_dict]
 
            for criterion, criterionsAttr in self.outputsCriterions[output_group][target_group].items():
                if it >= criterionsAttr.stepStart and (criterionsAttr.stepStop is None or it <= criterionsAttr.stepStop):
@@ -928,6 +929,16 @@ class Network(ModuleArgsDict, ABC):
            if isinstance(module, ModuleArgsDict):
                module._training = state
 
+class MinimalModel(Network):
+
+    def __init__(self, model: Network, optimizer : OptimizerLoader = OptimizerLoader(),
+                 schedulers : LRSchedulersLoader = LRSchedulersLoader(),
+                 outputsCriterions: dict[str, TargetCriterionsLoader] = {"default" : TargetCriterionsLoader()},
+                 patch : Union[ModelPatch, None] = None,
+                 dim : int = 3, nb_batch_per_step = 1, init_type = "normal", init_gain = 0.02):
+        super().__init__(1, optimizer, schedulers, outputsCriterions, patch, nb_batch_per_step, init_type, init_gain, dim)
+        self.add_module("Model", model)
+
 class ModelLoader():
 
     @config("Model")
@@ -937,11 +948,13 @@ class ModelLoader():
     def getModel(self, train : bool = True, DL_args: Union[str, None] = None, DL_without=["optimizer", "schedulers", "nb_batch_per_step", "init_type", "init_gain"]) -> Network:
        if not DL_args:
            DL_args="{}.Model".format(KONFAI_ROOT())
-        model = partial(getattr(importlib.import_module(self.module), self.name), config = None, DL_args=DL_args)
-        if not train:
-            model = partial(model, DL_without = DL_without)
-        return model()
-
+        DL_args += "."+self.name
+        model = config(DL_args)(getattr(importlib.import_module(self.module), self.name))(config = None, DL_without = DL_without if not train else [])
+        if not isinstance(model, Network):
+            model = config(DL_args)(partial(MinimalModel, model))(config = None, DL_without = DL_without+["model"] if not train else [])
+        model.setName(self.name)
+        return model
+
 class CPU_Model():
 
     def __init__(self, model: Network) -> None:
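Two things change in network.py: Measure accepts the same ";"-separated target groups as the evaluator, and ModelLoader.getModel now wraps anything that is not already a konfai Network in the new MinimalModel, which registers the wrapped model under the single submodule name "Model". A minimal sketch of what such a wrappable model can look like (hypothetical class; the wrapping shown at the end is conceptual, not a verbatim konfai invocation):

    import torch

    class TinyHead(torch.nn.Module):                      # hypothetical plain PyTorch model
        def __init__(self, channels: int = 8) -> None:
            super().__init__()
            self.conv = torch.nn.Conv3d(1, channels, kernel_size=3, padding=1)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.conv(x)

    # Conceptually, getModel() in 1.1.4 ends up doing the equivalent of:
    #   model = MinimalModel(TinyHead(...), ...)   # because TinyHead is not a Network
    #   model.setName("TinyHead")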
konfai/predictor.py CHANGED
@@ -258,7 +258,6 @@ class _Predictor():
         desc = lambda : "Prediction : {}".format(description(self.modelComposite))
         self.dataloader_prediction.dataset.load("Prediction")
         with tqdm.tqdm(iterable = enumerate(self.dataloader_prediction), leave=True, desc = desc(), total=len(self.dataloader_prediction), ncols=0) as batch_iter:
-            dist.barrier()
             for it, data_dict in batch_iter:
                 with torch.amp.autocast('cuda', enabled=self.autocast):
                     input = self.getInput(data_dict)
@@ -272,7 +271,7 @@ class _Predictor():
 
                 batch_iter.set_description(desc())
                 self.it += 1
-
+                
     def _predict_log(self, data_dict : dict[str, tuple[torch.Tensor, int, int, int]]):
         measures = DistributedObject.getMeasure(self.world_size, self.global_rank, self.local_rank, {"" : self.modelComposite.module}, 1)
 
konfai/trainer.py CHANGED
@@ -34,7 +34,7 @@ class EarlyStoppingBase:
 class EarlyStopping(EarlyStoppingBase):
 
     @config("EarlyStopping")
-    def __init__(self, monitor: Union[list[str], None] = [], patience=10, min_delta=0.0, mode="min"):
+    def __init__(self, monitor: Union[list[str], None] = [], patience: int=10, min_delta: float=0.0, mode: str="min"):
         super().__init__()
         self.monitor = [] if monitor is None else monitor
         self.patience = patience
konfai/utils/config.py CHANGED
@@ -3,7 +3,7 @@ import ruamel.yaml
 import inspect
 import collections
 from copy import deepcopy
-from typing import Union, Literal, get_origin, get_args
+from typing import Union, Literal, get_origin, get_args, Any
 import torch
 from konfai import CONFIG_FILE
 from konfai.utils.utils import ConfigError
@@ -175,8 +175,11 @@ def config(key : Union[str, None] = None):
            os.environ['KONFAI_CONFIG_VARIABLE'] = "False"
            kwargs = {}
            for param in list(inspect.signature(function).parameters.values())[len(args):]:
-
+                if param.name in without:
+                    continue
+
                annotation = param.annotation
+
                # --- support Literal ---
                if get_origin(annotation) is Literal:
                    allowed_values = get_args(annotation)
@@ -192,11 +195,10 @@ def config(key : Union[str, None] = None):
                    for i in annotation.__args__:
                        annotation = i
                        break
-                if param.name in without:
-                    continue
+
                if not annotation == inspect._empty:
                    if annotation not in [int, str, bool, float, torch.Tensor]:
-                        if str(annotation).startswith("list") or str(annotation).startswith("tuple") or str(annotation).startswith("typing.Tuple") or str(annotation).startswith("typing.List"):
+                        if str(annotation).startswith("list") or str(annotation).startswith("tuple") or str(annotation).startswith("typing.Tuple") or str(annotation).startswith("typing.List") or str(annotation).startswith("typing.Sequence"):
                            elem_type = annotation.__args__[0]
                            values = config.getValue(param.name, param.default)
                            if getattr(elem_type, '__origin__', None) is Union:
@@ -221,7 +223,7 @@ def config(key : Union[str, None] = None):
                        elif str(annotation).startswith("dict"):
                            if annotation.__args__[0] == str:
                                values = config.getValue(param.name, param.default)
-                                if values is not None and annotation.__args__[1] not in [int, str, bool, float]:
+                                if values is not None and annotation.__args__[1] not in [int, str, bool, float, Any]:
                                    try:
                                        kwargs[param.name] = {value : annotation.__args__[1](config = filename, DL_args = key_tmp+"."+param.name+"."+value) for value in values}
                                    except ValueError as e:
@@ -229,6 +231,7 @@ def config(key : Union[str, None] = None):
                                    except Exception as e:
                                        raise ConfigError("{} {}".format(values, e))
                                else:
+
                                    kwargs[param.name] = values
                        else:
                            raise ConfigError("Config: The config only supports types : config(Object), int, str, bool, float, list[int], list[str], list[bool], list[float], dict[str, Object]")
@@ -236,7 +239,7 @@ def config(key : Union[str, None] = None):
                    try:
                        kwargs[param.name] = annotation(config = filename, DL_args = key_tmp)
                    except Exception as e:
-                        raise ConfigError("Failed to instantiate {} with type {}, error {} ".format(param.name, annotation.__name__, e))
+                        raise ConfigError("Failed to instantiate {} with type {}, error {} ".format(param.name, annotation, e))
 
            if os.environ['KONFAI_CONFIG_VARIABLE'] == "True":
                os.environ['KONFAI_CONFIG_VARIABLE'] = "False"
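The config decorator changes above are mostly about parameter handling: parameters excluded via the without list are now skipped before their annotation is inspected, typing.Sequence[...] parameters are parsed like lists, and dict values annotated as Any are passed through untouched. A minimal sketch of a constructor that would benefit from the Sequence support (class name and config key are hypothetical, not part of konfai):

    from typing import Sequence
    from konfai.utils.config import config   # module path shown in this diff

    class WindowClip:                          # hypothetical example class
        @config("WindowClip")                  # hypothetical config key
        def __init__(self, bounds: Sequence[float] = (0.0, 1.0)) -> None:
            # In 1.1.4, str(Sequence[float]) starts with "typing.Sequence",
            # so the decorator reads this parameter from YAML like a list[float].
            self.bounds = list(bounds)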
konfai/utils/utils.py CHANGED
@@ -21,7 +21,7 @@ import sys
 import re
 
 
-def description(model, modelEMA = None, showMemory: bool = True) -> str:
+def description(model, modelEMA = None, showMemory: bool = True, train: bool = True) -> str:
     values_desc = lambda weights, values: " ".join(["{}({:.2f}) : {:.6f}".format(name.split(":")[-1], weight, value) for (name, value), weight in zip(values.items(), weights.values())])
     model_desc = lambda model : "("+" ".join(["{}({:.6f}) : {}".format(name, network.optimizer.param_groups[0]['lr'] if network.optimizer is not None else 0, values_desc(network.measure.getLastWeights(), network.measure.getLastValues())) for name, network in model.module.getNetworks().items() if network.measure is not None])+")"
     result = "Loss {}".format(model_desc(model))
@@ -33,9 +33,9 @@ def description(model, modelEMA = None, showMemory: bool = True) -> str:
     return result
 
 def _getModule(classpath : str, type : str) -> tuple[str, str]:
-    if len(classpath.split("_")) > 1:
-        module = ".".join(classpath.split("_")[:-1])
-        name = classpath.split("_")[-1]
+    if len(classpath.split(":")) > 1:
+        module = ".".join(classpath.split(":")[:-1])
+        name = classpath.split(":")[-1]
     else:
         module = "konfai."+type+("." if len(classpath.split(".")) > 2 else "")+".".join(classpath.split(".")[:-1])
         name = classpath.split(".")[-1]
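The _getModule change above switches the separator for explicitly-addressed classes from "_" to ":": everything before the last ":" is treated as the module, the final part as the class name, while classpaths without ":" keep resolving against the built-in konfai packages. A minimal standalone sketch of the new explicit form (function name and example path are illustrative only):

    def split_classpath(classpath: str) -> tuple[str, str]:
        # Explicit-module case only, mirroring _getModule above.
        parts = classpath.split(":")
        return ".".join(parts[:-1]), parts[-1]

    print(split_classpath("my_project.models:CustomUNet"))   # ('my_project.models', 'CustomUNet')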
konfai-1.1.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: konfai
-Version: 1.1.3
+Version: 1.1.4
 Summary: Modular and configurable Deep Learning framework with YAML and PyTorch
 Author-email: Valentin Boussot <boussot.v@gmail.com>
 License-Expression: Apache-2.0
konfai-1.1.4.dist-info/RECORD CHANGED
@@ -1,11 +1,11 @@
 konfai/__init__.py,sha256=YXG-wpSEXWs6Jt3BDI77V4r89gEUNX-6lxW9btj5VYI,851
-konfai/evaluator.py,sha256=rAhfdRemMjzC3VoaqyQKJR0SBekuLDiLT1nhblH8RQk,8293
+konfai/evaluator.py,sha256=G8tXI-g8yTxBZXmESpRW7B5UEI4SMXq47fsGksU0G0M,8394
 konfai/main.py,sha256=xhCIs3VKXxNCgyyN616K-VANEFCsZZRR-l8LIMHlmPw,2597
-konfai/predictor.py,sha256=SgHs_gylsyVtQ41DBRBRmcpHIMg2ghLbxELbQBxjjFY,22747
-konfai/trainer.py,sha256=GCi8oRgTlbZxwKfmJlS2S4VU1yj1pCl6QeAWNwdSFPs,20223
+konfai/predictor.py,sha256=R9ohOqEcSoq6Wu9Yv3pkerVfm6qSotd4aICe7ctOsU0,22728
+konfai/trainer.py,sha256=gJ7qmU302M3Mevq3l1aIdPpey5C3prtibD69LyxbnAg,20240
 konfai/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-konfai/data/augmentation.py,sha256=kp4HyMSGflbuHbOsNiXBxszLFSuOZ4zRRQT0cHNIVUI,32620
-konfai/data/data_manager.py,sha256=oG73iHT5qV52UTWQPC3MKjsjaQHJUOAPF1WA-XeTmUE,29700
+konfai/data/augmentation.py,sha256=ruvS7ykFveOZBe2Mv_TOU5a8AzUgH7gfTipwqYgRM7k,32293
+konfai/data/data_manager.py,sha256=TCQFZ_zpRHYIomPOotG8U_I0DfpjhSaajb7ypd6fyRU,30103
 konfai/data/patching.py,sha256=oq43r3JlS9hw-pnzpjT3epvXJxEKqiQEtFVZPjY9Jic,15749
 konfai/data/transform.py,sha256=ZZoxft0V8t-DaKjF3OsIFezz41B8_xwPqBvYCYwF0dk,26471
 konfai/metric/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -20,20 +20,20 @@ konfai/models/generation/gan.py,sha256=-GoKxHm3W9NdD4U77UcJrG5TfOZ3NWFUZG663kt2X
 konfai/models/generation/vae.py,sha256=_3JYVT2ojZ0P98tYcD2ny7a-gWVUmnByLDhY7i-n_4g,4719
 konfai/models/registration/registration.py,sha256=18EiWt4RJIXLyFtqU-kHjV1sMnQRm9mxAA6_-2B1YqI,6313
 konfai/models/representation/representation.py,sha256=RwQYoxtdph440-t_ZLelykl0hkUAD1zdspQaLkgxb-0,2677
-konfai/models/segmentation/NestedUNet.py,sha256=6XGizAIc4bDL8vx4AHW8BBFjUvovRYcjdMBHsN4ViNo,4301
-konfai/models/segmentation/UNet.py,sha256=JhKyGuaFXY7thJcbQ2mWGuIBtNdUhh5FoacpWDZ0N1k,4065
+konfai/models/segmentation/NestedUNet.py,sha256=wutcHYpZkOm8FqPPt5zifD6dGSDlhB9zIBlibzVyAYY,4275
+konfai/models/segmentation/UNet.py,sha256=Uei51-_0Le24ZJs9tbuL6hQn66kigkfbaLSMlH38els,4045
 konfai/network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 konfai/network/blocks.py,sha256=RaTI0Lrvq1V-GIFei-WTUB6wlg4LydZksAyJ8DMk40M,13502
-konfai/network/network.py,sha256=SnwQUKFTZZU8zuix5sm0vW8s0G3h6Fa8pHoIcwC984Q,46512
+konfai/network/network.py,sha256=YCPfgQ7EuRiTgAlMlQTx5ospluO4aiqSbsdU4GdLiMA,47467
 konfai/utils/ITK.py,sha256=OxTieDNNYHGkn7zxJsAG-6ecRG1VYMvn1dlBbBe1DOs,13955
 konfai/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-konfai/utils/config.py,sha256=OsCUqgNaPX56nD_MpJ2WRpiQ7xxqHonfPYaS9VGtsl8,12413
+konfai/utils/config.py,sha256=f7o83ix5_oNbr2pki-Czqr-yHi-8n92ZL64nlo0XGwA,12514
 konfai/utils/dataset.py,sha256=6ZzevdhJ7e5zlXATAVwSh9O6acKXM7gYNxkMAa5DrmM,36351
 konfai/utils/registration.py,sha256=v1srEBOcgDnHrx0YtsK6bcj0yCMH7wNeaQ3wC7gEvOw,8898
-konfai/utils/utils.py,sha256=Laq8bGc5mGKFZlJIkHxa-BrC9uR2F7MTRuL4YFAIxQY,23439
-konfai-1.1.3.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-konfai-1.1.3.dist-info/METADATA,sha256=Fj6snnVsKrMZjmdZZkp4u1YoonXYd_VWQV_B3hxd7qQ,2515
-konfai-1.1.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-konfai-1.1.3.dist-info/entry_points.txt,sha256=fG82HRN5-g39ACSOCtij_I3N6EHxfYnMR0D7TI_8pW8,81
-konfai-1.1.3.dist-info/top_level.txt,sha256=xF470dkIlFoFqTZEOlRehKJr4WU_8OKGXrJqYm9vWKs,7
-konfai-1.1.3.dist-info/RECORD,,
+konfai/utils/utils.py,sha256=-W5fbsYsTh4wIOEZBEQY7krZvGU7JC8tXHYC2rZAkaM,23459
+konfai-1.1.4.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+konfai-1.1.4.dist-info/METADATA,sha256=86Azx-ool5VrcYT48km-OOMvvBQzjJX1jZihzDxAhgs,2515
+konfai-1.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+konfai-1.1.4.dist-info/entry_points.txt,sha256=fG82HRN5-g39ACSOCtij_I3N6EHxfYnMR0D7TI_8pW8,81
+konfai-1.1.4.dist-info/top_level.txt,sha256=xF470dkIlFoFqTZEOlRehKJr4WU_8OKGXrJqYm9vWKs,7
+konfai-1.1.4.dist-info/RECORD,,