konfai 1.0.7__tar.gz → 1.0.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. {konfai-1.0.7 → konfai-1.0.8}/PKG-INFO +5 -3
  2. {konfai-1.0.7 → konfai-1.0.8}/README.md +4 -2
  3. {konfai-1.0.7 → konfai-1.0.8}/konfai/network/network.py +6 -11
  4. {konfai-1.0.7 → konfai-1.0.8}/konfai/predictor.py +80 -38
  5. {konfai-1.0.7 → konfai-1.0.8}/konfai.egg-info/PKG-INFO +5 -3
  6. {konfai-1.0.7 → konfai-1.0.8}/pyproject.toml +1 -1
  7. {konfai-1.0.7 → konfai-1.0.8}/LICENSE +0 -0
  8. {konfai-1.0.7 → konfai-1.0.8}/konfai/__init__.py +0 -0
  9. {konfai-1.0.7 → konfai-1.0.8}/konfai/data/HDF5.py +0 -0
  10. {konfai-1.0.7 → konfai-1.0.8}/konfai/data/__init__.py +0 -0
  11. {konfai-1.0.7 → konfai-1.0.8}/konfai/data/augmentation.py +0 -0
  12. {konfai-1.0.7 → konfai-1.0.8}/konfai/data/dataset.py +0 -0
  13. {konfai-1.0.7 → konfai-1.0.8}/konfai/data/transform.py +0 -0
  14. {konfai-1.0.7 → konfai-1.0.8}/konfai/evaluator.py +0 -0
  15. {konfai-1.0.7 → konfai-1.0.8}/konfai/main.py +0 -0
  16. {konfai-1.0.7 → konfai-1.0.8}/konfai/metric/__init__.py +0 -0
  17. {konfai-1.0.7 → konfai-1.0.8}/konfai/metric/measure.py +0 -0
  18. {konfai-1.0.7 → konfai-1.0.8}/konfai/metric/schedulers.py +0 -0
  19. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/classification/convNeXt.py +0 -0
  20. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/classification/resnet.py +0 -0
  21. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/generation/cStyleGan.py +0 -0
  22. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/generation/ddpm.py +0 -0
  23. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/generation/diffusionGan.py +0 -0
  24. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/generation/gan.py +0 -0
  25. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/generation/vae.py +0 -0
  26. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/registration/registration.py +0 -0
  27. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/representation/representation.py +0 -0
  28. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/segmentation/NestedUNet.py +0 -0
  29. {konfai-1.0.7 → konfai-1.0.8}/konfai/models/segmentation/UNet.py +0 -0
  30. {konfai-1.0.7 → konfai-1.0.8}/konfai/network/__init__.py +0 -0
  31. {konfai-1.0.7 → konfai-1.0.8}/konfai/network/blocks.py +0 -0
  32. {konfai-1.0.7 → konfai-1.0.8}/konfai/trainer.py +0 -0
  33. {konfai-1.0.7 → konfai-1.0.8}/konfai/utils/ITK.py +0 -0
  34. {konfai-1.0.7 → konfai-1.0.8}/konfai/utils/__init__.py +0 -0
  35. {konfai-1.0.7 → konfai-1.0.8}/konfai/utils/config.py +0 -0
  36. {konfai-1.0.7 → konfai-1.0.8}/konfai/utils/dataset.py +0 -0
  37. {konfai-1.0.7 → konfai-1.0.8}/konfai/utils/registration.py +0 -0
  38. {konfai-1.0.7 → konfai-1.0.8}/konfai/utils/utils.py +0 -0
  39. {konfai-1.0.7 → konfai-1.0.8}/konfai.egg-info/SOURCES.txt +0 -0
  40. {konfai-1.0.7 → konfai-1.0.8}/konfai.egg-info/dependency_links.txt +0 -0
  41. {konfai-1.0.7 → konfai-1.0.8}/konfai.egg-info/entry_points.txt +0 -0
  42. {konfai-1.0.7 → konfai-1.0.8}/konfai.egg-info/requires.txt +0 -0
  43. {konfai-1.0.7 → konfai-1.0.8}/konfai.egg-info/top_level.txt +0 -0
  44. {konfai-1.0.7 → konfai-1.0.8}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: konfai
3
- Version: 1.0.7
3
+ Version: 1.0.8
4
4
  Summary: Modular and configurable Deep Learning framework with YAML and PyTorch
5
5
  Author-email: Valentin Boussot <boussot.v@gmail.com>
6
6
  License-Expression: Apache-2.0
@@ -31,9 +31,11 @@ Provides-Extra: plot
31
31
  Requires-Dist: matplotlib; extra == "plot"
32
32
  Dynamic: license-file
33
33
 
34
+ [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/vboussot/KonfAI/blob/main/LICENSE)
35
+ [![PyPI version](https://img.shields.io/pypi/v/konfai)](https://pypi.org/project/konfai/)
34
36
 
35
37
  # 🧠 KonfAI
36
- <img src="https://raw.githubusercontent.com/vboussot/KonfAI/main/logo.png" alt="KonfAI Logo" width="200" align="right"/>
38
+ <img src="https://raw.githubusercontent.com/vboussot/KonfAI/main/logo.png" alt="KonfAI Logo" width="250" align="right"/>
37
39
 
38
40
  **KonfAI** is a modular and highly configurable deep learning framework built on PyTorch, driven entirely by YAML configuration files.
39
41
 
@@ -54,7 +56,7 @@ It is designed to support complex medical imaging workflows, flexible model arch
54
56
 
55
57
  ## 🚀 Installation
56
58
 
57
- ### From PyPI (recommended)
59
+ ### From PyPI
58
60
 
59
61
  Install KonfAI from PyPI:
60
62
 
@@ -1,6 +1,8 @@
1
+ [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/vboussot/KonfAI/blob/main/LICENSE)
2
+ [![PyPI version](https://img.shields.io/pypi/v/konfai)](https://pypi.org/project/konfai/)
1
3
 
2
4
  # 🧠 KonfAI
3
- <img src="https://raw.githubusercontent.com/vboussot/KonfAI/main/logo.png" alt="KonfAI Logo" width="200" align="right"/>
5
+ <img src="https://raw.githubusercontent.com/vboussot/KonfAI/main/logo.png" alt="KonfAI Logo" width="250" align="right"/>
4
6
 
5
7
  **KonfAI** is a modular and highly configurable deep learning framework built on PyTorch, driven entirely by YAML configuration files.
6
8
 
@@ -21,7 +23,7 @@ It is designed to support complex medical imaging workflows, flexible model arch
21
23
 
22
24
  ## 🚀 Installation
23
25
 
24
- ### From PyPI (recommended)
26
+ ### From PyPI
25
27
 
26
28
  Install KonfAI from PyPI:
27
29
 
@@ -509,10 +509,10 @@ class Network(ModuleArgsDict, ABC):
509
509
  results[name_function(self)] = function(self, *args, **kwargs)
510
510
  return results
511
511
 
512
- def _function_network(t : bool = False):
512
+ def _function_network():
513
513
  def _function_network_d(function : Callable):
514
514
  def new_function(self : Self, *args, **kwargs) -> dict[str, object]:
515
- return self._apply_network(lambda network: network._getName() if t else network.getName(), [], self.getName(), function, *args, **kwargs)
515
+ return self._apply_network(lambda network: network.getName(), [], self.getName(), function, *args, **kwargs)
516
516
  return new_function
517
517
  return _function_network_d
518
518
 
@@ -547,7 +547,7 @@ class Network(ModuleArgsDict, ABC):
547
547
  self._it = 0
548
548
  self.outputsGroup : list[OutputsGroup]= []
549
549
 
550
- @_function_network(True)
550
+ @_function_network()
551
551
  def state_dict(self) -> dict[str, OrderedDict]:
552
552
  destination = OrderedDict()
553
553
  destination._metadata = OrderedDict()
@@ -618,13 +618,13 @@ class Network(ModuleArgsDict, ABC):
618
618
  module.apply(fn)
619
619
  fn(self)
620
620
 
621
- @_function_network(True)
621
+ @_function_network()
622
622
  def load(self, state_dict : dict[str, dict[str, torch.Tensor]], init: bool = True, ema : bool =False):
623
623
  if init:
624
624
  self.apply(partial(ModuleArgsDict.init_func, init_type=self.init_type, init_gain=self.init_gain))
625
625
  name = "Model" + ("_EMA" if ema else "")
626
626
  if name in state_dict:
627
- model_state_dict_tmp = {k.split(".")[-1] : v for k, v in state_dict[name].items()}[self._getName()]
627
+ model_state_dict_tmp = {k.split(".")[-1] : v for k, v in state_dict[name].items()}[self.getName()]
628
628
  map = self.getMap()
629
629
  model_state_dict : OrderedDict[str, torch.Tensor] = OrderedDict()
630
630
 
@@ -806,8 +806,6 @@ class Network(ModuleArgsDict, ABC):
806
806
 
807
807
  if not len(layers_name):
808
808
  break
809
-
810
-
811
809
 
812
810
  def init_outputsGroup(self):
813
811
  metric_tmp = {network.measure : network.measure.outputsCriterions.keys() for network in self.getNetworks().values() if network.measure}
@@ -906,15 +904,12 @@ class Network(ModuleArgsDict, ABC):
906
904
  return module
907
905
 
908
906
  def getName(self) -> str:
909
- return self.__class__.__name__
907
+ return self.name
910
908
 
911
909
  def setName(self, name: str) -> Self:
912
910
  self.name = name
913
911
  return self
914
912
 
915
- def _getName(self) -> str:
916
- return self.name
917
-
918
913
  def setState(self, state: NetState):
919
914
  for module in self.modules():
920
915
  if isinstance(module, ModuleArgsDict):
@@ -22,6 +22,8 @@ import torch.distributed as dist
22
22
  from torch.nn.parallel import DistributedDataParallel as DDP
23
23
  from torch.utils.data import DataLoader
24
24
  import importlib
25
+ import copy
26
+ from collections import defaultdict
25
27
 
26
28
  class OutDataset(Dataset, NeedDevice, ABC):
27
29
 
@@ -152,7 +154,7 @@ class OutSameAsGroupDataset(OutDataset):
152
154
  if self.redution == "mean":
153
155
  result = torch.mean(result.float(), dim=0).to(dtype)
154
156
  elif self.redution == "median":
155
- result, _ = torch.median(result.float(), dim=0).to(dtype)
157
+ result, _ = torch.median(result.float(), dim=0)
156
158
  else:
157
159
  raise NameError("Reduction method does not exist (mean, median)")
158
160
  for transform in self.final_transforms:
@@ -199,19 +201,19 @@ class OutDatasetLoader():
199
201
 
200
202
  class _Predictor():
201
203
 
202
- def __init__(self, world_size: int, global_rank: int, local_rank: int, predict_path: str, data_log: Union[list[str], None], outsDataset: dict[str, OutDataset], model: DDP, dataloader_prediction: DataLoader) -> None:
204
+ def __init__(self, world_size: int, global_rank: int, local_rank: int, predict_path: str, data_log: Union[list[str], None], outsDataset: dict[str, OutDataset], modelComposite: DDP, dataloader_prediction: DataLoader) -> None:
203
205
  self.world_size = world_size
204
206
  self.global_rank = global_rank
205
207
  self.local_rank = local_rank
206
208
 
207
- self.model = model
209
+ self.modelComposite = modelComposite
208
210
  self.dataloader_prediction = dataloader_prediction
209
211
  self.outsDataset = outsDataset
210
212
 
211
213
 
212
214
  self.it = 0
213
215
 
214
- self.device = self.model.device
216
+ self.device = self.modelComposite.device
215
217
  self.dataset: DatasetIter = self.dataloader_prediction.dataset
216
218
  patch_size, overlap = self.dataset.getPatchConfig()
217
219
  for outDataset in self.outsDataset.values():
@@ -220,7 +222,7 @@ class _Predictor():
220
222
  if data_log is not None:
221
223
  for data in data_log:
222
224
  self.data_log[data.split("/")[0].replace(":", ".")] = (DataLog.__getitem__(data.split("/")[1]).value[0], int(data.split("/")[2]))
223
- self.tb = SummaryWriter(log_dir = predict_path+"Metric/") if len([network for network in self.model.module.getNetworks().values() if network.measure is not None]) or len(self.data_log) else None
225
+ self.tb = SummaryWriter(log_dir = predict_path+"Metric/") if len([network for network in self.modelComposite.module.getNetworks().values() if network.measure is not None]) or len(self.data_log) else None
224
226
 
225
227
  def __enter__(self):
226
228
  return self
@@ -234,15 +236,15 @@ class _Predictor():
234
236
 
235
237
  @torch.no_grad()
236
238
  def run(self):
237
- self.model.eval()
238
- self.model.module.setState(NetState.PREDICTION)
239
- desc = lambda : "Prediction : {}".format(description(self.model))
239
+ self.modelComposite.eval()
240
+ self.modelComposite.module.setState(NetState.PREDICTION)
241
+ desc = lambda : "Prediction : {}".format(description(self.modelComposite))
240
242
  self.dataloader_prediction.dataset.load()
241
243
  with tqdm.tqdm(iterable = enumerate(self.dataloader_prediction), leave=False, desc = desc(), total=len(self.dataloader_prediction), disable=self.global_rank != 0 and "DL_API_CLUSTER" not in os.environ) as batch_iter:
242
244
  dist.barrier()
243
245
  for it, data_dict in batch_iter:
244
246
  input = self.getInput(data_dict)
245
- for name, output in self.model(input, list(self.outsDataset.keys())):
247
+ for name, output in self.modelComposite(input, list(self.outsDataset.keys())):
246
248
  self._predict_log(data_dict)
247
249
  outDataset = self.outsDataset[name]
248
250
  for i, (index, patch_augmentation, patch_index) in enumerate([(int(index), int(patch_augmentation), int(patch_index)) for index, patch_augmentation, patch_index in zip(list(data_dict.values())[0][1], list(data_dict.values())[0][2], list(data_dict.values())[0][3])]):
@@ -254,7 +256,7 @@ class _Predictor():
254
256
  self.it += 1
255
257
 
256
258
  def _predict_log(self, data_dict : dict[str, tuple[torch.Tensor, int, int, int]]):
257
- measures = DistributedObject.getMeasure(self.world_size, self.global_rank, self.local_rank, {"" : self.model.module}, 1)
259
+ measures = DistributedObject.getMeasure(self.world_size, self.global_rank, self.local_rank, {"" : self.modelComposite.module}, 1)
258
260
 
259
261
  if self.global_rank == 0:
260
262
  images_log = []
@@ -265,20 +267,56 @@ class _Predictor():
265
267
  else:
266
268
  images_log.append(name.replace(":", "."))
267
269
 
268
- for name, network in self.model.module.getNetworks().items():
270
+ for name, network in self.modelComposite.module.getNetworks().items():
269
271
  if network.measure is not None:
270
272
  self.tb.add_scalars("Prediction/{}/Loss".format(name), {k : v[1] for k, v in measures["{}{}".format(name, "")][0].items()}, self.it)
271
273
  self.tb.add_scalars("Prediction/{}/Metric".format(name), {k : v[1] for k, v in measures["{}{}".format(name, "")][1].items()}, self.it)
272
274
  if len(images_log):
273
275
  for name, layer, _ in self.model.module.get_layers([v.to(0) for k, v in self.getInput(data_dict).items() if k[1]], images_log):
274
276
  self.data_log[name][0](self.tb, "Prediction/{}".format(name), layer[:self.data_log[name][1]].detach().cpu().numpy(), self.it)
277
+
278
+ class ModelComposite(Network):
279
+
280
+ def __init__(self, model: Network, nb_models: int, method: str):
281
+ super().__init__(model.in_channels, model.optimizer, model.schedulers, model.outputsCriterionsLoader, model.patch, model.nb_batch_per_step, model.init_type, model.init_gain, model.dim)
282
+ self.method = method
283
+ for i in range(nb_models):
284
+ self.add_module("Model_{}".format(i), copy.deepcopy(model), in_branch=[0], out_branch=["output_{}".format(i)])
285
+
286
+ def load(self, state_dicts : list[dict[str, dict[str, torch.Tensor]]]):
287
+ for i, state_dict in enumerate(state_dicts):
288
+ self["Model_{}".format(i)].load(state_dict, init=False)
289
+ self["Model_{}".format(i)].setName("{}_{}".format(self["Model_{}".format(i)].getName(), i))
290
+
291
+ def forward(self, data_dict: dict[tuple[str, bool], torch.Tensor], output_layers: list[str] = []) -> list[tuple[str, torch.Tensor]]:
292
+ result = {}
293
+ for name, module in self.items():
294
+ result[name] = module(data_dict, output_layers)
275
295
 
296
+ aggregated = defaultdict(list)
297
+ for module_outputs in result.values():
298
+ for key, tensor in module_outputs:
299
+ aggregated[key].append(tensor)
300
+
301
+ final_outputs = []
302
+ for key, tensors in aggregated.items():
303
+ stacked = torch.stack(tensors, dim=0)
304
+ if self.method == 'mean':
305
+ agg = torch.mean(stacked, dim=0)
306
+ elif self.method == 'median':
307
+ agg = torch.median(stacked, dim=0).values
308
+ final_outputs.append((key, agg))
309
+
310
+ return final_outputs
311
+
312
+
276
313
  class Predictor(DistributedObject):
277
314
 
278
315
  @config("Predictor")
279
316
  def __init__(self,
280
317
  model: ModelLoader = ModelLoader(),
281
318
  dataset: DataPrediction = DataPrediction(),
319
+ combine: str = "mean",
282
320
  train_name: str = "name",
283
321
  manual_seed : Union[int, None] = None,
284
322
  gpu_checkpoints: Union[list[str], None] = None,
@@ -289,6 +327,7 @@ class Predictor(DistributedObject):
289
327
  super().__init__(train_name)
290
328
  self.manual_seed = manual_seed
291
329
  self.dataset = dataset
330
+ self.combine = combine
292
331
 
293
332
  self.model = model.getModel(train=False)
294
333
  self.it = 0
@@ -305,28 +344,31 @@ class Predictor(DistributedObject):
305
344
 
306
345
  self.gpu_checkpoints = gpu_checkpoints
307
346
 
308
- def _load(self) -> dict[str, dict[str, torch.Tensor]]:
309
- if MODEL().startswith("https://"):
310
- try:
311
- state_dict = {MODEL().split(":")[1]: torch.hub.load_state_dict_from_url(url=MODEL().split(":")[0], map_location="cpu", check_hash=True)}
312
- except:
313
- raise Exception("Model : {} does not exist !".format(MODEL()))
314
- else:
315
- if MODEL() != "":
316
- path = ""
317
- name = MODEL()
347
+ def _load(self) -> list[dict[str, dict[str, torch.Tensor]]]:
348
+ model_paths = MODEL().split(":")
349
+ state_dicts = []
350
+ for model_path in model_paths:
351
+ if model_path.startswith("https://"):
352
+ try:
353
+ state_dicts.append(torch.hub.load_state_dict_from_url(url=model_path, map_location="cpu", check_hash=True))
354
+ except:
355
+ raise Exception("Model : {} does not exist !".format(model_path))
318
356
  else:
319
- if self.name.endswith(".pt"):
320
- path = MODELS_DIRECTORY()+"/".join(self.name.split("/")[:-1])+"/StateDict/"
321
- name = self.name.split("/")[-1]
357
+ if model_path != "":
358
+ path = ""
359
+ name = model_path
322
360
  else:
323
- path = MODELS_DIRECTORY()+self.name+"/StateDict/"
324
- name = sorted(os.listdir(path))[-1]
325
- if os.path.exists(path+name):
326
- state_dict = torch.load(path+name, weights_only=False)
327
- else:
328
- raise Exception("Model : {} does not exist !".format(path+name))
329
- return state_dict
361
+ if self.name.endswith(".pt"):
362
+ path = MODELS_DIRECTORY()+"/".join(self.name.split("/")[:-1])+"/StateDict/"
363
+ name = self.name.split("/")[-1]
364
+ else:
365
+ path = MODELS_DIRECTORY()+self.name+"/StateDict/"
366
+ name = sorted(os.listdir(path))[-1]
367
+ if os.path.exists(path+name):
368
+ state_dicts.append(torch.load(path+name, weights_only=False))
369
+ else:
370
+ raise Exception("Model : {} does not exist !".format(path+name))
371
+ return state_dicts
330
372
 
331
373
  def setup(self, world_size: int):
332
374
  for dataset_filename in self.datasets_filename:
@@ -345,11 +387,11 @@ class Predictor(DistributedObject):
345
387
  self.model.init(autocast=False, state = State.PREDICTION)
346
388
  self.model.init_outputsGroup()
347
389
  self.model._compute_channels_trace(self.model, self.model.in_channels, None, self.gpu_checkpoints)
348
- self.model.load(self._load(), init=False)
349
-
350
- if len(list(self.outsDataset.keys())) == 0 and len([network for network in self.model.getNetworks().values() if network.measure is not None]) == 0:
351
- exit(0)
390
+ self.modelComposite = ModelComposite(self.model, len(MODEL().split(":")), self.combine)
391
+ self.modelComposite.load(self._load())
352
392
 
393
+ if len(list(self.outsDataset.keys())) == 0 and len([network for network in self.modelComposite.getNetworks().values() if network.measure is not None]) == 0:
394
+ exit(0)
353
395
 
354
396
  self.size = (len(self.gpu_checkpoints)+1 if self.gpu_checkpoints else 1)
355
397
  self.dataloader = self.dataset.getData(world_size//self.size)
@@ -358,9 +400,9 @@ class Predictor(DistributedObject):
358
400
 
359
401
 
360
402
  def run_process(self, world_size: int, global_rank: int, local_rank: int, dataloaders: list[DataLoader]):
361
- model = Network.to(self.model, local_rank*self.size)
362
- model = DDP(model, static_graph=True) if torch.cuda.is_available() else CPU_Model(model)
363
- with _Predictor(world_size, global_rank, local_rank, self.predict_path, self.images_log, self.outsDataset, model, *dataloaders) as p:
403
+ modelComposite = Network.to(self.modelComposite, local_rank*self.size)
404
+ modelComposite = DDP(modelComposite, static_graph=True) if torch.cuda.is_available() else CPU_Model(modelComposite)
405
+ with _Predictor(world_size, global_rank, local_rank, self.predict_path, self.images_log, self.outsDataset, modelComposite, *dataloaders) as p:
364
406
  p.run()
365
407
 
366
408
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: konfai
3
- Version: 1.0.7
3
+ Version: 1.0.8
4
4
  Summary: Modular and configurable Deep Learning framework with YAML and PyTorch
5
5
  Author-email: Valentin Boussot <boussot.v@gmail.com>
6
6
  License-Expression: Apache-2.0
@@ -31,9 +31,11 @@ Provides-Extra: plot
31
31
  Requires-Dist: matplotlib; extra == "plot"
32
32
  Dynamic: license-file
33
33
 
34
+ [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/vboussot/KonfAI/blob/main/LICENSE)
35
+ [![PyPI version](https://img.shields.io/pypi/v/konfai)](https://pypi.org/project/konfai/)
34
36
 
35
37
  # 🧠 KonfAI
36
- <img src="https://raw.githubusercontent.com/vboussot/KonfAI/main/logo.png" alt="KonfAI Logo" width="200" align="right"/>
38
+ <img src="https://raw.githubusercontent.com/vboussot/KonfAI/main/logo.png" alt="KonfAI Logo" width="250" align="right"/>
37
39
 
38
40
  **KonfAI** is a modular and highly configurable deep learning framework built on PyTorch, driven entirely by YAML configuration files.
39
41
 
@@ -54,7 +56,7 @@ It is designed to support complex medical imaging workflows, flexible model arch
54
56
 
55
57
  ## 🚀 Installation
56
58
 
57
- ### From PyPI (recommended)
59
+ ### From PyPI
58
60
 
59
61
  Install KonfAI from PyPI:
60
62
 
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "konfai"
7
- version = "1.0.7"
7
+ version = "1.0.8"
8
8
  description = "Modular and configurable Deep Learning framework with YAML and PyTorch"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.8"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes