monai-weekly 1.5.dev2509__py3-none-any.whl → 1.5.dev2511__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (66)
  1. monai/__init__.py +1 -1
  2. monai/_version.py +3 -3
  3. monai/apps/deepedit/interaction.py +1 -1
  4. monai/apps/deepgrow/interaction.py +1 -1
  5. monai/apps/detection/networks/retinanet_detector.py +1 -1
  6. monai/apps/detection/networks/retinanet_network.py +5 -5
  7. monai/apps/detection/utils/box_coder.py +2 -2
  8. monai/apps/generation/maisi/networks/autoencoderkl_maisi.py +4 -0
  9. monai/apps/mmars/mmars.py +1 -1
  10. monai/apps/reconstruction/networks/blocks/varnetblock.py +1 -1
  11. monai/bundle/scripts.py +3 -4
  12. monai/data/dataset.py +2 -9
  13. monai/data/utils.py +1 -1
  14. monai/data/video_dataset.py +1 -1
  15. monai/engines/evaluator.py +11 -16
  16. monai/engines/trainer.py +11 -17
  17. monai/engines/utils.py +1 -1
  18. monai/engines/workflow.py +2 -2
  19. monai/fl/client/monai_algo.py +1 -1
  20. monai/handlers/checkpoint_loader.py +1 -1
  21. monai/inferers/inferer.py +33 -13
  22. monai/inferers/merger.py +16 -13
  23. monai/losses/perceptual.py +1 -1
  24. monai/losses/sure_loss.py +1 -1
  25. monai/networks/blocks/crossattention.py +1 -6
  26. monai/networks/blocks/feature_pyramid_network.py +4 -2
  27. monai/networks/blocks/selfattention.py +1 -6
  28. monai/networks/blocks/upsample.py +3 -11
  29. monai/networks/layers/vector_quantizer.py +2 -2
  30. monai/networks/nets/hovernet.py +5 -4
  31. monai/networks/nets/resnet.py +2 -2
  32. monai/networks/nets/senet.py +1 -1
  33. monai/networks/nets/swin_unetr.py +46 -49
  34. monai/networks/nets/transchex.py +3 -2
  35. monai/networks/nets/vista3d.py +7 -7
  36. monai/networks/schedulers/__init__.py +1 -0
  37. monai/networks/schedulers/rectified_flow.py +322 -0
  38. monai/networks/utils.py +5 -4
  39. monai/transforms/intensity/array.py +1 -1
  40. monai/transforms/spatial/array.py +6 -6
  41. monai/utils/misc.py +1 -1
  42. monai/utils/state_cacher.py +1 -1
  43. {monai_weekly-1.5.dev2509.dist-info → monai_weekly-1.5.dev2511.dist-info}/METADATA +4 -3
  44. {monai_weekly-1.5.dev2509.dist-info → monai_weekly-1.5.dev2511.dist-info}/RECORD +66 -64
  45. {monai_weekly-1.5.dev2509.dist-info → monai_weekly-1.5.dev2511.dist-info}/WHEEL +1 -1
  46. tests/bundle/test_bundle_download.py +16 -6
  47. tests/config/test_cv2_dist.py +1 -2
  48. tests/inferers/test_controlnet_inferers.py +96 -32
  49. tests/inferers/test_diffusion_inferer.py +99 -1
  50. tests/inferers/test_latent_diffusion_inferer.py +217 -211
  51. tests/integration/test_integration_bundle_run.py +2 -4
  52. tests/integration/test_integration_classification_2d.py +1 -1
  53. tests/integration/test_integration_fast_train.py +2 -2
  54. tests/integration/test_integration_segmentation_3d.py +1 -1
  55. tests/metrics/test_compute_multiscalessim_metric.py +3 -3
  56. tests/metrics/test_surface_dice.py +3 -3
  57. tests/networks/nets/test_autoencoderkl.py +1 -1
  58. tests/networks/nets/test_controlnet.py +1 -1
  59. tests/networks/nets/test_diffusion_model_unet.py +1 -1
  60. tests/networks/nets/test_network_consistency.py +1 -1
  61. tests/networks/nets/test_swin_unetr.py +1 -1
  62. tests/networks/nets/test_transformer.py +1 -1
  63. tests/networks/schedulers/test_scheduler_rflow.py +105 -0
  64. tests/networks/test_save_state.py +1 -1
  65. {monai_weekly-1.5.dev2509.dist-info → monai_weekly-1.5.dev2511.dist-info}/LICENSE +0 -0
  66. {monai_weekly-1.5.dev2509.dist-info → monai_weekly-1.5.dev2511.dist-info}/top_level.txt +0 -0
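
The headline addition in this pair is a rectified-flow noise scheduler (monai/networks/schedulers/rectified_flow.py, 322 new lines, exported as RFlowScheduler per the import hunk below). The test diffs that follow exercise it by looping each inferer test over both schedulers. A minimal sketch of that pattern follows; the network hyperparameters are illustrative assumptions, not taken from this diff, and einops is required for the attention blocks:

    import torch

    from monai.inferers import LatentDiffusionInferer
    from monai.networks.nets import AutoencoderKL, DiffusionModelUNet
    from monai.networks.schedulers import DDPMScheduler, RFlowScheduler

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Tiny stage-1 autoencoder: 8x8 inputs map to a 4-channel 4x4 latent space.
    autoencoder = AutoencoderKL(
        spatial_dims=2, in_channels=1, out_channels=1, channels=(4, 4),
        latent_channels=4, num_res_blocks=1, norm_num_groups=4,
        attention_levels=(False, False),
        with_encoder_nonlocal_attn=False, with_decoder_nonlocal_attn=False,
    ).to(device)
    # Tiny stage-2 diffusion UNet operating on the latent space.
    unet = DiffusionModelUNet(
        spatial_dims=2, in_channels=4, out_channels=4, channels=(8, 8),
        num_res_blocks=1, norm_num_groups=8, attention_levels=(False, False),
        num_head_channels=8,
    ).to(device)
    noise = torch.randn((1, 4, 4, 4)).to(device)  # latent-shaped starting noise

    # Same code path under both schedulers, exactly as the updated tests do.
    for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]:
        inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
        scheduler.set_timesteps(num_inference_steps=10)
        with torch.no_grad():
            sample = inferer.sample(
                input_noise=noise, autoencoder_model=autoencoder, diffusion_model=unet, scheduler=scheduler
            )
        print(sample.shape)  # decoded back to image space: (1, 1, 8, 8)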

tests/inferers/test_latent_diffusion_inferer.py

@@ -19,7 +19,7 @@ from parameterized import parameterized
 
 from monai.inferers import LatentDiffusionInferer
 from monai.networks.nets import VQVAE, AutoencoderKL, DiffusionModelUNet, SPADEAutoencoderKL, SPADEDiffusionModelUNet
-from monai.networks.schedulers import DDPMScheduler
+from monai.networks.schedulers import DDPMScheduler, RFlowScheduler
 from monai.utils import optional_import
 
 _, has_einops = optional_import("einops")

@@ -339,31 +339,32 @@ class TestDiffusionSamplingInferer(unittest.TestCase):
 
         input = torch.randn(input_shape).to(device)
         noise = torch.randn(latent_shape).to(device)
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
-        scheduler.set_timesteps(num_inference_steps=10)
-        timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long()
 
-        if dm_model_type == "SPADEDiffusionModelUNet":
-            input_shape_seg = list(input_shape)
-            if "label_nc" in stage_2_params.keys():
-                input_shape_seg[1] = stage_2_params["label_nc"]
+        for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]:
+            inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
+            scheduler.set_timesteps(num_inference_steps=10)
+            timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long()
+
+            if dm_model_type == "SPADEDiffusionModelUNet":
+                input_shape_seg = list(input_shape)
+                if "label_nc" in stage_2_params.keys():
+                    input_shape_seg[1] = stage_2_params["label_nc"]
+                else:
+                    input_shape_seg[1] = autoencoder_params["label_nc"]
+                input_seg = torch.randn(input_shape_seg).to(device)
+                prediction = inferer(
+                    inputs=input,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    seg=input_seg,
+                    noise=noise,
+                    timesteps=timesteps,
+                )
             else:
-                input_shape_seg[1] = autoencoder_params["label_nc"]
-            input_seg = torch.randn(input_shape_seg).to(device)
-            prediction = inferer(
-                inputs=input,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                seg=input_seg,
-                noise=noise,
-                timesteps=timesteps,
-            )
-        else:
-            prediction = inferer(
-                inputs=input, autoencoder_model=stage_1, diffusion_model=stage_2, noise=noise, timesteps=timesteps
-            )
-        self.assertEqual(prediction.shape, latent_shape)
+                prediction = inferer(
+                    inputs=input, autoencoder_model=stage_1, diffusion_model=stage_2, noise=noise, timesteps=timesteps
+                )
+            self.assertEqual(prediction.shape, latent_shape)
 
     @parameterized.expand(TEST_CASES)
     @skipUnless(has_einops, "Requires einops")

@@ -388,29 +389,30 @@ class TestDiffusionSamplingInferer(unittest.TestCase):
         stage_2.eval()
 
         noise = torch.randn(latent_shape).to(device)
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
-        scheduler.set_timesteps(num_inference_steps=10)
 
-        if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet":
-            input_shape_seg = list(input_shape)
-            if "label_nc" in stage_2_params.keys():
-                input_shape_seg[1] = stage_2_params["label_nc"]
+        for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]:
+            inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
+            scheduler.set_timesteps(num_inference_steps=10)
+
+            if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet":
+                input_shape_seg = list(input_shape)
+                if "label_nc" in stage_2_params.keys():
+                    input_shape_seg[1] = stage_2_params["label_nc"]
+                else:
+                    input_shape_seg[1] = autoencoder_params["label_nc"]
+                input_seg = torch.randn(input_shape_seg).to(device)
+                sample = inferer.sample(
+                    input_noise=noise,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    scheduler=scheduler,
+                    seg=input_seg,
+                )
             else:
-                input_shape_seg[1] = autoencoder_params["label_nc"]
-            input_seg = torch.randn(input_shape_seg).to(device)
-            sample = inferer.sample(
-                input_noise=noise,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                scheduler=scheduler,
-                seg=input_seg,
-            )
-        else:
-            sample = inferer.sample(
-                input_noise=noise, autoencoder_model=stage_1, diffusion_model=stage_2, scheduler=scheduler
-            )
-        self.assertEqual(sample.shape, input_shape)
+                sample = inferer.sample(
+                    input_noise=noise, autoencoder_model=stage_1, diffusion_model=stage_2, scheduler=scheduler
+                )
+            self.assertEqual(sample.shape, input_shape)
 
     @parameterized.expand(TEST_CASES)
     @skipUnless(has_einops, "Requires einops")

@@ -437,37 +439,38 @@ class TestDiffusionSamplingInferer(unittest.TestCase):
         stage_2.eval()
 
         noise = torch.randn(latent_shape).to(device)
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
-        scheduler.set_timesteps(num_inference_steps=10)
 
-        if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet":
-            input_shape_seg = list(input_shape)
-            if "label_nc" in stage_2_params.keys():
-                input_shape_seg[1] = stage_2_params["label_nc"]
+        for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]:
+            inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
+            scheduler.set_timesteps(num_inference_steps=10)
+
+            if ae_model_type == "SPADEAutoencoderKL" or dm_model_type == "SPADEDiffusionModelUNet":
+                input_shape_seg = list(input_shape)
+                if "label_nc" in stage_2_params.keys():
+                    input_shape_seg[1] = stage_2_params["label_nc"]
+                else:
+                    input_shape_seg[1] = autoencoder_params["label_nc"]
+                input_seg = torch.randn(input_shape_seg).to(device)
+                sample, intermediates = inferer.sample(
+                    input_noise=noise,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    scheduler=scheduler,
+                    seg=input_seg,
+                    save_intermediates=True,
+                    intermediate_steps=1,
+                )
             else:
-                input_shape_seg[1] = autoencoder_params["label_nc"]
-            input_seg = torch.randn(input_shape_seg).to(device)
-            sample, intermediates = inferer.sample(
-                input_noise=noise,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                scheduler=scheduler,
-                seg=input_seg,
-                save_intermediates=True,
-                intermediate_steps=1,
-            )
-        else:
-            sample, intermediates = inferer.sample(
-                input_noise=noise,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                scheduler=scheduler,
-                save_intermediates=True,
-                intermediate_steps=1,
-            )
-        self.assertEqual(len(intermediates), 10)
-        self.assertEqual(intermediates[0].shape, input_shape)
+                sample, intermediates = inferer.sample(
+                    input_noise=noise,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    scheduler=scheduler,
+                    save_intermediates=True,
+                    intermediate_steps=1,
+                )
+            self.assertEqual(len(intermediates), 10)
+            self.assertEqual(intermediates[0].shape, input_shape)
 
     @parameterized.expand(TEST_CASES)
     @skipUnless(has_einops, "Requires einops")

@@ -614,40 +617,40 @@ class TestDiffusionSamplingInferer(unittest.TestCase):
         conditioning_shape[1] = n_concat_channel
         conditioning = torch.randn(conditioning_shape).to(device)
 
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
-        scheduler.set_timesteps(num_inference_steps=10)
-
-        timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long()
-
-        if dm_model_type == "SPADEDiffusionModelUNet":
-            input_shape_seg = list(input_shape)
-            if "label_nc" in stage_2_params.keys():
-                input_shape_seg[1] = stage_2_params["label_nc"]
+        for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]:
+            inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
+            scheduler.set_timesteps(num_inference_steps=10)
+
+            timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long()
+
+            if dm_model_type == "SPADEDiffusionModelUNet":
+                input_shape_seg = list(input_shape)
+                if "label_nc" in stage_2_params.keys():
+                    input_shape_seg[1] = stage_2_params["label_nc"]
+                else:
+                    input_shape_seg[1] = autoencoder_params["label_nc"]
+                input_seg = torch.randn(input_shape_seg).to(device)
+                prediction = inferer(
+                    inputs=input,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    noise=noise,
+                    timesteps=timesteps,
+                    condition=conditioning,
+                    mode="concat",
+                    seg=input_seg,
+                )
             else:
-                input_shape_seg[1] = autoencoder_params["label_nc"]
-            input_seg = torch.randn(input_shape_seg).to(device)
-            prediction = inferer(
-                inputs=input,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                noise=noise,
-                timesteps=timesteps,
-                condition=conditioning,
-                mode="concat",
-                seg=input_seg,
-            )
-        else:
-            prediction = inferer(
-                inputs=input,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                noise=noise,
-                timesteps=timesteps,
-                condition=conditioning,
-                mode="concat",
-            )
-        self.assertEqual(prediction.shape, latent_shape)
+                prediction = inferer(
+                    inputs=input,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    noise=noise,
+                    timesteps=timesteps,
+                    condition=conditioning,
+                    mode="concat",
+                )
+            self.assertEqual(prediction.shape, latent_shape)
 
     @parameterized.expand(TEST_CASES)
     @skipUnless(has_einops, "Requires einops")

@@ -681,36 +684,36 @@ class TestDiffusionSamplingInferer(unittest.TestCase):
         conditioning_shape[1] = n_concat_channel
         conditioning = torch.randn(conditioning_shape).to(device)
 
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
-        scheduler.set_timesteps(num_inference_steps=10)
-
-        if dm_model_type == "SPADEDiffusionModelUNet":
-            input_shape_seg = list(input_shape)
-            if "label_nc" in stage_2_params.keys():
-                input_shape_seg[1] = stage_2_params["label_nc"]
+        for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]:
+            inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
+            scheduler.set_timesteps(num_inference_steps=10)
+
+            if dm_model_type == "SPADEDiffusionModelUNet":
+                input_shape_seg = list(input_shape)
+                if "label_nc" in stage_2_params.keys():
+                    input_shape_seg[1] = stage_2_params["label_nc"]
+                else:
+                    input_shape_seg[1] = autoencoder_params["label_nc"]
+                input_seg = torch.randn(input_shape_seg).to(device)
+                sample = inferer.sample(
+                    input_noise=noise,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    scheduler=scheduler,
+                    conditioning=conditioning,
+                    mode="concat",
+                    seg=input_seg,
+                )
             else:
-                input_shape_seg[1] = autoencoder_params["label_nc"]
-            input_seg = torch.randn(input_shape_seg).to(device)
-            sample = inferer.sample(
-                input_noise=noise,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                scheduler=scheduler,
-                conditioning=conditioning,
-                mode="concat",
-                seg=input_seg,
-            )
-        else:
-            sample = inferer.sample(
-                input_noise=noise,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                scheduler=scheduler,
-                conditioning=conditioning,
-                mode="concat",
-            )
-        self.assertEqual(sample.shape, input_shape)
+                sample = inferer.sample(
+                    input_noise=noise,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    scheduler=scheduler,
+                    conditioning=conditioning,
+                    mode="concat",
+                )
+            self.assertEqual(sample.shape, input_shape)
 
     @parameterized.expand(TEST_CASES_DIFF_SHAPES)
     @skipUnless(has_einops, "Requires einops")

@@ -738,39 +741,39 @@ class TestDiffusionSamplingInferer(unittest.TestCase):
 
         input = torch.randn(input_shape).to(device)
         noise = torch.randn(latent_shape).to(device)
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        # We infer the VAE shape
-        autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]]
-        inferer = LatentDiffusionInferer(
-            scheduler=scheduler,
-            scale_factor=1.0,
-            ldm_latent_shape=list(latent_shape[2:]),
-            autoencoder_latent_shape=autoencoder_latent_shape,
-        )
-        scheduler.set_timesteps(num_inference_steps=10)
-
-        timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long()
-
-        if dm_model_type == "SPADEDiffusionModelUNet":
-            input_shape_seg = list(input_shape)
-            if "label_nc" in stage_2_params.keys():
-                input_shape_seg[1] = stage_2_params["label_nc"]
-            else:
-                input_shape_seg[1] = autoencoder_params["label_nc"]
-            input_seg = torch.randn(input_shape_seg).to(device)
-            prediction = inferer(
-                inputs=input,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                noise=noise,
-                timesteps=timesteps,
-                seg=input_seg,
-            )
-        else:
-            prediction = inferer(
-                inputs=input, autoencoder_model=stage_1, diffusion_model=stage_2, noise=noise, timesteps=timesteps
+        for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]:
+            # We infer the VAE shape
+            autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]]
+            inferer = LatentDiffusionInferer(
+                scheduler=scheduler,
+                scale_factor=1.0,
+                ldm_latent_shape=list(latent_shape[2:]),
+                autoencoder_latent_shape=autoencoder_latent_shape,
             )
-        self.assertEqual(prediction.shape, latent_shape)
+            scheduler.set_timesteps(num_inference_steps=10)
+
+            timesteps = torch.randint(0, scheduler.num_train_timesteps, (input_shape[0],), device=input.device).long()
+
+            if dm_model_type == "SPADEDiffusionModelUNet":
+                input_shape_seg = list(input_shape)
+                if "label_nc" in stage_2_params.keys():
+                    input_shape_seg[1] = stage_2_params["label_nc"]
+                else:
+                    input_shape_seg[1] = autoencoder_params["label_nc"]
+                input_seg = torch.randn(input_shape_seg).to(device)
+                prediction = inferer(
+                    inputs=input,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    noise=noise,
+                    timesteps=timesteps,
+                    seg=input_seg,
+                )
+            else:
+                prediction = inferer(
+                    inputs=input, autoencoder_model=stage_1, diffusion_model=stage_2, noise=noise, timesteps=timesteps
+                )
+            self.assertEqual(prediction.shape, latent_shape)
 
     @parameterized.expand(TEST_CASES_DIFF_SHAPES)
     @skipUnless(has_einops, "Requires einops")

@@ -797,40 +800,42 @@ class TestDiffusionSamplingInferer(unittest.TestCase):
         stage_2.eval()
 
        noise = torch.randn(latent_shape).to(device)
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        # We infer the VAE shape
-        if ae_model_type == "VQVAE":
-            autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]))) for i in input_shape[2:]]
-        else:
-            autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]]
-
-        inferer = LatentDiffusionInferer(
-            scheduler=scheduler,
-            scale_factor=1.0,
-            ldm_latent_shape=list(latent_shape[2:]),
-            autoencoder_latent_shape=autoencoder_latent_shape,
-        )
-        scheduler.set_timesteps(num_inference_steps=10)
-
-        if dm_model_type == "SPADEDiffusionModelUNet" or ae_model_type == "SPADEAutoencoderKL":
-            input_shape_seg = list(input_shape)
-            if "label_nc" in stage_2_params.keys():
-                input_shape_seg[1] = stage_2_params["label_nc"]
+        for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]:
+            # We infer the VAE shape
+            if ae_model_type == "VQVAE":
+                autoencoder_latent_shape = [i // (2 ** (len(autoencoder_params["channels"]))) for i in input_shape[2:]]
             else:
-                input_shape_seg[1] = autoencoder_params["label_nc"]
-            input_seg = torch.randn(input_shape_seg).to(device)
-            prediction, _ = inferer.sample(
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                input_noise=noise,
-                save_intermediates=True,
-                seg=input_seg,
-            )
-        else:
-            prediction = inferer.sample(
-                autoencoder_model=stage_1, diffusion_model=stage_2, input_noise=noise, save_intermediates=False
+                autoencoder_latent_shape = [
+                    i // (2 ** (len(autoencoder_params["channels"]) - 1)) for i in input_shape[2:]
+                ]
+
+            inferer = LatentDiffusionInferer(
+                scheduler=scheduler,
+                scale_factor=1.0,
+                ldm_latent_shape=list(latent_shape[2:]),
+                autoencoder_latent_shape=autoencoder_latent_shape,
             )
-        self.assertEqual(prediction.shape, input_shape)
+            scheduler.set_timesteps(num_inference_steps=10)
+
+            if dm_model_type == "SPADEDiffusionModelUNet" or ae_model_type == "SPADEAutoencoderKL":
+                input_shape_seg = list(input_shape)
+                if "label_nc" in stage_2_params.keys():
+                    input_shape_seg[1] = stage_2_params["label_nc"]
+                else:
+                    input_shape_seg[1] = autoencoder_params["label_nc"]
+                input_seg = torch.randn(input_shape_seg).to(device)
+                prediction, _ = inferer.sample(
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    input_noise=noise,
+                    save_intermediates=True,
+                    seg=input_seg,
+                )
+            else:
+                prediction = inferer.sample(
+                    autoencoder_model=stage_1, diffusion_model=stage_2, input_noise=noise, save_intermediates=False
+                )
+            self.assertEqual(prediction.shape, input_shape)
 
     @skipUnless(has_einops, "Requires einops")
     def test_incompatible_spade_setup(self):

@@ -866,18 +871,19 @@ class TestDiffusionSamplingInferer(unittest.TestCase):
         stage_2.eval()
         noise = torch.randn((1, 3, 4, 4)).to(device)
         input_seg = torch.randn((1, 3, 8, 8)).to(device)
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
-        scheduler.set_timesteps(num_inference_steps=10)
 
-        with self.assertRaises(ValueError):
-            _ = inferer.sample(
-                input_noise=noise,
-                autoencoder_model=stage_1,
-                diffusion_model=stage_2,
-                scheduler=scheduler,
-                seg=input_seg,
-            )
+        for scheduler in [DDPMScheduler(num_train_timesteps=10), RFlowScheduler(num_train_timesteps=1000)]:
+            inferer = LatentDiffusionInferer(scheduler=scheduler, scale_factor=1.0)
+            scheduler.set_timesteps(num_inference_steps=10)
+
+            with self.assertRaises(ValueError):
+                _ = inferer.sample(
+                    input_noise=noise,
+                    autoencoder_model=stage_1,
+                    diffusion_model=stage_2,
+                    scheduler=scheduler,
+                    seg=input_seg,
+                )
 
 
 if __name__ == "__main__":

tests/integration/test_integration_bundle_run.py

@@ -76,8 +76,7 @@ class TestBundleRun(unittest.TestCase):
         )
         with open(meta_file, "w") as f:
             json.dump(
-                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "1.13.1", "numpy_version": "1.22.2"},
-                f,
+                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.3.0", "numpy_version": "1.22.2"}, f
             )
         cmd = ["coverage", "run", "-m", "monai.bundle"]
         # test both CLI entry "run" and "run_workflow"

@@ -114,8 +113,7 @@ class TestBundleRun(unittest.TestCase):
         )
         with open(meta_file, "w") as f:
             json.dump(
-                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "1.13.1", "numpy_version": "1.22.2"},
-                f,
+                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.3.0", "numpy_version": "1.22.2"}, f
            )
 
         os.mkdir(scripts_dir)
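
The substantive change in both hunks is the pytorch_version pin in the metadata the tests generate. A standalone equivalent of the updated json.dump call, with an illustrative file path:

    import json

    meta = {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.3.0", "numpy_version": "1.22.2"}
    with open("metadata.json", "w") as f:  # the tests write this into a temporary bundle directory
        json.dump(meta, f)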

tests/integration/test_integration_classification_2d.py

@@ -166,7 +166,7 @@ def run_inference_test(root_dir, test_x, test_y, device="cuda:0", num_workers=10
     model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(test_y))).to(device)
 
     model_filename = os.path.join(root_dir, "best_metric_model.pth")
-    model.load_state_dict(torch.load(model_filename))
+    model.load_state_dict(torch.load(model_filename, weights_only=True))
     y_true = []
     y_pred = []
     with eval_mode(model):

tests/integration/test_integration_fast_train.py

@@ -186,7 +186,7 @@ class IntegrationFastTrain(DistTestCase):
            step += 1
            optimizer.zero_grad()
            # set AMP for training
-            with torch.cuda.amp.autocast():
+            with torch.autocast("cuda"):
                outputs = model(batch_data["image"])
                loss = loss_function(outputs, batch_data["label"])
            scaler.scale(loss).backward()

@@ -207,7 +207,7 @@ class IntegrationFastTrain(DistTestCase):
                roi_size = (96, 96, 96)
                sw_batch_size = 4
                # set AMP for validation
-                with torch.cuda.amp.autocast():
+                with torch.autocast("cuda"):
                    val_outputs = sliding_window_inference(val_data["image"], roi_size, sw_batch_size, model)
 
                val_outputs = [post_pred(i) for i in decollate_batch(val_outputs)]
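
torch.cuda.amp.autocast() is deprecated in recent PyTorch in favor of the device-generic torch.autocast(device_type); these hunks apply that mechanical replacement. A minimal sketch of the resulting AMP training step, with illustrative names (and assuming PyTorch >= 2.3 for torch.amp.GradScaler's device argument):

    import torch

    scaler = torch.amp.GradScaler("cuda")

    def amp_train_step(model, batch_data, loss_function, optimizer):
        optimizer.zero_grad()
        with torch.autocast("cuda"):  # replaces the deprecated torch.cuda.amp.autocast()
            outputs = model(batch_data["image"])
            loss = loss_function(outputs, batch_data["label"])
        scaler.scale(loss).backward()  # gradients are unscaled inside scaler.step
        scaler.step(optimizer)
        scaler.update()
        return loss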

tests/integration/test_integration_segmentation_3d.py

@@ -216,7 +216,7 @@ def run_inference_test(root_dir, device="cuda:0"):
     ).to(device)
 
     model_filename = os.path.join(root_dir, "best_metric_model.pth")
-    model.load_state_dict(torch.load(model_filename))
+    model.load_state_dict(torch.load(model_filename, weights_only=True))
     with eval_mode(model):
         # resampling with align_corners=True or dtype=float64 will generate
         # slight different results between PyTorch 1.5 an 1.6
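
The recurring torch.load(..., weights_only=True) edits throughout this diff opt in to PyTorch's restricted unpickler, which reconstructs only tensors and primitive containers rather than arbitrary Python objects (and becomes the default in newer PyTorch releases). A minimal sketch of the pattern; the model and checkpoint path are illustrative:

    import torch
    from torch import nn

    model = nn.Linear(4, 2)
    torch.save(model.state_dict(), "best_metric_model.pth")  # illustrative checkpoint

    # weights_only=True refuses to unpickle arbitrary objects, so loading an
    # untrusted checkpoint cannot execute attacker-controlled reduce hooks.
    state = torch.load("best_metric_model.pth", map_location="cpu", weights_only=True)
    model.load_state_dict(state)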

tests/metrics/test_compute_multiscalessim_metric.py

@@ -32,7 +32,7 @@ class TestMultiScaleSSIMMetric(unittest.TestCase):
         metric(preds, target)
         result = metric.aggregate()
         expected_value = 0.023176
-        self.assertTrue(expected_value - result.item() < 0.000001)
+        self.assertAlmostEqual(expected_value, result.item(), 4)
 
     def test2d_uniform(self):
         set_determinism(0)

@@ -45,7 +45,7 @@ class TestMultiScaleSSIMMetric(unittest.TestCase):
         metric(preds, target)
         result = metric.aggregate()
         expected_value = 0.022655
-        self.assertTrue(expected_value - result.item() < 0.000001)
+        self.assertAlmostEqual(expected_value, result.item(), 4)
 
     def test3d_gaussian(self):
         set_determinism(0)

@@ -58,7 +58,7 @@ class TestMultiScaleSSIMMetric(unittest.TestCase):
         metric(preds, target)
         result = metric.aggregate()
         expected_value = 0.061796
-        self.assertTrue(expected_value - result.item() < 0.000001)
+        self.assertAlmostEqual(expected_value, result.item(), 4)
 
     def input_ill_input_shape2d(self):
         metric = MultiScaleSSIMMetric(spatial_dims=3, weights=[0.5, 0.5])
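
These replacements also fix a latent bug: the old check was one-sided, since expected_value - result < 1e-6 holds for any result larger than expected_value, so upward regressions passed silently. assertAlmostEqual bounds the difference on both sides (to places=4 here). An illustration with made-up values:

    expected_value, result = 0.023176, 9.9  # a wildly wrong metric value
    assert expected_value - result < 0.000001  # old check: passes, the difference is negative
    # self.assertAlmostEqual(expected_value, result, 4)  # new check: fails as it should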

tests/metrics/test_surface_dice.py

@@ -82,7 +82,7 @@ class TestAllSurfaceDiceMetrics(unittest.TestCase):
         expected_res0[1, 1] = np.nan
         for b, c in np.ndindex(batch_size, n_class):
             np.testing.assert_allclose(expected_res0[b, c], res0[b, c].cpu())
-        np.testing.assert_array_equal(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
+        np.testing.assert_allclose(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
         np.testing.assert_equal(not_nans.cpu(), torch.tensor(2))
 
     def test_tolerance_euclidean_distance(self):

@@ -126,7 +126,7 @@ class TestAllSurfaceDiceMetrics(unittest.TestCase):
         expected_res0[1, 1] = np.nan
         for b, c in np.ndindex(batch_size, n_class):
             np.testing.assert_allclose(expected_res0[b, c], res0[b, c].cpu())
-        np.testing.assert_array_equal(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
+        np.testing.assert_allclose(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
         np.testing.assert_equal(not_nans.cpu(), torch.tensor(2))
 
     def test_tolerance_euclidean_distance_3d(self):

@@ -173,7 +173,7 @@ class TestAllSurfaceDiceMetrics(unittest.TestCase):
         expected_res0[1, 1] = np.nan
         for b, c in np.ndindex(batch_size, n_class):
             np.testing.assert_allclose(expected_res0[b, c], res0[b, c].cpu())
-        np.testing.assert_array_equal(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
+        np.testing.assert_allclose(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
         np.testing.assert_equal(not_nans.cpu(), torch.tensor(2))
 
     def test_tolerance_all_distances(self):
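
Swapping assert_array_equal for assert_allclose relaxes a bitwise-equality requirement that is brittle for floating-point reductions such as np.nanmean; assert_allclose applies a small relative tolerance (1e-7 by default). A short illustration:

    import numpy as np

    a = np.float64(0.1) + np.float64(0.2)  # 0.30000000000000004
    b = np.float64(0.3)
    np.testing.assert_allclose(a, b)       # passes: within the default rtol
    # np.testing.assert_array_equal(a, b)  # would raise: not bitwise equal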

tests/networks/nets/test_autoencoderkl.py

@@ -330,7 +330,7 @@ class TestAutoEncoderKL(unittest.TestCase):
            weight_path = os.path.join(tmpdir, filename)
            download_url(url=url, filepath=weight_path, hash_val=hash_val, hash_type=hash_type)
 
-            net.load_old_state_dict(torch.load(weight_path), verbose=False)
+            net.load_old_state_dict(torch.load(weight_path, weights_only=True), verbose=False)
 
 
 if __name__ == "__main__":

tests/networks/nets/test_controlnet.py

@@ -208,7 +208,7 @@ class TestControlNet(unittest.TestCase):
            weight_path = os.path.join(tmpdir, filename)
            download_url(url=url, filepath=weight_path, hash_val=hash_val, hash_type=hash_type)
 
-            net.load_old_state_dict(torch.load(weight_path), verbose=False)
+            net.load_old_state_dict(torch.load(weight_path, weights_only=True), verbose=False)
 
 
 if __name__ == "__main__":

tests/networks/nets/test_diffusion_model_unet.py

@@ -578,7 +578,7 @@ class TestDiffusionModelUNet3D(unittest.TestCase):
            weight_path = os.path.join(tmpdir, filename)
            download_url(url=url, filepath=weight_path, hash_val=hash_val, hash_type=hash_type)
 
-            net.load_old_state_dict(torch.load(weight_path), verbose=False)
+            net.load_old_state_dict(torch.load(weight_path, weights_only=True), verbose=False)
 
 
 if __name__ == "__main__":

tests/networks/nets/test_network_consistency.py

@@ -55,7 +55,7 @@ class TestNetworkConsistency(unittest.TestCase):
         print("JSON path: " + json_path)
 
         # Load data
-        loaded_data = torch.load(data_path)
+        loaded_data = torch.load(data_path, weights_only=True)
 
         # Load json from file
         json_file = open(json_path)