nnpdf 4.1.0-py3-none-any.whl → 4.1.1-py3-none-any.whl

This diff shows the changes between the two package versions as published to their public registry, and is provided for informational purposes only.
Files changed (116)
  1. n3fit/backends/keras_backend/MetaModel.py +27 -26
  2. n3fit/backends/keras_backend/callbacks.py +16 -8
  3. n3fit/backends/keras_backend/internal_state.py +13 -2
  4. n3fit/backends/keras_backend/operations.py +26 -26
  5. n3fit/hyper_optimization/hyper_scan.py +3 -9
  6. n3fit/hyper_optimization/penalties.py +11 -8
  7. n3fit/hyper_optimization/rewards.py +65 -34
  8. n3fit/model_gen.py +344 -270
  9. n3fit/model_trainer.py +71 -105
  10. n3fit/performfit.py +2 -7
  11. n3fit/tests/regressions/quickcard_1.json +12 -28
  12. n3fit/tests/regressions/quickcard_3.json +12 -28
  13. n3fit/tests/regressions/quickcard_pol_1.json +10 -26
  14. n3fit/tests/regressions/quickcard_pol_3.json +9 -25
  15. n3fit/tests/regressions/quickcard_qed_1.json +11 -27
  16. n3fit/tests/regressions/quickcard_qed_3.json +11 -27
  17. n3fit/tests/test_hyperopt.py +6 -12
  18. n3fit/tests/test_layers.py +6 -6
  19. n3fit/tests/test_modelgen.py +73 -24
  20. n3fit/tests/test_multireplica.py +52 -16
  21. n3fit/tests/test_penalties.py +7 -8
  22. n3fit/tests/test_preprocessing.py +2 -2
  23. n3fit/tests/test_vpinterface.py +5 -10
  24. n3fit/vpinterface.py +88 -44
  25. {nnpdf-4.1.0.dist-info → nnpdf-4.1.1.dist-info}/METADATA +9 -3
  26. {nnpdf-4.1.0.dist-info → nnpdf-4.1.1.dist-info}/RECORD +105 -67
  27. {nnpdf-4.1.0.dist-info → nnpdf-4.1.1.dist-info}/WHEEL +1 -1
  28. nnpdf_data/_version.py +1 -1
  29. nnpdf_data/commondata/ATLAS_2JET_7TEV_R06/metadata.yaml +16 -5
  30. nnpdf_data/commondata/ATLAS_TTBAR_13P6TEV_TOT/data.yaml +2 -0
  31. nnpdf_data/commondata/ATLAS_TTBAR_13P6TEV_TOT/kinematics.yaml +13 -0
  32. nnpdf_data/commondata/ATLAS_TTBAR_13P6TEV_TOT/metadata.yaml +51 -0
  33. nnpdf_data/commondata/ATLAS_TTBAR_13P6TEV_TOT/uncertainties.yaml +17 -0
  34. nnpdf_data/commondata/ATLAS_TTBAR_5TEV_TOT/data.yaml +2 -0
  35. nnpdf_data/commondata/ATLAS_TTBAR_5TEV_TOT/kinematics.yaml +13 -0
  36. nnpdf_data/commondata/ATLAS_TTBAR_5TEV_TOT/metadata.yaml +52 -0
  37. nnpdf_data/commondata/ATLAS_TTBAR_5TEV_TOT/uncertainties.yaml +22 -0
  38. nnpdf_data/commondata/ATLAS_WPWM_13P6TEV_TOT/data.yaml +3 -0
  39. nnpdf_data/commondata/ATLAS_WPWM_13P6TEV_TOT/kinematics.yaml +17 -0
  40. nnpdf_data/commondata/ATLAS_WPWM_13P6TEV_TOT/metadata.yaml +57 -0
  41. nnpdf_data/commondata/ATLAS_WPWM_13P6TEV_TOT/uncertainties.yaml +8 -0
  42. nnpdf_data/commondata/ATLAS_Z0_13P6TEV_TOT/data.yaml +2 -0
  43. nnpdf_data/commondata/ATLAS_Z0_13P6TEV_TOT/kinematics.yaml +9 -0
  44. nnpdf_data/commondata/ATLAS_Z0_13P6TEV_TOT/metadata.yaml +54 -0
  45. nnpdf_data/commondata/ATLAS_Z0_13P6TEV_TOT/uncertainties.yaml +7 -0
  46. nnpdf_data/commondata/CMS_1JET_8TEV/metadata.yaml +7 -1
  47. nnpdf_data/commondata/CMS_2JET_7TEV/metadata.yaml +16 -19
  48. nnpdf_data/commondata/CMS_TTBAR_13P6TEV_TOT/data.yaml +2 -0
  49. nnpdf_data/commondata/CMS_TTBAR_13P6TEV_TOT/kinematics.yaml +13 -0
  50. nnpdf_data/commondata/CMS_TTBAR_13P6TEV_TOT/metadata.yaml +51 -0
  51. nnpdf_data/commondata/CMS_TTBAR_13P6TEV_TOT/uncertainties.yaml +12 -0
  52. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/data_d2Sig_dmttBar_dyttBar.yaml +17 -0
  53. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/data_dSig_dmttBar.yaml +8 -0
  54. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/data_dSig_dpTt.yaml +8 -0
  55. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/data_dSig_dyt.yaml +11 -0
  56. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/filter.py +260 -0
  57. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/kinematics_d2Sig_dmttBar_dyttBar.yaml +193 -0
  58. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/kinematics_dSig_dmttBar.yaml +57 -0
  59. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/kinematics_dSig_dpTt.yaml +57 -0
  60. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/kinematics_dSig_dyt.yaml +81 -0
  61. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/metadata.yaml +114 -0
  62. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/rawdata/mtt_abs_parton.yaml +828 -0
  63. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/rawdata/mttytt-abs_parton.yaml +1899 -0
  64. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/rawdata/ptt_abs_parton.yaml +828 -0
  65. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/rawdata/submission.yaml +47 -0
  66. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/rawdata/yt_abs_parton.yaml +1179 -0
  67. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/uncertainties_d2Sig_dmttBar_dyttBar.yaml +2282 -0
  68. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/uncertainties_dSig_dmttBar.yaml +1256 -0
  69. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/uncertainties_dSig_dpTt.yaml +1256 -0
  70. nnpdf_data/commondata/CMS_TTBAR_13TEV_2L_138FB-1_DIF/uncertainties_dSig_dyt.yaml +1598 -0
  71. nnpdf_data/commondata/CMS_TTBAR_13TEV_35P9FB-1_TOT/data.yaml +2 -0
  72. nnpdf_data/commondata/CMS_TTBAR_13TEV_35P9FB-1_TOT/kinematics.yaml +13 -0
  73. nnpdf_data/commondata/CMS_TTBAR_13TEV_35P9FB-1_TOT/metadata.yaml +51 -0
  74. nnpdf_data/commondata/CMS_TTBAR_13TEV_35P9FB-1_TOT/uncertainties.yaml +17 -0
  75. nnpdf_data/commondata/CMS_TTBAR_5TEV_TOT/metadata.yaml +1 -1
  76. nnpdf_data/commondata/NNPDF_POS_2P24GEV/metadata.yaml +60 -0
  77. nnpdf_data/commondata/dataset_names.yml +6 -1
  78. nnpdf_data/theory_cards/41000010.yaml +42 -0
  79. nnpdf_data/theory_cards/41000011.yaml +43 -0
  80. nnpdf_data/theory_cards/41000012.yaml +43 -0
  81. nnpdf_data/theory_cards/41000013.yaml +42 -0
  82. nnpdf_data/theory_cards/41000014.yaml +43 -0
  83. nnpdf_data/theory_cards/41000015.yaml +43 -0
  84. validphys/_version.py +1 -1
  85. validphys/config.py +30 -10
  86. validphys/convolution.py +37 -14
  87. validphys/coredata.py +15 -5
  88. validphys/covmats.py +9 -2
  89. validphys/dataplots.py +1 -1
  90. validphys/filters.py +17 -3
  91. validphys/fkparser.py +11 -1
  92. validphys/gridvalues.py +1 -0
  93. validphys/hessian2mc.py +5 -5
  94. validphys/lhaindex.py +5 -0
  95. validphys/loader.py +1 -1
  96. validphys/n3fit_data.py +107 -61
  97. validphys/nnprofile_default.yaml +2 -1
  98. validphys/pineparser.py +12 -2
  99. validphys/scripts/postfit.py +4 -4
  100. validphys/scripts/vp_pdfrename.py +8 -9
  101. validphys/tests/conftest.py +6 -2
  102. validphys/tests/test_hessian2mc.py +7 -5
  103. validphys/utils.py +1 -0
  104. n3fit/tests/regressions/quickcard_pol/filter.yml +0 -80
  105. n3fit/tests/regressions/quickcard_pol/nnfit/input/lockfile.yaml +0 -111
  106. n3fit/tests/regressions/quickcard_pol/nnfit/replica_1/quickcard_pol.exportgrid +0 -572
  107. n3fit/tests/regressions/quickcard_pol/nnfit/replica_1/quickcard_pol.json +0 -71
  108. n3fit/tests/regressions/quickcard_pol/nnfit/replica_3/quickcard_pol.exportgrid +0 -615
  109. n3fit/tests/regressions/quickcard_pol/nnfit/replica_3/quickcard_pol.json +0 -71
  110. n3fit/tests/regressions/weights.weights.h5 +0 -0
  111. n3fit/tests/regressions/weights_pol.weights.h5 +0 -0
  112. n3fit/tests/test +0 -1
  113. nnpdf_data/theory_cards/40000099.yaml +0 -41
  114. nnpdf_data/theory_cards/40000099.yml +0 -41
  115. {nnpdf-4.1.0.dist-info → nnpdf-4.1.1.dist-info}/entry_points.txt +0 -0
  116. {nnpdf-4.1.0.dist-info → nnpdf-4.1.1.dist-info/licenses}/LICENSE +0 -0
n3fit/model_trainer.py CHANGED
@@ -20,11 +20,10 @@ from n3fit.backends import NN_LAYER_ALL_REPLICAS, MetaModel, callbacks, clear_ba
 from n3fit.backends import operations as op
 from n3fit.hyper_optimization.hyper_scan import HYPEROPT_STATUSES
 import n3fit.hyper_optimization.penalties
-import n3fit.hyper_optimization.rewards
 from n3fit.hyper_optimization.rewards import HyperLoss
 from n3fit.scaler import generate_scaler
 from n3fit.stopping import Stopping
-from n3fit.vpinterface import N3PDF, compute_phi
+from n3fit.vpinterface import N3PDF, compute_hyperopt_metrics
 from validphys.core import DataGroupSpec
 from validphys.photon.compute import Photon
 
@@ -652,71 +651,6 @@ class ModelTrainer:
         if interpolation_points:
             self._scaler = generate_scaler(self.input_list, interpolation_points)
 
-    def _generate_pdf(
-        self,
-        nodes_per_layer,
-        activation_per_layer,
-        initializer,
-        layer_type,
-        dropout,
-        regularizer,
-        regularizer_args,
-        seed,
-        photons,
-    ):
-        """
-        Defines the internal variable layer_pdf
-        this layer takes any input (x) and returns the pdf value for that x
-
-        if the sumrule is being imposed, it also updates input_list with the
-        integrator_input tensor used to calculate the sumrule
-
-        Parameters:
-        -----------
-            nodes_per_layer: list
-                list of nodes each layer has
-            activation_per_layer: list
-                list of the activation function for each layer
-            initializer: str
-                initializer for the weights of the NN
-            layer_type: str
-                type of layer to be used
-            dropout: float
-                dropout to add at the end of the NN
-            regularizer: str
-                choice of regularizer to add to the dense layers of the NN
-            regularizer_args: dict
-                dictionary of arguments for the regularizer
-            seed: int
-                seed for the NN
-            photons: :py:class:`validphys.photon.compute.Photon`
-                function to compute the photon PDF
-                see model_gen.pdfNN_layer_generator for more information
-
-        Returns
-        -------
-            pdf_model: MetaModel
-                pdf model
-        """
-        log.info("Generating PDF models")
-        pdf_model = model_gen.generate_pdf_model(
-            nodes=nodes_per_layer,
-            activations=activation_per_layer,
-            layer_type=layer_type,
-            flav_info=self.flavinfo,
-            fitbasis=self.fitbasis,
-            seed=seed,
-            initializer_name=initializer,
-            dropout=dropout,
-            regularizer=regularizer,
-            regularizer_args=regularizer_args,
-            impose_sumrule=self.impose_sumrule,
-            scaler=self._scaler,
-            num_replicas=len(self.replicas),
-            photons=photons,
-        )
-        return pdf_model
-
     def _prepare_reporting(self, partition):
         """Parses the information received by the :py:class:`n3fit.ModelTrainer.ModelTrainer`
         to select the bits necessary for reporting the chi2.
@@ -847,9 +781,23 @@
         exp_chi2 = self.experimental["model"].compute_losses()["loss"] / self.experimental["ndata"]
         return train_chi2, val_chi2, exp_chi2
 
-    def _filter_datagroupspec(self, datasets_partition):
-        """Takes a list of all input exp datasets as :class:`validphys.core.DataGroupSpec`
-        and select `DataSetSpec`s whose names are in datasets_partition.
+    def _filter_datagroupspec(self, datasets_partition, filter_in=True):
+        """Takes a list of strings with dataset names to either filter in or out
+        and returns instances of :class:`validphys.core.DataGroupSpec` which contain
+        either only the "in" datasets or all datasets minus the "out".
+        To control whether the datasets_partition should be selected or deselected,
+        the ``filter_in`` variable must be set to either True (select) or False (deselect).
+
+        The use case of this function is to return a modified experiment group object
+        following the same criteria that is used during the training, but with only
+        a subset of datasets being considered.
+
+        Parameters
+        ----------
+        datasets_partition: List[str]
+            List with names of the datasets you want to select or deselect.
+        filter_in: bool
+            Whether the datasets should be selected in (True, default) or out (False)
 
         Parameters
         ----------
@@ -874,7 +822,7 @@
             # Now, loop over them
             for dataset in datagroup.datasets:
                 # Include `DataSetSpec`s whose names are in datasets_partition
-                if dataset.name in datasets_partition:
+                if (dataset.name in datasets_partition) == filter_in:
                     filtered_datasetspec.append(dataset)
 
         # List of filtered experiments as `DataGroupSpec`
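
The single comparison above folds select and deselect into one code path: `(dataset.name in datasets_partition) == filter_in` keeps datasets inside the partition when `filter_in` is True and the complement when it is False. A minimal sketch of that semantics, with made-up dataset names:

    # Sketch of the filter_in semantics; the dataset names are hypothetical.
    def keep(name, partition, filter_in=True):
        # True  -> keep names inside the partition (e.g. the held-out fold)
        # False -> keep names outside it (the training/validation remainder)
        return (name in partition) == filter_in

    names = ["NMC", "SLAC", "BCDMS", "HERA"]
    fold = {"NMC", "SLAC"}

    held_out = [n for n in names if keep(n, fold)]                    # ['NMC', 'SLAC']
    train_val = [n for n in names if keep(n, fold, filter_in=False)]  # ['BCDMS', 'HERA']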
@@ -939,8 +887,10 @@
         # And lists to save hyperopt utilities
         pdfs_per_fold = []
         exp_models = []
-        # phi evaluated over training/validation exp data
-        trvl_phi_per_fold = []
+        # Hyperopt metrics evaluated over training/validation exp data
+        trvl_chi2_per_fold = []
+        trvl_phi2_per_fold = []
+        trvl_logp_per_fold = []
 
         # Generate the grid in x, note this is the same for all partitions
         xinput = self._xgrid_generation()
@@ -952,29 +902,43 @@
             )
         else:
             photons = None
+
+        # Prepare the settings for all replicas
+        replicas_settings = []
+        for seed in self._nn_seeds:
+            # WIP: here the sampling will happen when necessary
+            tmp = model_gen.ReplicaSettings(
+                seed=seed,
+                nodes=params["nodes_per_layer"],
+                activations=params["activation_per_layer"],
+                initializer=params["initializer"],
+                architecture=params["layer_type"],
+                dropout_rate=params["dropout"],
+                regularizer=params.get("regularizer"),
+                regularizer_args=params.get("regularizer_args"),
+            )
+            replicas_settings.append(tmp)
+
         ### Training loop
         for k, partition in enumerate(self.kpartitions):
-            # Each partition of the kfolding needs to have its own separate model
-            # and the seed needs to be updated accordingly
-            seeds = self._nn_seeds
+
             if k > 0:
-                # generate random integers for each k-fold from the input `nnseeds`
-                # we generate new seeds to avoid the integer overflow that may
-                # occur when doing k*nnseeds
-                rngs = [np.random.default_rng(seed=seed) for seed in seeds]
-                seeds = [generator.integers(1, pow(2, 30)) * k for generator in rngs]
+                # When hyperoptimizing, every partition takes the exact same model;
+                # only the seed needs to be updated.
+                # Generate random integers for each k-fold from the input `nnseeds`;
+                # this helps avoid the integer overflow that may occur when doing k*nnseeds
+                for seed, settings in zip(self._nn_seeds, replicas_settings):
+                    rng = np.random.default_rng(seed=seed)
+                    settings.seed = rng.integers(1, pow(2, 30)) * k
 
             # Generate the pdf model
-            pdf_model = self._generate_pdf(
-                params["nodes_per_layer"],
-                params["activation_per_layer"],
-                params["initializer"],
-                params["layer_type"],
-                params["dropout"],
-                params.get("regularizer", None),  # regularizer optional
-                params.get("regularizer_args", None),
-                seeds,
-                photons,
+            pdf_model = model_gen.generate_pdf_model(
+                replicas_settings=replicas_settings,
+                flav_info=self.flavinfo,
+                fitbasis=self.fitbasis,
+                impose_sumrule=self.impose_sumrule,
+                scaler=self._scaler,
+                photons=photons,
             )
 
             if photons:
@@ -1052,7 +1016,8 @@
             # Extracting the necessary data to compute phi
             # First, create a list of `validphys.core.DataGroupSpec`
             # containing only exp datasets within the held out fold
-            experimental_data = self._filter_datagroupspec(partition["datasets"])
+            folded_datasets = partition["datasets"]
+            experimental_data = self._filter_datagroupspec(folded_datasets)
 
             vplike_pdf = N3PDF(pdf_model.split_replicas())
             if self.boundary_condition is not None:
@@ -1061,7 +1026,7 @@
             # Compute per replica hyper losses
             hyper_loss = self._hyper_loss.compute_loss(
                 penalties=penalties,
-                kfold_loss=experimental_loss,
+                experimental_loss=experimental_loss,
                 validation_loss=validation_loss,
                 pdf_object=vplike_pdf,
                 experimental_data=experimental_data,
@@ -1070,20 +1035,17 @@
 
             # Create another list of `validphys.core.DataGroupSpec`
             # containing now exp datasets that are included in the training/validation dataset
-            trvl_partitions = list(self.kpartitions)
-            trvl_partitions.pop(k)
-            trvl_exp_names = [
-                exp_name for item in trvl_partitions for exp_name in item['datasets']
-            ]
-            trvl_data = self._filter_datagroupspec(trvl_exp_names)
-            # evaluate phi on training/validation exp set
-            trvl_phi = compute_phi(vplike_pdf, trvl_data)
+            trvl_data = self._filter_datagroupspec(folded_datasets, filter_in=False)
+            # Evaluate the hyperopt metrics on the training/validation experimental sets
+            hyper_metrics = compute_hyperopt_metrics(vplike_pdf, trvl_data)
 
             # Now save all information from this fold
             l_hyper.append(hyper_loss)
             l_valid.append(validation_loss)
             l_exper.append(experimental_loss)
-            trvl_phi_per_fold.append(trvl_phi)
+            trvl_chi2_per_fold.append(hyper_metrics.chi2)
+            trvl_phi2_per_fold.append(hyper_metrics.phi2)
+            trvl_logp_per_fold.append(hyper_metrics.logp)
             pdfs_per_fold.append(pdf_model)
             exp_models.append(models["experimental"])
 
@@ -1123,10 +1085,14 @@
             "experimental_loss": np.average(l_exper),
             "kfold_meta": {
                 "validation_losses": l_valid,
-                "trvl_losses_phi": np.array(trvl_phi_per_fold),
+                "trvl_losses_chi2": np.array(trvl_chi2_per_fold),
+                "trvl_losses_phi2": np.array(trvl_phi2_per_fold),
+                "trvl_losses_logp": np.array(trvl_logp_per_fold),
                 "experimental_losses": l_exper,
-                "hyper_losses": np.array(self._hyper_loss.chi2_matrix),
-                "hyper_losses_phi": np.array(self._hyper_loss.phi2_vector),
+                "hyper_losses": np.array(self._hyper_loss.exp_chi2_matrix),
+                "hyper_losses_chi2": np.array(self._hyper_loss.hyper_chi2_vector),
+                "hyper_losses_phi2": np.array(self._hyper_loss.hyper_phi2_vector),
+                "hyper_losses_logp": np.array(self._hyper_loss.hyper_logp_vector),
                 "penalties": {
                     name: np.array(values)
                     for name, values in self._hyper_loss.penalties.items()
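
The deleted `_generate_pdf` helper above is superseded by a `ReplicaSettings`-based interface: one settings object per replica, passed as a list to `model_gen.generate_pdf_model`, as the training loop now does. A minimal sketch of the new call pattern, mirroring the updated `n3fit/tests/test_hyperopt.py` further down (the flavour list and hyperparameter values are placeholders copied from that test):

    # Sketch of the ReplicaSettings-based model construction (values from the test below).
    from n3fit.model_gen import ReplicaSettings, generate_pdf_model

    flav_info = [
        {"fl": f, "largex": [0, 1], "smallx": [1, 2]}
        for f in ["u", "ubar", "d", "dbar", "c", "g", "s", "sbar"]
    ]

    # One settings object per replica; here only the seed differs between replicas
    replicas_settings = [
        ReplicaSettings(nodes=[8], activations=["linear"], seed=seed) for seed in (1, 2)
    ]

    pdf_model = generate_pdf_model(replicas_settings, flav_info=flav_info, fitbasis="FLAVOUR")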
n3fit/performfit.py CHANGED
@@ -215,16 +215,11 @@ def performfit(
         if hyperopt:
             from n3fit.hyper_optimization.hyper_scan import hyper_scan_wrapper
 
-            # Note that hyperopt will not run in parallel or with more than one model _for now_
             replica_path_set = replica_path / f"replica_{replica_idxs[0]}"
-            true_best = hyper_scan_wrapper(
+            hyper_scan_wrapper(
                 replica_path_set, the_model_trainer, hyperscanner, max_evals=hyperopt
             )
-            print("##################")
-            print("Best model found: ")
-            for k, i in true_best.items():
-                print(f" {k} : {i} ")
-
+            log.info("The hyperparameter scan is successfully finished.")
             # In general after we do the hyperoptimization we do not care about the fit
             # so just let this die here
             break
n3fit/tests/regressions/quickcard_1.json CHANGED
@@ -56,40 +56,24 @@
     "chi2": 5.2825517654418945,
     "pos_state": "POS_VETO",
     "arc_lengths": [
-        1.3156869164725282,
+        1.3156869164725287,
         1.2594413771361879,
-        3.6314384756133826,
-        1.0790116253109998,
+        3.6314384756133817,
+        1.0790116253110003,
         2.536624366983763
     ],
     "integrability": [
-        0.009921472228597905,
-        0.009921472228597877,
-        4.279186089674314e-07,
-        0.14216890558600426,
-        0.01550695078913129
+        0.009921472228597898,
+        0.12521867826580985,
+        4.2791860974111806e-07,
+        0.14216890558600428,
+        0.015506950789131738
     ],
-    "timing": {
-        "walltime": {
-            "Total": 18.872443675994873,
-            "start": 0.0,
-            "replica_set": 0.23057246208190918,
-            "replica_fitted": 18.87218737602234,
-            "replica_set_to_replica_fitted": 18.64161491394043
-        },
-        "cputime": {
-            "Total": 22.741160551,
-            "start": 0.0,
-            "replica_set": 0.22935973400000087,
-            "replica_fitted": 22.740922403,
-            "replica_set_to_replica_fitted": 22.511562669
-        }
-    },
     "version": {
-        "keras": "3.9.0 backend='tensorflow'",
+        "keras": "3.10.0 backend='tensorflow'",
         "tensorflow": "2.19.0",
-        "numpy": "1.26.4",
-        "nnpdf": "4.0.10.post204.dev0+8cb29d878",
-        "validphys": "4.0.10.post204.dev0+8cb29d878"
+        "numpy": "2.1.3",
+        "nnpdf": "4.1.0.post14.dev0+2ee907633",
+        "validphys": "4.1.0.post14.dev0+2ee907633"
     }
 }
n3fit/tests/regressions/quickcard_3.json CHANGED
@@ -56,40 +56,24 @@
     "chi2": 136.70420837402344,
     "pos_state": "POS_VETO",
    "arc_lengths": [
-        1.9702585245218012,
+        1.9702585245217996,
         1.3170459341375724,
-        1.270081035506918,
+        1.2700810355069196,
         9.476632893725345,
-        1.3188127820152968
+        1.3188127820152953
     ],
     "integrability": [
-        0.01508276944514414,
-        0.015082769445143501,
-        0.0028370417712720586,
-        0.05940935388207458,
-        0.00011781920693543224
+        0.015082769445141472,
+        0.0022062192147116466,
+        0.002837041771279303,
+        0.05940935388207447,
+        0.00011781920693965109
     ],
-    "timing": {
-        "walltime": {
-            "Total": 18.97901725769043,
-            "start": 0.0,
-            "replica_set": 0.23014259338378906,
-            "replica_fitted": 18.978914976119995,
-            "replica_set_to_replica_fitted": 18.748772382736206
-        },
-        "cputime": {
-            "Total": 22.706904895,
-            "start": 0.0,
-            "replica_set": 0.22894110699999892,
-            "replica_fitted": 22.706801655,
-            "replica_set_to_replica_fitted": 22.477860548000002
-        }
-    },
     "version": {
-        "keras": "3.9.0 backend='tensorflow'",
+        "keras": "3.10.0 backend='tensorflow'",
         "tensorflow": "2.19.0",
-        "numpy": "1.26.4",
-        "nnpdf": "4.0.10.post204.dev0+8cb29d878",
-        "validphys": "4.0.10.post204.dev0+8cb29d878"
+        "numpy": "2.1.3",
+        "nnpdf": "4.1.0.post14.dev0+2ee907633",
+        "validphys": "4.1.0.post14.dev0+2ee907633"
     }
 }
n3fit/tests/regressions/quickcard_pol_1.json CHANGED
@@ -32,40 +32,24 @@
     "chi2": 2.1472965372150274,
     "pos_state": "POS_PASS",
     "arc_lengths": [
-        1.021167697265657,
+        1.0211676972656567,
         1.03088406913582,
         0.9899487587939698,
         0.9899487587939698,
         0.9899487587939698
     ],
     "integrability": [
-        1.8041124150158794e-16,
-        5.551115123125783e-17,
-        5.551115123125783e-17,
-        0.0018142980190617164,
-        2.498001805406602e-16
+        2.0404275905319958e-16,
+        0.014307497425732396,
+        6.661338147750939e-16,
+        0.0018142980190616331,
+        1.6653345369377348e-16
     ],
-    "timing": {
-        "walltime": {
-            "Total": 13.375839710235596,
-            "start": 0.0,
-            "replica_set": 0.2339179515838623,
-            "replica_fitted": 13.375744104385376,
-            "replica_set_to_replica_fitted": 13.141826152801514
-        },
-        "cputime": {
-            "Total": 15.078210552000002,
-            "start": 0.0,
-            "replica_set": 0.2326924410000002,
-            "replica_fitted": 15.078113471000002,
-            "replica_set_to_replica_fitted": 14.84542103
-        }
-    },
     "version": {
-        "keras": "3.9.0 backend='tensorflow'",
+        "keras": "3.10.0 backend='tensorflow'",
         "tensorflow": "2.19.0",
-        "numpy": "1.26.4",
-        "nnpdf": "4.0.10.post204.dev0+8cb29d878",
-        "validphys": "4.0.10.post204.dev0+8cb29d878"
+        "numpy": "2.1.3",
+        "nnpdf": "4.1.0.post14.dev0+2ee907633",
+        "validphys": "4.1.0.post14.dev0+2ee907633"
     }
 }
n3fit/tests/regressions/quickcard_pol_3.json CHANGED
@@ -39,33 +39,17 @@
         0.9899487587939698
     ],
     "integrability": [
-        6.453171330633722e-16,
-        1.3877787807814457e-16,
-        1.6653345369377348e-16,
-        0.020738880443015445,
-        3.3306690738754696e-16
+        4.919598825159773e-17,
+        0.001721592012243471,
+        1.457167719820518e-16,
+        0.02073888044301566,
+        3.469446951953614e-17
     ],
-    "timing": {
-        "walltime": {
-            "Total": 16.941243171691895,
-            "start": 0.0,
-            "replica_set": 0.23375344276428223,
-            "replica_fitted": 16.94113063812256,
-            "replica_set_to_replica_fitted": 16.707377195358276
-        },
-        "cputime": {
-            "Total": 19.081625535,
-            "start": 0.0,
-            "replica_set": 0.23221380699999994,
-            "replica_fitted": 19.081511472,
-            "replica_set_to_replica_fitted": 18.849297665
-        }
-    },
     "version": {
-        "keras": "3.9.0 backend='tensorflow'",
+        "keras": "3.10.0 backend='tensorflow'",
         "tensorflow": "2.19.0",
-        "numpy": "1.26.4",
-        "nnpdf": "4.0.10.post204.dev0+8cb29d878",
-        "validphys": "4.0.10.post204.dev0+8cb29d878"
+        "numpy": "2.1.3",
+        "nnpdf": "4.1.0.post14.dev0+2ee907633",
+        "validphys": "4.1.0.post14.dev0+2ee907633"
     }
 }
n3fit/tests/regressions/quickcard_qed_1.json CHANGED
@@ -56,40 +56,24 @@
     "chi2": 29.660354614257812,
     "pos_state": "POS_VETO",
     "arc_lengths": [
-        1.177279515316311,
+        1.1772795153163105,
         1.9596727108470247,
         2.0659497326907528,
         1.0157677891950458,
-        1.3618257886720615
+        1.3618257886720613
     ],
     "integrability": [
-        0.005202697648201693,
-        0.005202697648200694,
-        0.00035197582474189026,
-        0.10865128412842784,
-        0.0005445390415855789
+        0.005202697648200688,
+        0.06640676688402825,
+        0.00035197582474033595,
+        0.10865128412842817,
+        0.0005445390415856899
     ],
-    "timing": {
-        "walltime": {
-            "Total": 30.97916316986084,
-            "start": 0.0,
-            "replica_set": 0.23157668113708496,
-            "replica_fitted": 30.97898554801941,
-            "replica_set_to_replica_fitted": 30.747408866882324
-        },
-        "cputime": {
-            "Total": 34.14771338,
-            "start": 0.0,
-            "replica_set": 0.23023607500000054,
-            "replica_fitted": 34.147534493,
-            "replica_set_to_replica_fitted": 33.917298418
-        }
-    },
     "version": {
-        "keras": "3.9.0 backend='tensorflow'",
+        "keras": "3.10.0 backend='tensorflow'",
         "tensorflow": "2.19.0",
-        "numpy": "1.26.4",
-        "nnpdf": "4.0.10.post204.dev0+8cb29d878",
-        "validphys": "4.0.10.post204.dev0+8cb29d878"
+        "numpy": "2.1.3",
+        "nnpdf": "4.1.0.post14.dev0+2ee907633",
+        "validphys": "4.1.0.post14.dev0+2ee907633"
     }
 }
n3fit/tests/regressions/quickcard_qed_3.json CHANGED
@@ -56,40 +56,24 @@
     "chi2": 6.850959300994873,
     "pos_state": "POS_VETO",
     "arc_lengths": [
-        1.3254256368644568,
+        1.3254256368644552,
         1.4540914108436402,
         2.885420751255539,
-        1.8141600138524392,
-        1.046721750173377
+        1.8141600138524403,
+        1.0467217501733774
     ],
     "integrability": [
-        0.033585004275664354,
-        0.03358500427566452,
-        0.00019570513268241108,
+        0.03358500427566258,
+        0.00010966376294052615,
+        0.00019570513267841427,
         0.06445826729759663,
-        0.03394107008353098
+        0.03394107008352876
     ],
-    "timing": {
-        "walltime": {
-            "Total": 31.162365198135376,
-            "start": 0.0,
-            "replica_set": 0.22889018058776855,
-            "replica_fitted": 31.162174224853516,
-            "replica_set_to_replica_fitted": 30.933284044265747
-        },
-        "cputime": {
-            "Total": 34.290361569999995,
-            "start": 0.0,
-            "replica_set": 0.2273777430000008,
-            "replica_fitted": 34.290169555,
-            "replica_set_to_replica_fitted": 34.062791812
-        }
-    },
     "version": {
-        "keras": "3.9.0 backend='tensorflow'",
+        "keras": "3.10.0 backend='tensorflow'",
         "tensorflow": "2.19.0",
-        "numpy": "1.26.4",
-        "nnpdf": "4.0.10.post204.dev0+8cb29d878",
-        "validphys": "4.0.10.post204.dev0+8cb29d878"
+        "numpy": "2.1.3",
+        "nnpdf": "4.1.0.post14.dev0+2ee907633",
+        "validphys": "4.1.0.post14.dev0+2ee907633"
     }
 }
n3fit/tests/test_hyperopt.py CHANGED
@@ -14,26 +14,20 @@ from numpy.testing import assert_approx_equal
 import pytest
 
 from n3fit.hyper_optimization.rewards import HyperLoss
-from n3fit.model_gen import generate_pdf_model
+from n3fit.model_gen import ReplicaSettings, generate_pdf_model
 from n3fit.vpinterface import N3PDF
 from validphys.loader import Loader
 from validphys.tests.conftest import THEORYID
 
 
-def generate_pdf(seed, num_replicas):
+def generate_pdf(seeds):
     """Generate generic pdf model."""
     fake_fl = [
         {"fl": i, "largex": [0, 1], "smallx": [1, 2]}
         for i in ["u", "ubar", "d", "dbar", "c", "g", "s", "sbar"]
     ]
-    pdf_model = generate_pdf_model(
-        nodes=[8],
-        activations=["linear"],
-        seed=seed,
-        num_replicas=num_replicas,
-        flav_info=fake_fl,
-        fitbasis="FLAVOUR",
-    )
+    rp = [ReplicaSettings(nodes=[8], activations=["linear"], seed=seed) for seed in seeds]
+    pdf_model = generate_pdf_model(rp, flav_info=fake_fl, fitbasis="FLAVOUR")
     return pdf_model
 
 
@@ -63,7 +57,7 @@ def test_compute_per_fold_loss(loss_type, replica_statistic, expected_per_fold_l
     This example assumes a 2 replica calculation with 3 added penalties.
     """
     # generate 2 replica pdf model
-    pdf_model = generate_pdf(seed=[1, 2], num_replicas=2)
+    pdf_model = generate_pdf(seeds=[1, 2])
     # add 3 penalties for a 2 replica model
     penalties = {
         'saturation': np.array([0.0, 0.0]),
@@ -81,7 +75,7 @@ def test_compute_per_fold_loss(loss_type, replica_statistic, expected_per_fold_l
     pdf_object = N3PDF(pdf_model.split_replicas())
     predicted_per_fold_loss = loss.compute_loss(
         penalties,
-        kfold_loss=experimental_loss,
+        experimental_loss=experimental_loss,
         validation_loss=experimental_loss,
         pdf_object=pdf_object,
         experimental_data=experimental_data,
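
Together with the model_trainer.py hunks above, these test changes pin down the new interface: `HyperLoss.compute_loss` takes `experimental_loss` instead of `kfold_loss`, and `compute_hyperopt_metrics` replaces `compute_phi`, returning chi2, phi2 and logp per fold. A hedged sketch of the per-fold collection step; `compute_hyperopt_metrics` and its `chi2`/`phi2`/`logp` attributes come from this diff, while the surrounding helper is illustrative only:

    # Sketch: gathering the three hyperopt metrics per fold, as model_trainer.py
    # above does; only the names taken from this diff are assumed to exist.
    import numpy as np
    from n3fit.vpinterface import compute_hyperopt_metrics

    def collect_fold_metrics(folds):
        """folds: iterable of (vplike_pdf, trvl_data) pairs, one per k-fold."""
        chi2s, phi2s, logps = [], [], []
        for vplike_pdf, trvl_data in folds:
            metrics = compute_hyperopt_metrics(vplike_pdf, trvl_data)
            chi2s.append(metrics.chi2)
            phi2s.append(metrics.phi2)
            logps.append(metrics.logp)
        return {
            "trvl_losses_chi2": np.array(chi2s),
            "trvl_losses_phi2": np.array(phi2s),
            "trvl_losses_logp": np.array(logps),
        }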