libinephany 0.19.0__tar.gz → 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. libinephany-1.0.0/CODE_VERSION.cfg +1 -0
  2. {libinephany-0.19.0/libinephany.egg-info → libinephany-1.0.0}/PKG-INFO +1 -1
  3. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/base_observers.py +20 -8
  4. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/global_observers/gradient_observers.py +15 -16
  5. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/global_observers/hyperparameter_observers.py +26 -18
  6. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/global_observers/model_observers.py +18 -20
  7. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/global_observers/progress_observers.py +7 -1
  8. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/local_observers.py +35 -28
  9. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/statistic_trackers.py +44 -227
  10. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/schemas/tensor_statistics.py +33 -32
  11. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/states/hyperparameter_states.py +32 -30
  12. {libinephany-0.19.0 → libinephany-1.0.0/libinephany.egg-info}/PKG-INFO +1 -1
  13. libinephany-0.19.0/CODE_VERSION.cfg +0 -1
  14. {libinephany-0.19.0 → libinephany-1.0.0}/LICENSE +0 -0
  15. {libinephany-0.19.0 → libinephany-1.0.0}/MANIFEST.in +0 -0
  16. {libinephany-0.19.0 → libinephany-1.0.0}/README.md +0 -0
  17. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/__init__.py +0 -0
  18. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/aws/__init__.py +0 -0
  19. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/aws/s3_functions.py +0 -0
  20. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/__init__.py +0 -0
  21. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observation_utils.py +0 -0
  22. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observer_pipeline.py +0 -0
  23. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/__init__.py +0 -0
  24. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/global_observers/__init__.py +0 -0
  25. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/global_observers/base_classes.py +0 -0
  26. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/global_observers/constants.py +0 -0
  27. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/global_observers/loss_observers.py +0 -0
  28. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/observers/observer_containers.py +0 -0
  29. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/pipeline_coordinator.py +0 -0
  30. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/post_processors/__init__.py +0 -0
  31. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/post_processors/postprocessors.py +0 -0
  32. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/observations/statistic_manager.py +0 -0
  33. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/__init__.py +0 -0
  34. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/configs/__init__.py +0 -0
  35. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/configs/hyperparameter_configs.py +0 -0
  36. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/configs/observer_config.py +0 -0
  37. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/configs/outer_model_config.py +0 -0
  38. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/schemas/__init__.py +0 -0
  39. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/schemas/agent_info.py +0 -0
  40. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/schemas/inner_task_profile.py +0 -0
  41. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/schemas/observation_models.py +0 -0
  42. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/schemas/request_schemas.py +0 -0
  43. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/schemas/response_schemas.py +0 -0
  44. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/pydantic_models/states/__init__.py +0 -0
  45. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/__init__.py +0 -0
  46. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/agent_utils.py +0 -0
  47. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/asyncio_worker.py +0 -0
  48. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/backend_statuses.py +0 -0
  49. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/constants.py +0 -0
  50. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/directory_utils.py +0 -0
  51. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/dropout_utils.py +0 -0
  52. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/enums.py +0 -0
  53. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/error_severities.py +0 -0
  54. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/exceptions.py +0 -0
  55. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/import_utils.py +0 -0
  56. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/optim_utils.py +0 -0
  57. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/random_seeds.py +0 -0
  58. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/samplers.py +0 -0
  59. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/standardizers.py +0 -0
  60. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/torch_distributed_utils.py +0 -0
  61. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/torch_utils.py +0 -0
  62. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/transforms.py +0 -0
  63. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/utils/typing.py +0 -0
  64. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/web_apps/__init__.py +0 -0
  65. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/web_apps/error_logger.py +0 -0
  66. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany/web_apps/web_app_utils.py +0 -0
  67. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany.egg-info/SOURCES.txt +0 -0
  68. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany.egg-info/dependency_links.txt +0 -0
  69. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany.egg-info/requires.txt +0 -0
  70. {libinephany-0.19.0 → libinephany-1.0.0}/libinephany.egg-info/top_level.txt +0 -0
  71. {libinephany-0.19.0 → libinephany-1.0.0}/pyproject.toml +0 -0
  72. {libinephany-0.19.0 → libinephany-1.0.0}/setup.cfg +0 -0
--- /dev/null
+++ libinephany-1.0.0/CODE_VERSION.cfg
@@ -0,0 +1 @@
+1.0.0
--- libinephany-0.19.0/libinephany.egg-info/PKG-INFO
+++ libinephany-1.0.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: libinephany
-Version: 0.19.0
+Version: 1.0.0
 Summary: Inephany library containing code commonly used by multiple subpackages.
 Author-email: Inephany <info@inephany.com>
 License: Apache 2.0
--- libinephany-0.19.0/libinephany/observations/observers/base_observers.py
+++ libinephany-1.0.0/libinephany/observations/observers/base_observers.py
@@ -43,15 +43,15 @@ class Observer(ABC):
         standardizer: Standardizer | None,
         observer_config: ObserverConfig,
         should_standardize: bool = True,
-        skip_statistics: list[str] | None = None,
+        include_statistics: list[str] | None = None,
         **kwargs,
     ) -> None:
         """
         :param standardizer: None or the standardizer to apply to the returned observations.
         :param global_config: ObserverConfig that can be used to inform various observation calculations.
         :param should_standardize: Whether standardization should be applied to returned values.
-        :param skip_statistics: If the observation uses the TensorStatistic model to return observations, names of the
-        fields in the model to not include in returned observations.
+        :param include_statistics: If the observation uses the TensorStatistic model to return observations, names of the
+        fields in the model to include in returned observations.
         :param kwargs: Miscellaneous keyword arguments.
         """

@@ -63,7 +63,10 @@ class Observer(ABC):
         self.standardize = standardizer if standardizer is not None else observation_utils.null_standardizer
         self.should_standardize = should_standardize and self.can_standardize

-        self.skip_statistics = TensorStatistics.filter_skip_statistics(skip_statistics=skip_statistics)
+        self.include_statistics: list[str] | None = None
+
+        if include_statistics is not None:
+            self.include_statistics = TensorStatistics.filter_include_statistics(include_statistics=include_statistics)

     @final
     @property
@@ -102,7 +105,10 @@ class Observer(ABC):
         observation_format = self.observation_format

         if observation_format is StatisticStorageTypes.TENSOR_STATISTICS:
-            return len([field for field in TensorStatistics.model_fields.keys() if field not in self.skip_statistics])
+            if self.include_statistics is None:
+                raise ValueError(f"{self.__class__.__name__} must be provided with include_statistics.")
+
+            return len([field for field in TensorStatistics.model_fields.keys() if field in self.include_statistics])

         elif observation_format is StatisticStorageTypes.FLOAT:
             return 1
@@ -231,10 +237,13 @@ class Observer(ABC):
         self._cached_observation = deepcopy(observations)

         if self.observation_format is StatisticStorageTypes.TENSOR_STATISTICS:
+            if self.include_statistics is None:
+                raise ValueError(f"{self.__class__.__name__} must be provided with include_statistics.")
+
             if return_dict:
                 observations_dict = observations.as_observation_dict()  # type: ignore

-            observations = observations.to_list(skip_statistics=self.skip_statistics)  # type: ignore
+            observations = observations.to_list(include_statistics=self.include_statistics)  # type: ignore

             observations = [observations] if not isinstance(observations, list) else observations  # type: ignore

@@ -256,7 +265,7 @@ class Observer(ABC):
     def inform(self) -> float | int | dict[str, float] | None:
         """
         :return: The cached observation. If the observation format is TensorStatistics then it is converted to a
-        dictionary with the statistics specified in skip_statistics excluded.
+        dictionary with the statistics specified in include_statistics included.
         """

         if not self.can_inform:
@@ -269,7 +278,10 @@ class Observer(ABC):
            )

         if self.observation_format is StatisticStorageTypes.TENSOR_STATISTICS:
-            observation = self._cached_observation.model_dump(exclude=set(self.skip_statistics))  # type: ignore
+            if self.include_statistics is None:
+                raise ValueError(f"{self.__class__.__name__} must be provided with include_statistics.")
+
+            observation = self._cached_observation.model_dump(include=set(self.include_statistics))  # type: ignore

         else:
             observation = self._cached_observation
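
Taken together, the Observer hunks invert the statistic-selection contract: the opt-out skip_statistics list of 0.19.0 becomes an opt-in include_statistics list in 1.0.0, and TensorStatistics-format observers now fail fast with a ValueError instead of silently emitting every field. A minimal usage sketch of the new contract, assuming GlobalFirstOrderGradients forwards its keyword arguments to Observer.__init__ and that "mean" is a valid TensorStatistics field name ("norm_" does appear elsewhere in this diff):

# Sketch only, not taken from the package; field names other than "norm_" are assumptions.
from libinephany.observations.observers.global_observers.gradient_observers import (
    GlobalFirstOrderGradients,
)

observer = GlobalFirstOrderGradients(
    standardizer=None,                     # no standardization applied
    observer_config=observer_config,       # an ObserverConfig instance built elsewhere
    include_statistics=["mean", "norm_"],  # opt in: only these fields are observed
)

# Omitting include_statistics no longer means "keep everything"; the first use of a
# TensorStatistics-format observer without it raises, e.g.:
#   ValueError: GlobalFirstOrderGradients must be provided with include_statistics.
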
--- libinephany-0.19.0/libinephany/observations/observers/global_observers/gradient_observers.py
+++ libinephany-1.0.0/libinephany/observations/observers/global_observers/gradient_observers.py
@@ -53,7 +53,7 @@ class GlobalFirstOrderGradients(GlobalObserver):
             needed.
         """

-        return {statistic_trackers.FirstOrderGradients.__name__: dict(skip_statistics=self.skip_statistics)}
+        return {statistic_trackers.FirstOrderGradients.__name__: dict(include_statistics=self.include_statistics)}


 class GlobalSecondOrderGradients(GlobalObserver):
@@ -110,7 +110,7 @@ class GlobalSecondOrderGradients(GlobalObserver):

         return {
             statistic_trackers.SecondOrderGradients.__name__: dict(
-                skip_statistics=self.skip_statistics, compute_hessian_diagonal=self.compute_hessian_diagonal
+                include_statistics=self.include_statistics, compute_hessian_diagonal=self.compute_hessian_diagonal
             )
         }

@@ -252,7 +252,7 @@ class LHOPTMomentumGradientRatio(LHOPTBaseObserver):

         return {
             statistic_trackers.MomentumGradientRatioStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics,
+                include_statistics=self.include_statistics,
                 sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"],
             ),
         }
@@ -269,18 +269,17 @@ class CosineSimilarityObserverOfGradientAndMomentum(LHOPTBaseObserver):
     def __init__(
         self,
         *,
-        skip_statistics: list[str] | None = None,
+        include_statistics: list[str] | None = None,
         **kwargs,
     ) -> None:
         """
-        :param compute_hessian_diagonal: Whether to compute the Hessian diagonal to determine second order gradients
-        or use the squared first order gradients as approximations in the same way Adam does.
+        :param include_statistics: List of statistics to include.
         :param kwargs: Miscellaneous keyword arguments.
         """

         super().__init__(**kwargs)

-        self.skip_statistics = skip_statistics
+        self.include_statistics = include_statistics

     def _get_observation_format(self) -> StatisticStorageTypes:
         """
@@ -338,7 +337,7 @@ class CosineSimilarityObserverOfGradientAndMomentum(LHOPTBaseObserver):

         return {
             statistic_trackers.CosineSimilarityObserverOfGradientAndMomentumStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics,
+                include_statistics=self.include_statistics,
                 sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"],
             )
         }
@@ -355,17 +354,17 @@ class CosineSimilarityObserverOfGradientAndUpdate(LHOPTBaseObserver):
     def __init__(
         self,
         *,
-        skip_statistics: list[str] | None = None,
+        include_statistics: list[str] | None = None,
         **kwargs,
     ) -> None:
         """
-        :param skip_statistics: List of statistics to skip.
+        :param include_statistics: List of statistics to include.
         :param kwargs: Miscellaneous keyword arguments.
         """

         super().__init__(**kwargs)

-        self.skip_statistics = skip_statistics
+        self.include_statistics = include_statistics

     def _get_observation_format(self) -> StatisticStorageTypes:
         """
@@ -423,7 +422,7 @@ class CosineSimilarityObserverOfGradientAndUpdate(LHOPTBaseObserver):

         return {
             statistic_trackers.CosineSimilarityObserverOfGradientAndUpdateStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics,
+                include_statistics=self.include_statistics,
                 sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"],
             )
         }
@@ -440,16 +439,16 @@ class CosineSimilarityOfGradientAndParameter(LHOPTBaseObserver):
     def __init__(
         self,
         *,
-        skip_statistics: list[str] | None = None,
+        include_statistics: list[str] | None = None,
         **kwargs,
     ) -> None:
         """
-        :param skip_statistics: List of statistics to skip.
+        :param include_statistics: List of statistics to include.
         :param kwargs: Miscellaneous keyword arguments.
         """
         super().__init__(**kwargs)

-        self.skip_statistics = skip_statistics
+        self.include_statistics = include_statistics

     def _get_observation_format(self) -> StatisticStorageTypes:
         """
@@ -505,7 +504,7 @@ class CosineSimilarityOfGradientAndParameter(LHOPTBaseObserver):

         return {
             statistic_trackers.CosineSimilarityOfGradientAndParameterStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics,
+                include_statistics=self.include_statistics,
                 sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"],
             )
         }
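
All of the LHOPT gradient observers above follow the same mechanical rename, so a 0.19.0 configuration migrates by complementing its skip list against the full TensorStatistics field set. A hedged migration sketch ("kurtosis" is an assumed field name used only for illustration):

# Migration sketch: 0.19.0 named the fields to drop, 1.0.0 names the fields to keep.
from libinephany.observations.observers.global_observers.gradient_observers import (
    CosineSimilarityObserverOfGradientAndUpdate,
)
from libinephany.pydantic_models.schemas.tensor_statistics import TensorStatistics

# 0.19.0: ...(skip_statistics=["kurtosis"], ...)
include_statistics = [field for field in TensorStatistics.model_fields.keys() if field != "kurtosis"]
observer = CosineSimilarityObserverOfGradientAndUpdate(
    standardizer=None,                # required by Observer.__init__, as above
    observer_config=observer_config,  # an ObserverConfig instance built elsewhere
    include_statistics=include_statistics,
)
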
--- libinephany-0.19.0/libinephany/observations/observers/global_observers/hyperparameter_observers.py
+++ libinephany-1.0.0/libinephany/observations/observers/global_observers/hyperparameter_observers.py
@@ -21,18 +21,16 @@ from libinephany.utils.enums import ModelFamilies

 class InitialHyperparameters(GlobalObserver):

-    def __init__(self, skip_hparams: list[str] | None = None, pad_with: float = 0.0, **kwargs) -> None:
+    def __init__(self, include_hparams: list[str] | None = None, pad_with: float = 0.0, **kwargs) -> None:
         """
-        :param skip_hparams: Names of the hyperparameters to not include in the initial values vector returned by
+        :param include_hparams: Names of the hyperparameters to include in the initial values vector returned by
         this observation.
         :param kwargs: Miscellaneous keyword arguments.
         """

         super().__init__(**kwargs)

-        force_skip = ["samples", "gradient_accumulation"]
-        skip_hparams = force_skip if skip_hparams is None else skip_hparams + force_skip
-        self.skip_hparams = [] if skip_hparams is None else skip_hparams
+        self.include_hparams = include_hparams
         self.pad_with = pad_with

     @property
@@ -41,9 +39,12 @@ class InitialHyperparameters(GlobalObserver):
         :return: Length of the vector returned by this observation if it returns a vector.
         """

+        if self.include_hparams is None:
+            raise ValueError(f"{self.__class__.__name__} must be provided with include_hparams.")
+
         available_hparams = HyperparameterStates.get_all_hyperparameters()

-        return len([hparam for hparam in available_hparams if hparam not in self.skip_hparams])
+        return len([hparam for hparam in available_hparams if hparam in self.include_hparams])

     @property
     def can_standardize(self) -> bool:
@@ -85,12 +86,14 @@ class InitialHyperparameters(GlobalObserver):
         :return: Single float/int, list of floats/ints or TensorStatistics model to add to the observation vector.
         """

-        initial_internal_values = hyperparameter_states.get_initial_internal_values(self.skip_hparams)
+        assert self.include_hparams is not None
+
+        initial_internal_values = hyperparameter_states.get_initial_internal_values(self.include_hparams)
         self._cached_observation = initial_internal_values
         initial_internal_values_list = [
             self.pad_with if initial_internal_value is None else initial_internal_value
             for hparam_name, initial_internal_value in initial_internal_values.items()
-            if hparam_name not in self.skip_hparams
+            if hparam_name in self.include_hparams
         ]
         return initial_internal_values_list

@@ -179,7 +182,8 @@ class ModelFamilyOneHot(GlobalObserver):
         **kwargs,
     ) -> None:
         """
-        :param skip_observations: List of episode boundary observations to ignore.
+        :param zero_vector_chance: Chance of the output vector being masked with zeros.
+        :param zero_vector_frequency_unit: Unit of time to sample the zero vector.
         :param kwargs: Miscellaneous keyword arguments.
         """
         super().__init__(**kwargs)
@@ -294,17 +298,16 @@ class LHOPTHyperparameterRatio(GlobalObserver):
     providing insights into how much hyperparameters have changed from their starting values.
     """

-    def __init__(self, skip_hparams: list[str] | None = None, pad_with: float = 0.0, **kwargs) -> None:
+    def __init__(self, include_hparams: list[str] | None = None, pad_with: float = 0.0, **kwargs) -> None:
         """
-        :param skip_hparams: Names of the hyperparameters to not include in the initial values vector returned by
+        :param include_hparams: Names of the hyperparameters to include in the initial values vector returned by
         this observation.
         :param kwargs: Miscellaneous keyword arguments.
         """

         super().__init__(**kwargs)

-        force_skip = ["samples", "gradient_accumulation"]
-        self.skip_hparams = force_skip if skip_hparams is None else skip_hparams + force_skip
+        self.include_hparams = include_hparams
         self.pad_with = pad_with

     @property
@@ -313,9 +316,12 @@ class LHOPTHyperparameterRatio(GlobalObserver):
         :return: Length of the vector returned by this observation if it returns a vector.
         """

+        if self.include_hparams is None:
+            raise ValueError(f"{self.__class__.__name__} must be provided with include_hparams.")
+
         available_hparams = HyperparameterStates.get_all_hyperparameters()

-        return len([hparam for hparam in available_hparams if hparam not in self.skip_hparams])
+        return len([hparam for hparam in available_hparams if hparam in self.include_hparams])

     @property
     def can_standardize(self) -> bool:
@@ -357,18 +363,20 @@ class LHOPTHyperparameterRatio(GlobalObserver):
         :return: Single float/int, list of floats/ints or TensorStatistics model to add to the observation vector.
         """

+        assert self.include_hparams is not None
+
         # Get initial and current hyperparameter values
-        initial_values = hyperparameter_states.get_initial_internal_values(self.skip_hparams)
+        initial_values = hyperparameter_states.get_initial_internal_values(self.include_hparams)
         initial_values = {
             hparam_name: self.pad_with if initial_value is None else initial_value
             for hparam_name, initial_value in initial_values.items()
-            if hparam_name not in self.skip_hparams
+            if hparam_name in self.include_hparams
         }
-        current_values = hyperparameter_states.get_current_internal_values(self.skip_hparams)
+        current_values = hyperparameter_states.get_current_internal_values(self.include_hparams)
         current_values = {
             hparam_name: self.pad_with if current_value is None else current_value
             for hparam_name, current_value in current_values.items()
-            if hparam_name not in self.skip_hparams
+            if hparam_name in self.include_hparams
         }

         ratios = []
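
Note that 1.0.0 also drops the implicit force_skip list: 0.19.0 always excluded "samples" and "gradient_accumulation", while 1.0.0 filters nothing on its own. A sketch of reproducing the old default selection under the new API (constructor arguments not shown in this diff are elided behind an assumed **observer_kwargs):

# Sketch: rebuild the 0.19.0 default, which always excluded these two hyperparameters.
from libinephany.observations.observers.global_observers.hyperparameter_observers import (
    InitialHyperparameters,
)
from libinephany.pydantic_models.states.hyperparameter_states import HyperparameterStates

include_hparams = [
    hparam
    for hparam in HyperparameterStates.get_all_hyperparameters()
    if hparam not in ("samples", "gradient_accumulation")
]
observer = InitialHyperparameters(include_hparams=include_hparams, pad_with=0.0, **observer_kwargs)
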
--- libinephany-0.19.0/libinephany/observations/observers/global_observers/model_observers.py
+++ libinephany-1.0.0/libinephany/observations/observers/global_observers/model_observers.py
@@ -59,7 +59,7 @@ class GlobalActivations(GlobalObserver):
             needed.
         """

-        return {statistic_trackers.ActivationStatistics.__name__: dict(skip_statistics=self.skip_statistics)}
+        return {statistic_trackers.ActivationStatistics.__name__: dict(include_statistics=self.include_statistics)}


 class GlobalParameterUpdates(GlobalObserver):
@@ -98,7 +98,7 @@ class GlobalParameterUpdates(GlobalObserver):
             needed.
         """

-        return {statistic_trackers.ParameterUpdateStatistics.__name__: dict(skip_statistics=self.skip_statistics)}
+        return {statistic_trackers.ParameterUpdateStatistics.__name__: dict(include_statistics=self.include_statistics)}


 class GlobalParameters(GlobalObserver):
@@ -137,7 +137,7 @@ class GlobalParameters(GlobalObserver):
             needed.
         """

-        return {statistic_trackers.ParameterStatistics.__name__: dict(skip_statistics=self.skip_statistics)}
+        return {statistic_trackers.ParameterStatistics.__name__: dict(include_statistics=self.include_statistics)}


 class GlobalLAMBTrustRatio(GlobalObserver):
@@ -385,9 +385,7 @@ class LogRatioOfPreviousAndCurrentParamNormEnvStepObserver(LHOPTBaseObserver):
         """

         return {
-            statistic_trackers.ParameterStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics,
-            ),
+            statistic_trackers.ParameterStatistics.__name__: dict(include_statistics=self.include_statistics),
         }

     def reset(self) -> None:
@@ -443,9 +441,11 @@ class LogRatioOfUpdateAndPreviousParamNormEnvStepObserver(LHOPTBaseObserver):
             self._compute_cdf_feature(0.0)  # default value since we can't compute log ratio yet
             self._update_time()
             return [0.0, 0.0]
+
         log_ratio = self._compute_log_ratio(update_norm, self._previous_param_norm)
         tanh_feature = math.tanh(max(-LHOPT_CONSTANTS["TANH_BOUND"], min(LHOPT_CONSTANTS["TANH_BOUND"], log_ratio)))
         cdf_feature = self._compute_cdf_feature(log_ratio)
+
         self._update_time()
         self._previous_param_norm = current_param_norm

@@ -458,12 +458,8 @@ class LogRatioOfUpdateAndPreviousParamNormEnvStepObserver(LHOPTBaseObserver):
         """

         return {
-            statistic_trackers.ParameterUpdateStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics,
-            ),
-            statistic_trackers.ParameterStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics,
-            ),
+            statistic_trackers.ParameterUpdateStatistics.__name__: dict(include_statistics=self.include_statistics),
+            statistic_trackers.ParameterStatistics.__name__: dict(include_statistics=self.include_statistics),
         }

     def reset(self) -> None:
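
The first of the two hunks above also shows how this observer turns the update/parameter norm ratio into features: a log ratio clamped to ±TANH_BOUND and squashed through tanh, plus a CDF feature. A worked sketch of the tanh feature, under loud assumptions: _compute_log_ratio is read here as the natural log of the quotient, and the real LHOPT_CONSTANTS["TANH_BOUND"] value is not visible in this section:

import math

TANH_BOUND = 10.0  # assumed placeholder; the real constant is not shown in this diff


def tanh_feature(update_norm: float, previous_param_norm: float) -> float:
    # Assumed reading of _compute_log_ratio: log of the norm quotient.
    log_ratio = math.log(update_norm / previous_param_norm)
    # Clamp to [-TANH_BOUND, TANH_BOUND] before squashing, as in the hunk above.
    return math.tanh(max(-TANH_BOUND, min(TANH_BOUND, log_ratio)))
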
@@ -533,7 +529,7 @@ class LHOPTAverageParameterUpdateMagnitudeObserver(LHOPTBaseObserver):

         return {
             statistic_trackers.AverageParameterUpdateMagnitudeStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics, sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"]
+                include_statistics=self.include_statistics, sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"]
             )
         }

@@ -570,8 +566,8 @@ class LogRatioOfUpdateAndPreviousParamNormInnerStepObserver(LHOPTBaseObserver):
         :return: List containing [raw_log_ratio, cdf_feature].
         """

-        update_statistics = tracked_statistics[statistic_trackers.InnerStepParameterUpdateStatistics.__name__]
-        param_statistics = tracked_statistics[statistic_trackers.InnerStepParameterStatistics.__name__]
+        update_statistics = tracked_statistics[statistic_trackers.LHOPTParameterUpdateStatistics.__name__]
+        param_statistics = tracked_statistics[statistic_trackers.LHOPTParameterStatistics.__name__]
         update_norm = observation_utils.average_tensor_statistics(
             tensor_statistics=[stats for stats in update_statistics.values() if isinstance(stats, TensorStatistics)]
         ).norm_
@@ -600,11 +596,11 @@ class LogRatioOfUpdateAndPreviousParamNormInnerStepObserver(LHOPTBaseObserver):
         """

         return {
-            statistic_trackers.InnerStepParameterUpdateStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics, sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"]
+            statistic_trackers.LHOPTParameterUpdateStatistics.__name__: dict(
+                include_statistics=self.include_statistics, sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"]
             ),
-            statistic_trackers.InnerStepParameterStatistics.__name__: dict(
-                skip_statistics=self.skip_statistics, sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"]
+            statistic_trackers.LHOPTParameterStatistics.__name__: dict(
+                include_statistics=self.include_statistics, sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"]
             ),
         }

@@ -680,6 +676,8 @@ class LHOPTGlobalLAMBTrustRatio(LHOPTBaseObserver):

         return {
             statistic_trackers.LHOPTLAMBTrustRatioStatistics.__name__: dict(
-                use_log_transform=self.use_log_transform, sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"]
+                include_statistics=self.include_statistics,
+                use_log_transform=self.use_log_transform,
+                sample_frequency=LHOPT_CONSTANTS["DEFAULT_SAMPLE_FREQUENCY"],
             )
         }
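
Beyond the keyword rename, this file switches the inner-step observer from the InnerStepParameterUpdateStatistics / InnerStepParameterStatistics trackers to LHOPTParameterUpdateStatistics / LHOPTParameterStatistics (whether the trackers were renamed or replaced is not visible in this section). External code that keys tracker configs by class name has to follow suit; a sketch, with ["norm_"] as an assumed field list:

# Sketch: tracker configs keyed by the 1.0.0 class names.
from libinephany.observations import statistic_trackers

tracker_config = {
    # 0.19.0 used statistic_trackers.InnerStepParameterUpdateStatistics.__name__
    # and statistic_trackers.InnerStepParameterStatistics.__name__ as keys.
    statistic_trackers.LHOPTParameterUpdateStatistics.__name__: dict(include_statistics=["norm_"]),
    statistic_trackers.LHOPTParameterStatistics.__name__: dict(include_statistics=["norm_"]),
}
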
--- libinephany-0.19.0/libinephany/observations/observers/global_observers/progress_observers.py
+++ libinephany-1.0.0/libinephany/observations/observers/global_observers/progress_observers.py
@@ -1,6 +1,6 @@
 # ======================================================================================================================
 #
-# imports
+# IMPORTS
 #
 # ======================================================================================================================

@@ -13,6 +13,12 @@ from libinephany.pydantic_models.schemas.observation_models import ObservationIn
 from libinephany.pydantic_models.schemas.tensor_statistics import TensorStatistics
 from libinephany.pydantic_models.states.hyperparameter_states import HyperparameterStates

+# ======================================================================================================================
+#
+# CLASSES
+#
+# ======================================================================================================================
+

 class TrainingProgress(GlobalObserver):
