libinephany-0.14.1-py3-none-any.whl → libinephany-0.15.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- libinephany/observations/observers/global_observers.py +1 -1
- libinephany/observations/statistic_trackers.py +11 -14
- libinephany/pydantic_models/configs/hyperparameter_configs.py +25 -1
- libinephany/pydantic_models/schemas/inner_task_profile.py +34 -0
- libinephany/pydantic_models/states/hyperparameter_states.py +12 -0
- libinephany/utils/constants.py +5 -0
- libinephany/utils/enums.py +2 -0
- libinephany/utils/torch_distributed_utils.py +22 -1
- {libinephany-0.14.1.dist-info → libinephany-0.15.1.dist-info}/METADATA +1 -1
- {libinephany-0.14.1.dist-info → libinephany-0.15.1.dist-info}/RECORD +13 -13
- {libinephany-0.14.1.dist-info → libinephany-0.15.1.dist-info}/WHEEL +0 -0
- {libinephany-0.14.1.dist-info → libinephany-0.15.1.dist-info}/licenses/LICENSE +0 -0
- {libinephany-0.14.1.dist-info → libinephany-0.15.1.dist-info}/top_level.txt +0 -0
libinephany/observations/observers/global_observers.py CHANGED

```diff
@@ -36,7 +36,7 @@ class InitialHyperparameters(GlobalObserver):
 
         super().__init__(**kwargs)
 
-        force_skip = ["samples"]
+        force_skip = ["samples", "gradient_accumulation"]
         skip_hparams = force_skip if skip_hparams is None else skip_hparams + force_skip
         self.skip_hparams = [] if skip_hparams is None else skip_hparams
         self.pad_with = pad_with
```
libinephany/observations/statistic_trackers.py CHANGED

```diff
@@ -213,10 +213,10 @@ class Statistic(ABC):
 
         if torch_distributed_utils.is_scheduler_master_rank():
             if isinstance(statistic, torch.Tensor):
-                shape = statistic.shape
+                shape = statistic.view(-1).shape
 
             elif isinstance(statistic, TensorStatistics):
-                shape = statistic.to_tensor().shape
+                shape = statistic.to_tensor().view(-1).shape
 
             elif statistic is not None:
                 shape = torch.tensor([statistic]).shape
@@ -239,23 +239,21 @@ class Statistic(ABC):
         if not torch_distributed_utils.is_distributed():
             return statistic
 
-
-        shape = self._determine_reduction_shape(statistic=statistic)
-
-        if shape is None:
-            return statistic
+        shape = self._determine_reduction_shape(statistic=statistic)
 
-
+        if statistic is None:
+            to_reduce = torch.zeros(shape, dtype=torch.float64)
 
         elif isinstance(statistic, torch.Tensor):
-            to_reduce = statistic.clone()
+            to_reduce = statistic.clone().to(torch.float64).view(-1)
 
         elif isinstance(statistic, TensorStatistics):
-            to_reduce = statistic.to_tensor()
+            to_reduce = statistic.to_tensor().to(torch.float64).view(-1)
 
         else:
-            to_reduce = torch.tensor([statistic])
+            to_reduce = torch.tensor([statistic], dtype=torch.float64)
 
+        to_reduce = to_reduce.to(torch_distributed_utils.get_local_device())
         dist.reduce(to_reduce, dst=MASTER_SCHEDULER_RANK, op=ReduceOp.SUM)
 
         if not torch_distributed_utils.is_scheduler_master_rank():
```
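
The reworked reduction path above normalises every statistic into a flat float64 buffer before calling `dist.reduce`, and ranks with no statistic now contribute a zero buffer instead of returning early, so all ranks reach the collective with matching shapes and dtypes. A minimal standalone sketch of that normalisation step (hypothetical helper, not part of the library):

```python
import torch

def normalise_for_reduce(statistic, shape: tuple[int, ...]) -> torch.Tensor:
    """Flatten any supported statistic into a 1-D float64 buffer (illustrative only)."""
    if statistic is None:
        # Ranks without data contribute zeros so a SUM reduce is unaffected.
        return torch.zeros(shape, dtype=torch.float64)
    if isinstance(statistic, torch.Tensor):
        return statistic.clone().to(torch.float64).view(-1)
    return torch.tensor([statistic], dtype=torch.float64)

print(normalise_for_reduce(torch.ones(2, 3), shape=(6,)))  # six ones, float64
print(normalise_for_reduce(None, shape=(6,)))              # six zeros, float64
print(normalise_for_reduce(0.5, shape=(1,)))               # tensor([0.5000], dtype=torch.float64)
```
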
```diff
@@ -283,11 +281,13 @@ class Statistic(ABC):
 
         parameter_group = self._find_parameter_group(optimizer=optimizer)
         parameters = self._get_parameters(parameter_group=parameter_group)
+        self._sample_number += 1
 
         if self._sample_number % self.sample_frequency == 0:
             statistic = self._gather(
                 optimizer=optimizer, model=model, parameters=parameters, parameter_group=parameter_group
             )
+
             statistic = self._distributed_reduce(statistic=statistic)
 
         if not torch_distributed_utils.is_scheduler_master_rank():
@@ -303,9 +303,6 @@ class Statistic(ABC):
         elif statistic is not None:
             self._data.append(statistic)  # type: ignore
 
-        if torch_distributed_utils.is_scheduler_master_rank():
-            self._sample_number += 1
-
     @final
     def fetch(self) -> TensorStatistics | float | None:
         """
```
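
Taken together, the two hunks above move the `_sample_number` increment from a master-rank-only post-step to an unconditional per-call increment, so every rank evaluates the `sample_frequency` gate identically and enters the collective reduce on the same calls. A toy sketch of that gating pattern (hypothetical names, not the library's API):

```python
class SampleGate:
    """Toy stand-in for the per-rank sample counter used to gate expensive gathers."""

    def __init__(self, sample_frequency: int) -> None:
        self.sample_frequency = sample_frequency
        self._sample_number = 0

    def should_sample(self) -> bool:
        # Incrementing on every call, on every rank, keeps the modulo check in lock-step.
        self._sample_number += 1
        return self._sample_number % self.sample_frequency == 0

gate = SampleGate(sample_frequency=3)
print([gate.should_sample() for _ in range(6)])  # [False, False, True, False, False, True]
```
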
libinephany/pydantic_models/configs/hyperparameter_configs.py CHANGED

```diff
@@ -4,7 +4,7 @@
 #
 # ======================================================================================================================
 
-from typing import Any
+from typing import Any, cast
 
 from pydantic import BaseModel, ConfigDict, ValidationError, field_serializer, field_validator, model_validator
 
@@ -232,6 +232,23 @@ class BatchSizeHParamConfig(HParamConfig):
     sample_discrete_values: list[float | int] | None = None
 
 
+class GradientAccumulationHParamConfig(HParamConfig):
+    max_hparam_value: float | int = 64
+    min_hparam_value: float | int = 1
+    hparam_dtype: type[float | int] = int
+    initial_value: int = 1
+    initial_delta: float = 0.0
+    scale: float = 1.0
+
+    sampler: str = "DiscreteRangeSampler"
+    sample_initial_values: bool = False
+    sample_lower_bound: int = 1
+    sample_upper_bound: int = 64
+    sample_step: int = 1
+    sample_discrete_values: list[float | int] | None = None
+    force_limit: float | int = 64
+
+
 class EpochsHParamConfig(HParamConfig):
     max_hparam_value: float | int = 16
     min_hparam_value: float | int = 1
```
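
The new `GradientAccumulationHParamConfig` exposes gradient accumulation steps as a tunable hyperparameter, bounded to 1–64 and drawn from a discrete range by default. For readers unfamiliar with the knob, a generic sketch of what such a hyperparameter typically controls in an inner training loop (illustrative only, not libinephany's trainer):

```python
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accumulation_steps = 4  # the value an agent would tune between 1 and 64

batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(8)]
for step, (x, y) in enumerate(batches):
    # Scale the loss so the accumulated gradient matches one effective batch of 8 * 4 = 32 samples.
    loss = torch.nn.functional.mse_loss(model(x), y) / accumulation_steps
    loss.backward()  # gradients accumulate in .grad across micro-batches
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
```
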
```diff
@@ -289,6 +306,7 @@ class HParamConfigs(BaseModel):
     sgd_momentum_config: HParamConfig = SGDMomentumHParamConfig()
 
     batch_size_config: HParamConfig = BatchSizeHParamConfig()
+    gradient_accumulation_config: GradientAccumulationHParamConfig = GradientAccumulationHParamConfig()
     epochs_config: HParamConfig = EpochsHParamConfig()
     token_config: HParamConfig = TokensHParamConfig()
     samples_config: HParamConfig = SamplesHParamConfig()
@@ -351,6 +369,9 @@ class HParamConfigs(BaseModel):
             case AgentTypes.BatchSize:
                 self.batch_size_config = hparam_config
 
+            case AgentTypes.GradientAccumulationAgent:
+                self.gradient_accumulation_config = cast(GradientAccumulationHParamConfig, hparam_config)
+
             case AgentTypes.Epochs:
                 self.epochs_config = hparam_config
 
@@ -400,6 +421,9 @@ class HParamConfigs(BaseModel):
             case AgentTypes.BatchSize:
                 return self.batch_size_config
 
+            case AgentTypes.GradientAccumulationAgent:
+                return self.gradient_accumulation_config
+
             case AgentTypes.Epochs:
                 return self.epochs_config
 
```
libinephany/pydantic_models/schemas/inner_task_profile.py CHANGED

```diff
@@ -140,6 +140,40 @@ class InnerTaskProfiles(BaseModel):
 
         return sum(self.compiled_action_sizes.values())
 
+    @property
+    def max_total_observation_size(self) -> int:
+        """
+        :return: The summed observation size of all agents with the task that has the most layers.
+        """
+
+        if not self.profiles:
+            raise ValueError(
+                "No profiles to calculate max total observation size. Ensure profiles have been "
+                "added before executing the training loop"
+            )
+
+        largest_task_name = max(self.profiles, key=lambda k: self.profiles[k].number_of_layers)
+        largest_task = self.profiles[largest_task_name]
+
+        return sum(largest_task.observation_space_sizes.values())
+
+    @property
+    def max_total_action_size(self) -> int:
+        """
+        :return: The summed action size of all agents with the task that has the most layers.
+        """
+
+        if not self.profiles:
+            raise ValueError(
+                "No profiles to calculate max total action size. Ensure profiles have been "
+                "added before executing the training loop"
+            )
+
+        largest_task_name = max(self.profiles, key=lambda k: self.profiles[k].number_of_layers)
+        largest_task = self.profiles[largest_task_name]
+
+        return sum(largest_task.action_space_sizes.values())
+
     @staticmethod
     def _compile_gym_space_sizes(spaces: dict[str, dict[str, int]]) -> dict[str, int]:
         """
```
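
The two new properties size things against the profile with the most layers: they pick the task whose `number_of_layers` is largest and sum its per-agent observation (or action) space sizes. The same reduction on plain dictionaries, with hypothetical profile data:

```python
profiles = {
    "small_mlp": {"number_of_layers": 3, "observation_space_sizes": {"lr-agent": 12, "dropout-agent": 8}},
    "deep_cnn": {"number_of_layers": 18, "observation_space_sizes": {"lr-agent": 42, "dropout-agent": 20}},
}

# max over profile names, keyed by layer count, mirrors the property's selection rule.
largest_task_name = max(profiles, key=lambda k: profiles[k]["number_of_layers"])
largest_task = profiles[largest_task_name]

print(largest_task_name)                                      # deep_cnn
print(sum(largest_task["observation_space_sizes"].values()))  # 62
```
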
libinephany/pydantic_models/states/hyperparameter_states.py CHANGED

```diff
@@ -20,6 +20,7 @@ from libinephany.utils.constants import (
     DROPOUT,
     EPOCHS,
     GRAD_NORM_CLIP,
+    GRADIENT_ACCUMULATION,
     LEARNING_RATE,
     SAMPLES,
     SGD_MOMENTUM,
@@ -60,6 +61,7 @@ class UpdateCallbacks(BaseModel):
     sgd_momentum: Callable[..., None]
 
     batch_size: Callable[..., None] | None
+    gradient_accumulation: Callable[..., None] | None
     epochs: Callable[..., None] | None
 
     def __getitem__(self, item: str) -> Callable[..., None] | None:
@@ -457,6 +459,7 @@ class ParameterGroupHParams(HyperparameterContainer):
 class GlobalHParams(HyperparameterContainer):
 
     batch_size: Hyperparameter
+    gradient_accumulation: Hyperparameter
     epochs: Hyperparameter
     tokens: Hyperparameter
     samples: Hyperparameter
@@ -550,6 +553,14 @@ class HyperparameterStates(BaseModel):
         """
         return self.global_hparams.batch_size
 
+    @computed_field  # type: ignore[misc]
+    @property
+    def gradient_accumulation(self) -> Hyperparameter:
+        """
+        :return: The gradient accumulation steps of the inner model.
+        """
+        return self.global_hparams.gradient_accumulation
+
     @computed_field  # type: ignore[misc]
     @property
     def epochs(self) -> Hyperparameter:
@@ -676,6 +687,7 @@ class HyperparameterStates(BaseModel):
 
         return {
            BATCH_SIZE: hparam_configs.batch_size_config,
+            GRADIENT_ACCUMULATION: hparam_configs.gradient_accumulation_config,
             EPOCHS: hparam_configs.epochs_config,
             TOKENS: hparam_configs.token_config,
             SAMPLES: hparam_configs.samples_config,
```
libinephany/utils/constants.py CHANGED

```diff
@@ -21,6 +21,7 @@ ADAM_BETA_TWO = "adam_beta_two"
 ADAM_EPS = "adam_eps"
 SGD_MOMENTUM = "sgd_momentum"
 BATCH_SIZE = "batch_size"
+GRADIENT_ACCUMULATION = "gradient_accumulation"
 EPOCHS = "epochs"
 TOKENS = "tokens"
 SAMPLES = "samples"
@@ -41,6 +42,7 @@ AGENT_PREFIX_EPS = "adam-eps"
 AGENT_PREFIX_SGD_MOMENTUM = "sgd-momentum"
 
 AGENT_BATCH_SIZE = "batch-size"
+AGENT_GRADIENT_ACCUMULATION = "gradient-accumulation"
 
 AGENT_BANDIT_SUFFIX = "bandit-agent"
 
@@ -53,6 +55,7 @@ AGENT_TYPES = [
     ADAM_BETA_TWO,
     ADAM_EPS,
     SGD_MOMENTUM,
+    GRADIENT_ACCUMULATION,
 ]
 SUFFIXES = [AGENT_BANDIT_SUFFIX]
 PREFIXES = [
@@ -64,6 +67,7 @@ PREFIXES = [
     AGENT_PREFIX_BETA_TWO,
     AGENT_PREFIX_EPS,
     AGENT_PREFIX_SGD_MOMENTUM,
+    AGENT_GRADIENT_ACCUMULATION,
 ]
 PREFIXES_TO_HPARAMS = {
     AGENT_PREFIX_LR: LEARNING_RATE,
@@ -74,4 +78,5 @@ PREFIXES_TO_HPARAMS = {
     AGENT_PREFIX_BETA_TWO: ADAM_BETA_TWO,
     AGENT_PREFIX_EPS: ADAM_EPS,
     AGENT_PREFIX_SGD_MOMENTUM: SGD_MOMENTUM,
+    AGENT_GRADIENT_ACCUMULATION: GRADIENT_ACCUMULATION,
 }
```
libinephany/utils/enums.py CHANGED

```diff
@@ -14,6 +14,7 @@ from libinephany.utils.constants import (
     DROPOUT,
     EPOCHS,
     GRAD_NORM_CLIP,
+    GRADIENT_ACCUMULATION,
     LEARNING_RATE,
     SAMPLES,
     SGD_MOMENTUM,
@@ -69,6 +70,7 @@ class AgentTypes(EnumWithIndices):
     AdamBetaTwoAgent = ADAM_BETA_TWO
     AdamEpsAgent = ADAM_EPS
     SGDMomentumAgent = SGD_MOMENTUM
+    GradientAccumulationAgent = GRADIENT_ACCUMULATION
 
     # Deprecated or Non-Agent
     BatchSize = BATCH_SIZE
```
libinephany/utils/torch_distributed_utils.py CHANGED

```diff
@@ -4,8 +4,10 @@
 #
 # ======================================================================================================================
 
+import os
 from typing import Any
 
+import torch
 import torch.distributed as dist
 
 # ======================================================================================================================
@@ -14,7 +16,11 @@ import torch.distributed as dist
 #
 # ======================================================================================================================
 
+CUDA = "cuda"
+CPU = "cpu"
+CUDA_PREFIX = f"{CUDA}:"
 MASTER_SCHEDULER_RANK = 0
+LOCAL_RANK = "LOCAL_RANK"
 
 # ======================================================================================================================
 #
@@ -48,7 +54,10 @@ def get_local_rank() -> int:
     :return: Distributed computing rank of this process.
     """
 
-
+    if not is_distributed():
+        return MASTER_SCHEDULER_RANK
+
+    return dist.get_rank()
 
 
 def is_scheduler_master_rank() -> bool:
@@ -83,3 +92,15 @@ def barrier() -> None:
 
     if is_distributed():
         dist.barrier()
+
+
+def get_local_device() -> torch.device:
+    """
+    :return: Local device of the current rank.
+    """
+
+    if not is_distributed():
+        return torch.device(CUDA if torch.cuda.is_available() else CPU)
+
+    local_device_rank = os.environ.get(LOCAL_RANK, MASTER_SCHEDULER_RANK)
+    return torch.device(f"{CUDA_PREFIX}{local_device_rank}" if torch.cuda.is_available() else CPU)
```
{libinephany-0.14.1.dist-info → libinephany-0.15.1.dist-info}/RECORD CHANGED

```diff
@@ -6,36 +6,36 @@ libinephany/observations/observation_utils.py,sha256=wb6EZiaEiPuOqN26zzuT1rHyeho
 libinephany/observations/observer_pipeline.py,sha256=ZhONGXJQSgs2VJJn9d2F7ItkYqntvchl9-JTyxW9eU0,12146
 libinephany/observations/pipeline_coordinator.py,sha256=FrN3linKaC0pVE5uKjlh_0Fi8Mb1oK91NzH3Fq7PvyM,7420
 libinephany/observations/statistic_manager.py,sha256=LLg1zSxnJr2oQQepYla3qoUuRy10rsthr9jta4wEbnc,8956
-libinephany/observations/statistic_trackers.py,sha256=
+libinephany/observations/statistic_trackers.py,sha256=J444i9EZ30vcYOEYqcDBzz7_UDpEE2hW_ISYBu_hwYc,30180
 libinephany/observations/observers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 libinephany/observations/observers/base_observers.py,sha256=RkG5SW0b6Ooy0_oscRHxyB_YFNP7k8fxu37jBZElxIM,15418
-libinephany/observations/observers/global_observers.py,sha256
+libinephany/observations/observers/global_observers.py,sha256=3TaiV2AxMOXfDq-kXMU3ZSo-rQENNCFhdWCJtpY99ok,38684
 libinephany/observations/observers/local_observers.py,sha256=EdivrylOcmxRsu4xiMwZqwmPX8Ru9-IRwoPk6En7qvw,37050
 libinephany/observations/observers/observer_containers.py,sha256=g73ScbRRVTNbGEBb-Nyk8AQwoDhKZaqTd6OYP8FIcOs,8771
 libinephany/observations/post_processors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 libinephany/observations/post_processors/postprocessors.py,sha256=43_e5UaDPr2KbAvqc_w3wLqnlm7bgRjqgCtyQ95-8cM,5913
 libinephany/pydantic_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 libinephany/pydantic_models/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-libinephany/pydantic_models/configs/hyperparameter_configs.py,sha256=
+libinephany/pydantic_models/configs/hyperparameter_configs.py,sha256=FYl8A2_9L-ohg36aZEW5kREO3tcqIyztYpW62s99tqY,14562
 libinephany/pydantic_models/configs/observer_config.py,sha256=v_ChzaVXC_rlZ7eDZPuCae1DdG7-PS3mPwC-OaWpGQo,1355
 libinephany/pydantic_models/configs/outer_model_config.py,sha256=GQ0QBSC2Xht8x8X_TEMfYM2GF_x1kErLuFrA_H6Jhs0,1209
 libinephany/pydantic_models/schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 libinephany/pydantic_models/schemas/agent_info.py,sha256=me5gDxvZjP9TNK588mpUvxiiJrPDqy3Z7ZHRzryAYTs,2628
-libinephany/pydantic_models/schemas/inner_task_profile.py,sha256=
+libinephany/pydantic_models/schemas/inner_task_profile.py,sha256=Xu0tQmhGwV043tTamFiHekuE1RRXhhrUrGbtymjXo7g,11722
 libinephany/pydantic_models/schemas/observation_models.py,sha256=YjQmrWZ0r-_LRp92jvhSD8p1grKsMVXCXoou4q15Ue8,1849
 libinephany/pydantic_models/schemas/request_schemas.py,sha256=VED8eAUvBofxeAx9gWU8DyCZOTVD3QsHRq-TO7kyOqk,1260
 libinephany/pydantic_models/schemas/response_schemas.py,sha256=SKFuasdjX5aH_I0vT3SwnpwhyMf9cNPB1ZpDeAGgoO8,2158
 libinephany/pydantic_models/schemas/tensor_statistics.py,sha256=Z-x-Fi_Dm0pLoHI88DnJO1krY671o0zbGRzx-gXPtVY,7534
 libinephany/pydantic_models/states/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-libinephany/pydantic_models/states/hyperparameter_states.py,sha256=
+libinephany/pydantic_models/states/hyperparameter_states.py,sha256=fwqUmRbT5WxcnMPK8DmRXkBQOtCs9n6V24BeCyFTFL8,32688
 libinephany/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 libinephany/utils/agent_utils.py,sha256=_2w1AY5Y4mQ5hes_Rq014VhZXOtIOn-W92mZgeixv3g,2658
 libinephany/utils/asyncio_worker.py,sha256=Ew23zKIbG1zwyCudcyiObMrw4G0f3p2QXzZfM4mePqI,2751
 libinephany/utils/backend_statuses.py,sha256=ZbpBPbz0qKmeqxyGGN_ePTrQ7Wrxh7KM6W26UDbPXtQ,644
-libinephany/utils/constants.py,sha256=
+libinephany/utils/constants.py,sha256=piawYQa51vCxxAHCH3YoWOgUhTlgqgQxKMCenkoQTsc,2170
 libinephany/utils/directory_utils.py,sha256=408unVeE_5_Hm-ZYZuxc9sdvfuU0CgYELX7EzPlPieo,1217
 libinephany/utils/dropout_utils.py,sha256=X43yCW7Dh1cC5sNnivgS5j1fn871K_RCvxCBTT0YHKg,3392
-libinephany/utils/enums.py,sha256=
+libinephany/utils/enums.py,sha256=kEECkJO2quKAyVAqzgOzOP-d4qIENE3z_RyymSvyIB8,2420
 libinephany/utils/error_severities.py,sha256=B9oidqOVaYOe0W6P6GwjpmuDsrkyTX30v1xdiUStCFk,1427
 libinephany/utils/exceptions.py,sha256=kgwLpHOgy3kciUz_I18xnYsWRtzdonfadUtwG2uDYk8,1823
 libinephany/utils/import_utils.py,sha256=WzC6V6UIa0nCiU2MekROwG82fWBh9RuVzichtby5EvM,1495
@@ -43,15 +43,15 @@ libinephany/utils/optim_utils.py,sha256=-PLqsyuq4ZH3spBy_olNB3yuLwvhnLrCF0384elC
 libinephany/utils/random_seeds.py,sha256=eF-ErrMShu8mp9V_gXrB_iUxR-Lb-OtHypEEUQAGn2Y,1565
 libinephany/utils/samplers.py,sha256=uyVGAy5cm5bCyWMOuySJmzUc_vFuieO_3zydJciwdv4,12158
 libinephany/utils/standardizers.py,sha256=pG1K_XL4OR_NjVtT6Hjbln1dk1BtQdDuSK1PQTkA17Y,8014
-libinephany/utils/torch_distributed_utils.py,sha256=
+libinephany/utils/torch_distributed_utils.py,sha256=UPMfhdZZwyHX_r3h55AAK4PcB-zFtjK37Z5aawAKNmE,2968
 libinephany/utils/torch_utils.py,sha256=o5TsqrXe6Id04P6SqB_avGBRZutbu6IBB61llAHQ_PY,2696
 libinephany/utils/transforms.py,sha256=Ca4pbCs_FbCpXb8M8oPxrP5QOqOAwGSdGpKzy5YUubc,3503
 libinephany/utils/typing.py,sha256=rGbaPO3MaUndsWiC_wHzReD_TOLYqb43i01pKN-j7Xs,624
 libinephany/web_apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 libinephany/web_apps/error_logger.py,sha256=gAQIaqerqP4ornXZwFF1cghjnd2mMZEt3aVrTuUCr34,16653
 libinephany/web_apps/web_app_utils.py,sha256=qiq_lasPipgN1RgRudPJc342kYci8O_4RqppxmIX8NY,4095
-libinephany-0.
-libinephany-0.
-libinephany-0.
-libinephany-0.
-libinephany-0.
+libinephany-0.15.1.dist-info/licenses/LICENSE,sha256=pogfDoMBP07ehIOvWymuWIar8pg2YLUhqOHsJQU3wdc,9250
+libinephany-0.15.1.dist-info/METADATA,sha256=lsqYtqyJ_k_clascJkzx8rR7gEN75tZ8lCKNqcH1cps,8354
+libinephany-0.15.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+libinephany-0.15.1.dist-info/top_level.txt,sha256=bYAOXQdJgIoLkO2Ui0kxe7pSYegS_e38u0dMscd7COQ,12
+libinephany-0.15.1.dist-info/RECORD,,
```

Files without changes: {libinephany-0.14.1.dist-info → libinephany-0.15.1.dist-info}/WHEEL, licenses/LICENSE, top_level.txt