libinephany 0.14.0__tar.gz → 0.15.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. libinephany-0.15.0/CODE_VERSION.cfg +1 -0
  2. {libinephany-0.14.0/libinephany.egg-info → libinephany-0.15.0}/PKG-INFO +1 -1
  3. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/configs/hyperparameter_configs.py +25 -1
  4. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/schemas/inner_task_profile.py +37 -0
  5. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/states/hyperparameter_states.py +12 -0
  6. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/constants.py +5 -0
  7. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/enums.py +2 -0
  8. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/samplers.py +30 -16
  9. {libinephany-0.14.0 → libinephany-0.15.0/libinephany.egg-info}/PKG-INFO +1 -1
  10. libinephany-0.14.0/CODE_VERSION.cfg +0 -1
  11. {libinephany-0.14.0 → libinephany-0.15.0}/LICENSE +0 -0
  12. {libinephany-0.14.0 → libinephany-0.15.0}/MANIFEST.in +0 -0
  13. {libinephany-0.14.0 → libinephany-0.15.0}/README.md +0 -0
  14. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/__init__.py +0 -0
  15. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/aws/__init__.py +0 -0
  16. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/aws/s3_functions.py +0 -0
  17. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/__init__.py +0 -0
  18. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/observation_utils.py +0 -0
  19. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/observer_pipeline.py +0 -0
  20. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/observers/__init__.py +0 -0
  21. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/observers/base_observers.py +0 -0
  22. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/observers/global_observers.py +0 -0
  23. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/observers/local_observers.py +0 -0
  24. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/observers/observer_containers.py +0 -0
  25. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/pipeline_coordinator.py +0 -0
  26. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/post_processors/__init__.py +0 -0
  27. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/post_processors/postprocessors.py +0 -0
  28. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/statistic_manager.py +0 -0
  29. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/observations/statistic_trackers.py +0 -0
  30. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/__init__.py +0 -0
  31. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/configs/__init__.py +0 -0
  32. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/configs/observer_config.py +0 -0
  33. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/configs/outer_model_config.py +0 -0
  34. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/schemas/__init__.py +0 -0
  35. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/schemas/agent_info.py +0 -0
  36. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/schemas/observation_models.py +0 -0
  37. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/schemas/request_schemas.py +0 -0
  38. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/schemas/response_schemas.py +0 -0
  39. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/schemas/tensor_statistics.py +0 -0
  40. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/pydantic_models/states/__init__.py +0 -0
  41. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/__init__.py +0 -0
  42. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/agent_utils.py +0 -0
  43. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/asyncio_worker.py +0 -0
  44. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/backend_statuses.py +0 -0
  45. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/directory_utils.py +0 -0
  46. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/dropout_utils.py +0 -0
  47. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/error_severities.py +0 -0
  48. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/exceptions.py +0 -0
  49. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/import_utils.py +0 -0
  50. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/optim_utils.py +0 -0
  51. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/random_seeds.py +0 -0
  52. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/standardizers.py +0 -0
  53. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/torch_distributed_utils.py +0 -0
  54. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/torch_utils.py +0 -0
  55. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/transforms.py +0 -0
  56. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/utils/typing.py +0 -0
  57. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/web_apps/__init__.py +0 -0
  58. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/web_apps/error_logger.py +0 -0
  59. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany/web_apps/web_app_utils.py +0 -0
  60. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany.egg-info/SOURCES.txt +0 -0
  61. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany.egg-info/dependency_links.txt +0 -0
  62. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany.egg-info/requires.txt +0 -0
  63. {libinephany-0.14.0 → libinephany-0.15.0}/libinephany.egg-info/top_level.txt +0 -0
  64. {libinephany-0.14.0 → libinephany-0.15.0}/pyproject.toml +0 -0
  65. {libinephany-0.14.0 → libinephany-0.15.0}/setup.cfg +0 -0
libinephany-0.15.0/CODE_VERSION.cfg (added)
@@ -0,0 +1 @@
+0.15.0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: libinephany
-Version: 0.14.0
+Version: 0.15.0
 Summary: Inephany library containing code commonly used by multiple subpackages.
 Author-email: Inephany <info@inephany.com>
 License: Apache 2.0
libinephany/pydantic_models/configs/hyperparameter_configs.py
@@ -4,7 +4,7 @@
 #
 # ======================================================================================================================
 
-from typing import Any
+from typing import Any, cast
 
 from pydantic import BaseModel, ConfigDict, ValidationError, field_serializer, field_validator, model_validator
 
@@ -232,6 +232,23 @@ class BatchSizeHParamConfig(HParamConfig):
     sample_discrete_values: list[float | int] | None = None
 
 
+class GradientAccumulationHParamConfig(HParamConfig):
+    max_hparam_value: float | int = 64
+    min_hparam_value: float | int = 1
+    hparam_dtype: type[float | int] = int
+    initial_value: int = 1
+    initial_delta: float = 0.0
+    scale: float = 1.0
+
+    sampler: str = "DiscreteRangeSampler"
+    sample_initial_values: bool = False
+    sample_lower_bound: int = 1
+    sample_upper_bound: int = 64
+    sample_step: int = 1
+    sample_discrete_values: list[float | int] | None = None
+    force_limit: float | int = 64
+
+
 class EpochsHParamConfig(HParamConfig):
     max_hparam_value: float | int = 16
     min_hparam_value: float | int = 1
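
Note: GradientAccumulationHParamConfig mirrors BatchSizeHParamConfig but defaults to integer accumulation steps in [1, 64] via a DiscreteRangeSampler, and adds a force_limit cap of 64. A minimal usage sketch; the field names come from the hunk above, but DiscreteRangeSampler's constructor arguments are inferred from the attributes it stores, so treat the exact signature as an assumption:

from libinephany.pydantic_models.configs.hyperparameter_configs import GradientAccumulationHParamConfig
from libinephany.utils.samplers import DiscreteRangeSampler

config = GradientAccumulationHParamConfig()

# Defaults from the diff: start at 1 accumulation step, never exceed 64,
# and (when sampling is enabled) draw integers 1..64 in steps of 1.
sampler = DiscreteRangeSampler(
    lower_bound=config.sample_lower_bound,  # 1
    upper_bound=config.sample_upper_bound,  # 64
    step=config.sample_step,                # 1
    sample_dtype=config.hparam_dtype,       # int
)
accumulation_steps = sampler(number_of_samples=1)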
@@ -289,6 +306,7 @@ class HParamConfigs(BaseModel):
     sgd_momentum_config: HParamConfig = SGDMomentumHParamConfig()
 
     batch_size_config: HParamConfig = BatchSizeHParamConfig()
+    gradient_accumulation_config: GradientAccumulationHParamConfig = GradientAccumulationHParamConfig()
     epochs_config: HParamConfig = EpochsHParamConfig()
     token_config: HParamConfig = TokensHParamConfig()
     samples_config: HParamConfig = SamplesHParamConfig()
@@ -351,6 +369,9 @@ class HParamConfigs(BaseModel):
             case AgentTypes.BatchSize:
                 self.batch_size_config = hparam_config
 
+            case AgentTypes.GradientAccumulationAgent:
+                self.gradient_accumulation_config = cast(GradientAccumulationHParamConfig, hparam_config)
+
             case AgentTypes.Epochs:
                 self.epochs_config = hparam_config
 
@@ -400,6 +421,9 @@ class HParamConfigs(BaseModel):
             case AgentTypes.BatchSize:
                 return self.batch_size_config
 
+            case AgentTypes.GradientAccumulationAgent:
+                return self.gradient_accumulation_config
+
             case AgentTypes.Epochs:
                 return self.epochs_config
 
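
Note: unlike the sibling fields, gradient_accumulation_config is declared with the concrete type GradientAccumulationHParamConfig rather than the HParamConfig base, which is why the setter above needs typing.cast: the incoming hparam_config is typed as the base class. A simplified, self-contained illustration of the pattern (not the library's real classes):

from typing import cast

class HParamConfig: ...
class GradientAccumulationHParamConfig(HParamConfig): ...

def store(hparam_config: HParamConfig) -> GradientAccumulationHParamConfig:
    # Without the cast, assigning a base-typed value to a slot declared with
    # the subclass type would fail static type checking (e.g. under mypy).
    return cast(GradientAccumulationHParamConfig, hparam_config)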
libinephany/pydantic_models/schemas/inner_task_profile.py
@@ -24,6 +24,7 @@ class InnerTaskProfile(BaseModel):
     number_of_layers: int
     observation_space_sizes: dict[str, int]
     action_space_sizes: dict[str, int]
+    number_of_parameters: int
     vram_usage: float
     idle_vram_usage: float
 
@@ -139,6 +140,40 @@ class InnerTaskProfiles(BaseModel):
 
         return sum(self.compiled_action_sizes.values())
 
+    @property
+    def max_total_observation_size(self) -> int:
+        """
+        :return: The summed observation size of all agents with the task that has the most layers.
+        """
+
+        if not self.profiles:
+            raise ValueError(
+                "No profiles to calculate max total observation size. Ensure profiles have been "
+                "added before executing the training loop"
+            )
+
+        largest_task_name = max(self.profiles, key=lambda k: self.profiles[k].number_of_layers)
+        largest_task = self.profiles[largest_task_name]
+
+        return sum(largest_task.observation_space_sizes.values())
+
+    @property
+    def max_total_action_size(self) -> int:
+        """
+        :return: The summed action size of all agents with the task that has the most layers.
+        """
+
+        if not self.profiles:
+            raise ValueError(
+                "No profiles to calculate max total action size. Ensure profiles have been "
+                "added before executing the training loop"
+            )
+
+        largest_task_name = max(self.profiles, key=lambda k: self.profiles[k].number_of_layers)
+        largest_task = self.profiles[largest_task_name]
+
+        return sum(largest_task.action_space_sizes.values())
+
     @staticmethod
     def _compile_gym_space_sizes(spaces: dict[str, dict[str, int]]) -> dict[str, int]:
         """
@@ -206,6 +241,7 @@ class InnerTaskProfiles(BaseModel):
         number_of_layers: int,
         observation_space_sizes: dict[str, int],
         action_space_sizes: dict[str, int],
+        number_of_parameters: int,
         vram_usage: float,
         idle_vram_usage: float,
     ) -> None:
@@ -226,6 +262,7 @@ class InnerTaskProfiles(BaseModel):
             number_of_layers=number_of_layers,
             observation_space_sizes=observation_space_sizes,
             action_space_sizes=action_space_sizes,
+            number_of_parameters=number_of_parameters,
             vram_usage=vram_usage,
             idle_vram_usage=idle_vram_usage,
         )
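
Note: both new properties use the same selection rule: take the profile with the most layers and sum its per-agent space sizes. A standalone illustration with plain dicts (the real InnerTaskProfile has additional required fields, so the models themselves are not constructed here):

profiles = {
    "small-task": {"number_of_layers": 4, "observation_space_sizes": {"lr-agent": 12}},
    "large-task": {"number_of_layers": 12, "observation_space_sizes": {"lr-agent": 36, "dropout-agent": 8}},
}

largest = max(profiles, key=lambda name: profiles[name]["number_of_layers"])
max_total_observation_size = sum(profiles[largest]["observation_space_sizes"].values())
print(largest, max_total_observation_size)  # large-task 44

This returns the summed size of the deepest task, which equals the true maximum only if space sizes grow with layer count.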
libinephany/pydantic_models/states/hyperparameter_states.py
@@ -20,6 +20,7 @@ from libinephany.utils.constants import (
    DROPOUT,
    EPOCHS,
    GRAD_NORM_CLIP,
+   GRADIENT_ACCUMULATION,
    LEARNING_RATE,
    SAMPLES,
    SGD_MOMENTUM,
@@ -60,6 +61,7 @@ class UpdateCallbacks(BaseModel):
     sgd_momentum: Callable[..., None]
 
     batch_size: Callable[..., None] | None
+    gradient_accumulation: Callable[..., None] | None
     epochs: Callable[..., None] | None
 
     def __getitem__(self, item: str) -> Callable[..., None] | None:
@@ -457,6 +459,7 @@ class ParameterGroupHParams(HyperparameterContainer):
 class GlobalHParams(HyperparameterContainer):
 
     batch_size: Hyperparameter
+    gradient_accumulation: Hyperparameter
     epochs: Hyperparameter
     tokens: Hyperparameter
     samples: Hyperparameter
@@ -550,6 +553,14 @@ class HyperparameterStates(BaseModel):
         """
         return self.global_hparams.batch_size
 
+    @computed_field  # type: ignore[misc]
+    @property
+    def gradient_accumulation(self) -> Hyperparameter:
+        """
+        :return: The gradient accumulation steps of the inner model.
+        """
+        return self.global_hparams.gradient_accumulation
+
     @computed_field  # type: ignore[misc]
     @property
     def epochs(self) -> Hyperparameter:
@@ -676,6 +687,7 @@ class HyperparameterStates(BaseModel):
 
         return {
             BATCH_SIZE: hparam_configs.batch_size_config,
+            GRADIENT_ACCUMULATION: hparam_configs.gradient_accumulation_config,
             EPOCHS: hparam_configs.epochs_config,
             TOKENS: hparam_configs.token_config,
             SAMPLES: hparam_configs.samples_config,
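
Note: the new accessor simply proxies global_hparams, but because it is declared with @computed_field it also appears in serialized output. A self-contained sketch of the same pydantic v2 pattern with simplified types (the real Hyperparameter model is replaced by a float here):

from pydantic import BaseModel, computed_field

class GlobalHParams(BaseModel):
    gradient_accumulation: float = 1.0

class States(BaseModel):
    global_hparams: GlobalHParams = GlobalHParams()

    @computed_field  # type: ignore[misc]
    @property
    def gradient_accumulation(self) -> float:
        return self.global_hparams.gradient_accumulation

states = States()
print(states.gradient_accumulation)  # 1.0
print(states.model_dump())           # includes a "gradient_accumulation" entry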
libinephany/utils/constants.py
@@ -21,6 +21,7 @@ ADAM_BETA_TWO = "adam_beta_two"
 ADAM_EPS = "adam_eps"
 SGD_MOMENTUM = "sgd_momentum"
 BATCH_SIZE = "batch_size"
+GRADIENT_ACCUMULATION = "gradient_accumulation"
 EPOCHS = "epochs"
 TOKENS = "tokens"
 SAMPLES = "samples"
@@ -41,6 +42,7 @@ AGENT_PREFIX_EPS = "adam-eps"
 AGENT_PREFIX_SGD_MOMENTUM = "sgd-momentum"
 
 AGENT_BATCH_SIZE = "batch-size"
+AGENT_GRADIENT_ACCUMULATION = "gradient-accumulation"
 
 AGENT_BANDIT_SUFFIX = "bandit-agent"
 
@@ -53,6 +55,7 @@ AGENT_TYPES = [
     ADAM_BETA_TWO,
     ADAM_EPS,
     SGD_MOMENTUM,
+    GRADIENT_ACCUMULATION,
 ]
 SUFFIXES = [AGENT_BANDIT_SUFFIX]
 PREFIXES = [
@@ -64,6 +67,7 @@ PREFIXES = [
     AGENT_PREFIX_BETA_TWO,
     AGENT_PREFIX_EPS,
     AGENT_PREFIX_SGD_MOMENTUM,
+    AGENT_GRADIENT_ACCUMULATION,
 ]
 PREFIXES_TO_HPARAMS = {
     AGENT_PREFIX_LR: LEARNING_RATE,
@@ -74,4 +78,5 @@ PREFIXES_TO_HPARAMS = {
     AGENT_PREFIX_BETA_TWO: ADAM_BETA_TWO,
     AGENT_PREFIX_EPS: ADAM_EPS,
     AGENT_PREFIX_SGD_MOMENTUM: SGD_MOMENTUM,
+    AGENT_GRADIENT_ACCUMULATION: GRADIENT_ACCUMULATION,
 }
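
Note: with these constants in place, an agent name carrying the new prefix resolves to the gradient_accumulation hyperparameter. A small lookup sketch; the example agent name (prefix plus the bandit suffix) is an assumption based on the constants above, not a name taken from the codebase:

from libinephany.utils import constants

agent_name = "gradient-accumulation-bandit-agent"  # hypothetical
prefix = next(p for p in constants.PREFIXES if agent_name.startswith(p))
print(constants.PREFIXES_TO_HPARAMS[prefix])  # gradient_accumulation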
libinephany/utils/enums.py
@@ -14,6 +14,7 @@ from libinephany.utils.constants import (
    DROPOUT,
    EPOCHS,
    GRAD_NORM_CLIP,
+   GRADIENT_ACCUMULATION,
    LEARNING_RATE,
    SAMPLES,
    SGD_MOMENTUM,
@@ -69,6 +70,7 @@ class AgentTypes(EnumWithIndices):
     AdamBetaTwoAgent = ADAM_BETA_TWO
     AdamEpsAgent = ADAM_EPS
     SGDMomentumAgent = SGD_MOMENTUM
+    GradientAccumulationAgent = GRADIENT_ACCUMULATION
 
     # Deprecated or Non-Agent
     BatchSize = BATCH_SIZE
libinephany/utils/samplers.py
@@ -6,6 +6,7 @@
 
 import math
 from abc import abstractmethod
+from typing import Any
 
 import numpy as np
 
@@ -20,14 +21,14 @@ from libinephany.utils import random_seeds
 
 class Sampler:
 
-    def __init__(self, lower_bound: float | int, upper_bound: float | int, **kwargs) -> None:
+    def __init__(self, lower_bound: float | int | None, upper_bound: float | int | None, **kwargs) -> None:
         """
         :param lower_bound: Lower bound of the distribution to sample from.
         :param upper_bound: Upper bound of the distribution to sample from.
         :param kwargs: Miscellaneous keyword arguments.
         """
 
-        if upper_bound < lower_bound:
+        if upper_bound is not None and lower_bound is not None and upper_bound < lower_bound:
             upper_bound, lower_bound = lower_bound, upper_bound
 
         self.lower_bound = lower_bound
@@ -37,11 +38,11 @@ class Sampler:
         self,
         number_of_samples: int = 1,
         seed: int | None = None,
-    ) -> np.ndarray | float:
+    ) -> list[Any] | np.ndarray | float:
         """
         :param number_of_samples: Number of samples to make.
         :param seed: Random seed to use for distribution sampling.
-        :return: Array of sampled values or a single sampled value.
+        :return: List/Array of sampled values or a single sampled value.
         """
 
         if seed is not None:
@@ -49,10 +50,16 @@
 
         sample = self.sample(number_of_samples=number_of_samples)
 
-        return sample.item() if sample.size == 1 else sample
+        if isinstance(sample, np.ndarray) and sample.size == 1:
+            return sample.item()
+
+        elif isinstance(sample, list) and len(sample) == 1:
+            return sample[0]
+
+        return sample
 
     @abstractmethod
-    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray:
+    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray | list[Any]:
         """
         :param number_of_samples: Number of samples to make.
         :param kwargs: Miscellaneous keyword arguments.
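
Note: __call__ now unwraps both return types: single-element NumPy arrays keep the old scalar behaviour, and single-element lists (e.g. from string-valued samplers) are unwrapped to their only element. A standalone mirror of the branching, not an import of the library class:

import numpy as np

def unwrap(sample):
    # Same branching as Sampler.__call__ above.
    if isinstance(sample, np.ndarray) and sample.size == 1:
        return sample.item()
    elif isinstance(sample, list) and len(sample) == 1:
        return sample[0]
    return sample

print(unwrap(np.array([3.0])))       # 3.0
print(unwrap(["cosine"]))            # cosine
print(unwrap(["cosine", "linear"]))  # ['cosine', 'linear']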
@@ -64,7 +71,7 @@
 
 class LogUniformSampler(Sampler):
 
-    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray:
+    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray | list[Any]:
         """
         :param number_of_samples: Number of samples to make.
         :param kwargs: Miscellaneous keyword arguments.
@@ -78,7 +85,7 @@ class LogUniformSampler(Sampler):
 
 class UniformSampler(Sampler):
 
-    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray:
+    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray | list[Any]:
         """
         :param number_of_samples: Number of samples to make.
         :param kwargs: Miscellaneous keyword arguments.
@@ -113,7 +120,7 @@ class DiscreteRangeSampler(Sampler):
         self.step = step
         self.sample_dtype = sample_dtype
 
-    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray:
+    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray | list[Any]:
         """
         :param number_of_samples: Number of samples to make.
         :param kwargs: Miscellaneous keyword arguments.
@@ -160,7 +167,7 @@ class RoundRobinSampler(Sampler):
 
         return magnitude_list
 
-    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray:
+    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray | list[Any]:
         """
         :param number_of_samples: Number of samples to make.
         :param kwargs: Miscellaneous keyword arguments.
@@ -187,8 +194,8 @@ class DiscreteValueSampler(Sampler):
 
     def __init__(
         self,
-        discrete_values: list[float | int],
-        sample_dtype: type[np.generic | float | int] = np.float64,
+        discrete_values: list[float | int | str],
+        sample_dtype: type[np.generic | float | int | str] = np.float64,
         **kwargs,
     ) -> None:
         """
@@ -196,12 +203,19 @@ class DiscreteValueSampler(Sampler):
         :param kwargs: Miscellaneous keyword arguments.
         """
 
-        super().__init__(lower_bound=min(discrete_values), upper_bound=max(discrete_values))
+        if sample_dtype == str and any(isinstance(value, str) for value in discrete_values):
+            lower_bound, upper_bound = None, None
+
+        else:
+            lower_bound = min(discrete_values)
+            upper_bound = max(discrete_values)
+
+        super().__init__(lower_bound=lower_bound, upper_bound=upper_bound)  # type: ignore
 
         self.discrete_values = discrete_values
         self.sample_dtype = sample_dtype
 
-    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray:
+    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray | list[Any]:
         """
         :param number_of_samples: Number of samples to make.
         :param kwargs: Miscellaneous keyword arguments.
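
Note: bounds are skipped for string values because min()/max() ordering is meaningless there, and the None-tolerant Sampler.__init__ above makes that legal. A usage sketch (the sampled value is illustrative; it depends on the seed):

from libinephany.utils.samplers import DiscreteValueSampler

sampler = DiscreteValueSampler(discrete_values=["cosine", "linear", "constant"], sample_dtype=str)
print(sampler.lower_bound, sampler.upper_bound)  # None None
schedule = sampler(number_of_samples=1)          # e.g. "linear"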
@@ -234,7 +248,7 @@ class RoundRobinDiscreteValueSampler(Sampler):
 
         self.sampled_elements: set[float] = set()
 
-    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray:
+    def sample(self, number_of_samples: int = 1, **kwargs) -> np.ndarray | list[Any]:
         """
         :param number_of_samples: Number of samples to make.
         :param kwargs: Miscellaneous keyword arguments.
@@ -329,7 +343,7 @@ def sample_from_discrete_range(
     return sample
 
 
-def sample_from_discrete_values(discrete_values: list[float | int], number_of_samples: int = 1) -> np.ndarray:
+def sample_from_discrete_values(discrete_values: list[float | int | str], number_of_samples: int = 1) -> np.ndarray:
     """
     :param discrete_values: List of discrete values to sample from.
     :param number_of_samples: Number of samples to make.
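
Note: this helper's return annotation stays np.ndarray even though it now accepts strings; given the list-returning changes to the Sampler classes above, the annotation may simply not have been widened here. A hedged usage sketch:

from libinephany.utils.samplers import sample_from_discrete_values

choices = sample_from_discrete_values(discrete_values=["cosine", "linear"], number_of_samples=2)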
libinephany.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: libinephany
-Version: 0.14.0
+Version: 0.15.0
 Summary: Inephany library containing code commonly used by multiple subpackages.
 Author-email: Inephany <info@inephany.com>
 License: Apache 2.0
libinephany-0.14.0/CODE_VERSION.cfg (removed)
@@ -1 +0,0 @@
-0.14.0