shepherd-core 2025.4.1-py3-none-any.whl → 2025.5.2-py3-none-any.whl

This diff reflects the changes between two publicly released versions of the package, as they appear in a supported public registry, and is provided for informational purposes only.
Files changed (61)
  1. shepherd_core/calibration_hw_def.py +11 -11
  2. shepherd_core/commons.py +4 -4
  3. shepherd_core/data_models/__init__.py +2 -0
  4. shepherd_core/data_models/base/cal_measurement.py +10 -11
  5. shepherd_core/data_models/base/calibration.py +7 -6
  6. shepherd_core/data_models/base/content.py +1 -1
  7. shepherd_core/data_models/base/shepherd.py +6 -7
  8. shepherd_core/data_models/base/wrapper.py +2 -2
  9. shepherd_core/data_models/content/_external_fixtures.yaml +32 -32
  10. shepherd_core/data_models/content/energy_environment.py +6 -5
  11. shepherd_core/data_models/content/firmware.py +9 -7
  12. shepherd_core/data_models/content/virtual_harvester.py +34 -26
  13. shepherd_core/data_models/content/virtual_harvester_fixture.yaml +2 -2
  14. shepherd_core/data_models/content/virtual_source.py +20 -17
  15. shepherd_core/data_models/content/virtual_source_fixture.yaml +3 -3
  16. shepherd_core/data_models/experiment/experiment.py +15 -15
  17. shepherd_core/data_models/experiment/observer_features.py +109 -16
  18. shepherd_core/data_models/experiment/target_config.py +17 -12
  19. shepherd_core/data_models/task/__init__.py +11 -8
  20. shepherd_core/data_models/task/emulation.py +32 -17
  21. shepherd_core/data_models/task/firmware_mod.py +11 -11
  22. shepherd_core/data_models/task/harvest.py +7 -6
  23. shepherd_core/data_models/task/observer_tasks.py +7 -7
  24. shepherd_core/data_models/task/programming.py +13 -12
  25. shepherd_core/data_models/task/testbed_tasks.py +8 -8
  26. shepherd_core/data_models/testbed/cape.py +7 -6
  27. shepherd_core/data_models/testbed/gpio.py +8 -7
  28. shepherd_core/data_models/testbed/mcu.py +8 -7
  29. shepherd_core/data_models/testbed/mcu_fixture.yaml +4 -4
  30. shepherd_core/data_models/testbed/observer.py +9 -7
  31. shepherd_core/data_models/testbed/target.py +9 -7
  32. shepherd_core/data_models/testbed/testbed.py +11 -10
  33. shepherd_core/data_models/virtual_source_doc.txt +3 -3
  34. shepherd_core/decoder_waveform/uart.py +5 -5
  35. shepherd_core/fw_tools/converter.py +10 -6
  36. shepherd_core/fw_tools/patcher.py +14 -15
  37. shepherd_core/fw_tools/validation.py +11 -6
  38. shepherd_core/inventory/__init__.py +6 -6
  39. shepherd_core/inventory/python.py +1 -1
  40. shepherd_core/inventory/system.py +11 -8
  41. shepherd_core/inventory/target.py +3 -3
  42. shepherd_core/logger.py +2 -2
  43. shepherd_core/reader.py +105 -78
  44. shepherd_core/testbed_client/client_abc_fix.py +22 -16
  45. shepherd_core/testbed_client/client_web.py +18 -11
  46. shepherd_core/testbed_client/fixtures.py +21 -22
  47. shepherd_core/testbed_client/user_model.py +6 -5
  48. shepherd_core/version.py +1 -1
  49. shepherd_core/vsource/target_model.py +3 -3
  50. shepherd_core/vsource/virtual_converter_model.py +3 -3
  51. shepherd_core/vsource/virtual_harvester_model.py +7 -9
  52. shepherd_core/vsource/virtual_harvester_simulation.py +7 -6
  53. shepherd_core/vsource/virtual_source_model.py +6 -5
  54. shepherd_core/vsource/virtual_source_simulation.py +8 -7
  55. shepherd_core/writer.py +37 -39
  56. {shepherd_core-2025.4.1.dist-info → shepherd_core-2025.5.2.dist-info}/METADATA +2 -3
  57. shepherd_core-2025.5.2.dist-info/RECORD +81 -0
  58. {shepherd_core-2025.4.1.dist-info → shepherd_core-2025.5.2.dist-info}/WHEEL +1 -1
  59. shepherd_core-2025.4.1.dist-info/RECORD +0 -81
  60. {shepherd_core-2025.4.1.dist-info → shepherd_core-2025.5.2.dist-info}/top_level.txt +0 -0
  61. {shepherd_core-2025.4.1.dist-info → shepherd_core-2025.5.2.dist-info}/zip-safe +0 -0
shepherd_core/reader.py CHANGED
@@ -9,14 +9,11 @@ import math
 import os
 from itertools import product
 from pathlib import Path
+from types import MappingProxyType
 from typing import TYPE_CHECKING
+from typing import Annotated
 from typing import Any
-from typing import ClassVar
-from typing import Dict
-from typing import Generator
-from typing import List
 from typing import Optional
-from typing import Type
 from typing import Union
 
 import h5py
@@ -25,14 +22,18 @@ import yaml
 from pydantic import validate_call
 from tqdm import trange
 from typing_extensions import Self
+from typing_extensions import deprecated
 
-from .commons import samplerate_sps_default
+from .commons import SAMPLERATE_SPS_DEFAULT
 from .data_models.base.calibration import CalibrationPair
 from .data_models.base.calibration import CalibrationSeries
 from .data_models.content.energy_environment import EnergyDType
 from .decoder_waveform import Uart
 
 if TYPE_CHECKING:
+    from collections.abc import Generator
+    from collections.abc import Mapping
+    from collections.abc import Sequence
     from types import TracebackType
 
 
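Note: the commons constants were renamed to upper-case (samplerate_sps_default → SAMPLERATE_SPS_DEFAULT here, testbed_server_default → TESTBED_SERVER_URI in client_web.py further down). A minimal migration sketch for downstream code, assuming the old lowercase names were removed rather than aliased:

    # 2025.4.1
    # from shepherd_core.commons import samplerate_sps_default
    # 2025.5.2
    from shepherd_core.commons import SAMPLERATE_SPS_DEFAULT

    sample_interval_s = 1.0 / SAMPLERATE_SPS_DEFAULT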
@@ -46,28 +47,27 @@ class Reader:
 
     """
 
-    samples_per_buffer: int = 10_000
+    CHUNK_SAMPLES_N: int = 10_000
 
-    mode_dtype_dict: ClassVar[dict] = {
-        "harvester": [
-            EnergyDType.ivsample,
-            EnergyDType.ivcurve,
-            EnergyDType.isc_voc,
-        ],
-        "emulator": [EnergyDType.ivsample],
-    }
+    MODE_TO_DTYPE: Mapping[str, Sequence[EnergyDType]] = MappingProxyType(
+        {
+            "harvester": (
+                EnergyDType.ivsample,
+                EnergyDType.ivcurve,
+                EnergyDType.isc_voc,
+            ),
+            "emulator": (EnergyDType.ivsample,),
+        }
+    )
 
     @validate_call
     def __init__(
         self,
-        file_path: Optional[Path],
+        file_path: Path,
         *,
-        verbose: Optional[bool] = True,
+        verbose: bool = True,
     ) -> None:
-        if not hasattr(self, "file_path"):
-            self.file_path: Optional[Path] = None
-        if isinstance(file_path, (Path, str)):
-            self.file_path = Path(file_path).resolve()
+        self.file_path: Path = file_path.resolve()
 
         if not hasattr(self, "_logger"):
             self._logger: logging.Logger = logging.getLogger("SHPCore.Reader")
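Note: the class attribute changes from a mutable ClassVar[dict] of lists to a read-only Mapping of tuples. MappingProxyType is a stdlib view that rejects writes, so the mode table can no longer be mutated by accident through the class or an instance. A self-contained illustration of that stdlib behavior (not shepherd-specific):

    from types import MappingProxyType

    modes = MappingProxyType({"harvester": ("ivsample", "ivcurve")})
    print(modes["harvester"])   # reads work like a plain dict
    try:
        modes["emulator"] = ()  # any write raises
    except TypeError as e:
        print(e)  # 'mappingproxy' object does not support item assignment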
@@ -75,7 +75,7 @@ class Reader:
         self._logger.setLevel(logging.DEBUG if verbose else logging.INFO)
 
         if not hasattr(self, "samplerate_sps"):
-            self.samplerate_sps: int = samplerate_sps_default
+            self.samplerate_sps: int = SAMPLERATE_SPS_DEFAULT
         self.sample_interval_ns: int = round(10**9 // self.samplerate_sps)
         self.sample_interval_s: float = 1 / self.samplerate_sps
 
@@ -84,10 +84,13 @@
 
         # init stats
         self.runtime_s: float = 0
-        self.buffers_n: int = 0
+        self.samples_n: int = 0
+        self.chunks_n: int = 0
         self.file_size: int = 0
         self.data_rate: float = 0
 
+        self.buffers_n: Annotated[int, deprecated("use .chunk_n instead")] = 0
+
         # open file (if not already done by writer)
         self._reader_opened: bool = False
         if not hasattr(self, "h5file"):
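Note: the buffer-based counter gives way to sample- and chunk-based counters; buffers_n survives only as an alias annotated as deprecated (the message's ".chunk_n" presumably means ".chunks_n"), and the _refresh_file_stats() hunk further down keeps it in sync with chunks_n. A hypothetical downstream migration, assuming r is an opened Reader:

    n = r.chunks_n   # preferred name since 2025.5.2
    n = r.buffers_n  # still present and in sync, but flagged deprecated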
@@ -115,7 +118,8 @@
             )
 
         if not isinstance(self.h5file, h5py.File):
-            raise TypeError("Type of opened file is not h5py.File, for %s", self.file_path.name)
+            msg = f"Type of opened file is not h5py.File, for {self.file_path.name}"
+            raise TypeError(msg)
 
         self.ds_time: h5py.Dataset = self.h5file["data"]["time"]
         self.ds_voltage: h5py.Dataset = self.h5file["data"]["voltage"]
@@ -155,7 +159,7 @@
 
     def __exit__(
         self,
-        typ: Optional[Type[BaseException]] = None,
+        typ: Optional[type[BaseException]] = None,
         exc: Optional[BaseException] = None,
         tb: Optional[TracebackType] = None,
         extra_arg: int = 0,
@@ -171,59 +175,63 @@
     def _refresh_file_stats(self) -> None:
         """Update internal states, helpful after resampling or other changes in data-group."""
         self.h5file.flush()
-        sample_count = self.ds_time.shape[0]
+        self.samples_n = min(
+            self.ds_time.shape[0], self.ds_current.shape[0], self.ds_voltage.shape[0]
+        )
         duration_raw = (
-            (int(self.ds_time[sample_count - 1]) - int(self.ds_time[0])) if sample_count > 0 else 0
+            (int(self.ds_time[self.samples_n - 1]) - int(self.ds_time[0]))
+            if self.samples_n > 0
+            else 0
         )
         # above's typecasting prevents overflow in u64-format
-        if (sample_count > 0) and (duration_raw > 0):
-            # this assumes iso-chronous sampling
+        if (self.samples_n > 0) and (duration_raw > 0):
+            # this assumes iso-chronous sampling, TODO: not the best choice?
             duration_s = self._cal.time.raw_to_si(duration_raw)
-            self.sample_interval_s = duration_s / sample_count
+            self.sample_interval_s = duration_s / self.samples_n
             self.sample_interval_ns = round(10**9 * self.sample_interval_s)
-            self.samplerate_sps = max(round((sample_count - 1) / duration_s), 1)
-            self.runtime_s = round(self.ds_voltage.shape[0] / self.samplerate_sps, 1)
-            self.buffers_n = int(self.ds_voltage.shape[0] // self.samples_per_buffer)
+            self.samplerate_sps = max(round((self.samples_n - 1) / duration_s), 1)
+            self.runtime_s = round(self.samples_n / self.samplerate_sps, 1)
+            self.chunks_n = self.buffers_n = int(self.samples_n // self.CHUNK_SAMPLES_N)
         if isinstance(self.file_path, Path):
             self.file_size = self.file_path.stat().st_size
         else:
             self.file_size = 0
         self.data_rate = self.file_size / self.runtime_s if self.runtime_s > 0 else 0
 
-    def read_buffers(
+    def read(
         self,
         start_n: int = 0,
         end_n: Optional[int] = None,
-        n_samples_per_buffer: Optional[int] = None,
+        n_samples_per_chunk: Optional[int] = None,
         *,
         is_raw: bool = False,
-        omit_ts: bool = False,
+        omit_timestamps: bool = False,
     ) -> Generator[tuple, None, None]:
-        """Read the specified range of buffers from the hdf5 file.
+        """Read the specified range of chunks from the hdf5 file.
 
         Generator - can be configured on first call
 
         Args:
         ----
-        :param start_n: (int) Index of first buffer to be read
-        :param end_n: (int) Index of last buffer to be read
-        :param n_samples_per_buffer: (int) allows changing
+        :param start_n: (int) Index of first chunk to be read
+        :param end_n: (int) Index of last chunk to be read
+        :param n_samples_per_chunk: (int) allows changing
         :param is_raw: (bool) output original data, not transformed to SI-Units
-        :param omit_ts: (bool) optimize reading if timestamp is never used
-        Yields: Buffers between start and end (tuple with time, voltage, current)
+        :param omit_timestamps: (bool) optimize reading if timestamp is never used
+        Yields: chunks between start and end (tuple with time, voltage, current)
 
         """
-        if n_samples_per_buffer is None:
-            n_samples_per_buffer = self.samples_per_buffer
-        end_max = int(self.ds_voltage.shape[0] // n_samples_per_buffer)
+        if n_samples_per_chunk is None:
+            n_samples_per_chunk = self.CHUNK_SAMPLES_N
+        end_max = int(self.samples_n // n_samples_per_chunk)
         end_n = end_max if end_n is None else min(end_n, end_max)
-        self._logger.debug("Reading blocks %d to %d from source-file", start_n, end_n)
+        self._logger.debug("Reading chunk %d to %d from source-file", start_n, end_n)
         _raw = is_raw
-        _wts = not omit_ts
+        _wts = not omit_timestamps
 
         for i in range(start_n, end_n):
-            idx_start = i * n_samples_per_buffer
-            idx_end = idx_start + n_samples_per_buffer
+            idx_start = i * n_samples_per_chunk
+            idx_end = idx_start + n_samples_per_chunk
             if _raw:
                 yield (
                     self.ds_time[idx_start:idx_end] if _wts else None,
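Note: besides the rename, the generator's keywords changed (n_samples_per_buffer → n_samples_per_chunk, omit_ts → omit_timestamps) and end_max is now derived from samples_n. A hedged usage sketch; the file name is made up, and Reader is assumed to be re-exported at the package root:

    from pathlib import Path

    from shepherd_core import Reader  # assumption: re-exported at package root

    with Reader(Path("example_trace.h5")) as reader:
        for _ts, voltage, current in reader.read(start_n=0, end_n=10):
            power = voltage * current  # SI units; pass is_raw=True for raw values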
@@ -237,6 +245,24 @@
                     self._cal.current.raw_to_si(self.ds_current[idx_start:idx_end]),
                 )
 
+    @deprecated("use .read() instead")
+    def read_buffers(
+        self,
+        start_n: int = 0,
+        end_n: Optional[int] = None,
+        n_samples_per_buffer: Optional[int] = None,
+        *,
+        is_raw: bool = False,
+        omit_ts: bool = False,
+    ) -> Generator[tuple, None, None]:
+        return self.read(
+            start_n=start_n,
+            end_n=end_n,
+            n_samples_per_chunk=n_samples_per_buffer,
+            is_raw=is_raw,
+            omit_timestamps=omit_ts,
+        )
+
     def get_calibration_data(self) -> CalibrationSeries:
         """Read calibration-data from hdf5 file.
 
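Note: read_buffers() is kept as a thin delegating wrapper, so existing call sites keep working. Because it carries typing_extensions.deprecated, type checkers flag it, and per PEP 702 semantics each call should also emit a DeprecationWarning at runtime, which can be verified like this (reusing the hypothetical reader from the sketch above):

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        chunks = reader.read_buffers(end_n=1)  # old API, still functional
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)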
@@ -254,7 +280,7 @@
             return self.h5file.attrs["mode"]
         return ""
 
-    def get_config(self) -> Dict:
+    def get_config(self) -> dict:
         if "config" in self.h5file["data"].attrs:
             return yaml.safe_load(self.h5file["data"].attrs["config"])
         return {}
@@ -329,7 +355,7 @@
                 self.file_path.name,
             )
             return False
-        if self.h5file.attrs["mode"] not in self.mode_dtype_dict:
+        if self.h5file.attrs["mode"] not in self.MODE_TO_DTYPE:
             self._logger.error(
                 "[FileValidation] unsupported mode '%s' in '%s'",
                 attr,
@@ -361,7 +387,7 @@
                 self.file_path.name,
             )
             return False
-        if self.get_datatype() not in self.mode_dtype_dict[self.get_mode()]:
+        if self.get_datatype() not in self.MODE_TO_DTYPE[self.get_mode()]:
             self._logger.error(
                 "[FileValidation] unsupported type '%s' for mode '%s' in '%s'",
                 self.get_datatype(),
@@ -386,23 +412,22 @@
                 self.file_path.name,
             )
         # same length of datasets:
-        ds_volt_size = self.h5file["data"]["voltage"].shape[0]
         for dset in ["current", "time"]:
             ds_size = self.h5file["data"][dset].shape[0]
-            if ds_volt_size != ds_size:
+            if ds_size != self.samples_n:
                 self._logger.warning(
                     "[FileValidation] dataset '%s' has different size (=%d), "
-                    "compared to time-ds (=%d), in '%s'",
+                    "compared to smallest set (=%d), in '%s'",
                     dset,
                     ds_size,
-                    ds_volt_size,
+                    self.samples_n,
                     self.file_path.name,
                 )
-        # dataset-length should be multiple of buffersize
-        remaining_size = ds_volt_size % self.samples_per_buffer
+        # dataset-length should be multiple of chunk-size
+        remaining_size = self.samples_n % self.CHUNK_SAMPLES_N
         if remaining_size != 0:
            self._logger.warning(
-                "[FileValidation] datasets are not aligned with buffer-size in '%s'",
+                "[FileValidation] datasets are not aligned with chunk-size in '%s'",
                 self.file_path.name,
             )
         # check compression
@@ -457,10 +482,10 @@
 
         :return: sampled energy in Ws (watt-seconds)
         """
-        iterations = math.ceil(self.ds_voltage.shape[0] / self.max_elements)
+        iterations = math.ceil(self.samples_n / self.max_elements)
         job_iter = trange(
             0,
-            self.ds_voltage.shape[0],
+            self.samples_n,
             self.max_elements,
             desc="energy",
             leave=False,
@@ -468,7 +493,7 @@
         )
 
         def _calc_energy(idx_start: int) -> float:
-            idx_stop = min(idx_start + self.max_elements, self.ds_voltage.shape[0])
+            idx_stop = min(idx_start + self.max_elements, self.samples_n)
             vol_v = self._cal.voltage.raw_to_si(self.ds_voltage[idx_start:idx_stop])
             cur_a = self._cal.current.raw_to_si(self.ds_current[idx_start:idx_stop])
             return (vol_v[:] * cur_a[:]).sum() * self.sample_interval_s
@@ -478,7 +503,7 @@
 
     def _dset_statistics(
         self, dset: h5py.Dataset, cal: Optional[CalibrationPair] = None
-    ) -> Dict[str, float]:
+    ) -> dict[str, float]:
         """Create basic stats for a provided dataset.
 
         :param dset: dataset to evaluate
@@ -511,7 +536,7 @@
         if len(stats_list) < 1:
             return {}
         stats_nd = np.stack(stats_list)
-        stats: Dict[str, float] = {
+        stats: dict[str, float] = {
             # TODO: wrong calculation for ndim-datasets with n>1
             "mean": float(stats_nd[:, 0].mean()),
             "min": float(stats_nd[:, 1].min()),
@@ -521,17 +546,19 @@
         }
         return stats
 
-    def _data_timediffs(self) -> List[float]:
-        """Calculate list of unique time-deltas [s] between buffers.
+    def _data_timediffs(self) -> list[float]:
+        """Calculate list of unique time-deltas [s] between chunks.
 
-        Optimized version that only looks at the start of each buffer.
+        Optimized version that only looks at the start of each chunk.
+        Timestamps get converted to signed (it still fits > 100 years)
+        to allow calculating negative diffs.
 
-        :return: list of (unique) time-deltas between buffers [s]
+        :return: list of (unique) time-deltas between chunks [s]
         """
-        iterations = math.ceil(self.ds_time.shape[0] / self.max_elements)
+        iterations = math.ceil(self.samples_n / self.max_elements)
         job_iter = trange(
             0,
-            self.h5file["data"]["time"].shape[0],
+            self.samples_n,
             self.max_elements,
             desc="timediff",
             leave=False,
@@ -540,14 +567,14 @@
 
         def calc_timediffs(idx_start: int) -> list:
             ds_time = self.ds_time[
-                idx_start : (idx_start + self.max_elements) : self.samples_per_buffer
-            ]
+                idx_start : (idx_start + self.max_elements) : self.CHUNK_SAMPLES_N
+            ].astype(np.int64)
             diffs_np = np.unique(ds_time[1:] - ds_time[0:-1], return_counts=False)
             return list(np.array(diffs_np))
 
         diffs_ll = [calc_timediffs(i) for i in job_iter]
         diffs = {
-            round(self._cal.time.raw_to_si(j) / self.samples_per_buffer, 6)
+            round(self._cal.time.raw_to_si(j) / self.CHUNK_SAMPLES_N, 6)
             for i in diffs_ll
             for j in i
         }
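Note: the new .astype(np.int64) matters because the raw timestamps are stored unsigned. In u64 arithmetic a backwards time-jump wraps around to a huge positive value instead of a negative diff, as a small numpy demo shows:

    import numpy as np

    ts = np.array([2_000_000, 1_000_000], dtype=np.uint64)  # backwards jump
    print(np.diff(ts))                   # [18446744073708551616] - wrapped around
    print(np.diff(ts.astype(np.int64)))  # [-1000000] - meaningful, signed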
@@ -565,7 +592,7 @@
             self._logger.warning(
                 "Time-jumps detected -> expected equal steps, but got: %s s", diffs
             )
-        return (len(diffs) <= 1) and diffs[0] == round(0.1 / self.samples_per_buffer, 6)
+        return (len(diffs) <= 1) and diffs[0] == round(0.1 / self.CHUNK_SAMPLES_N, 6)
 
     def count_errors_in_log(self, group_name: str = "sheep", min_level: int = 40) -> int:
         if group_name not in self.h5file:
@@ -583,7 +610,7 @@
         node: Union[h5py.Dataset, h5py.Group, None] = None,
         *,
         minimal: bool = False,
-    ) -> Dict[str, dict]:
+    ) -> dict[str, dict]:
         """Recursive FN to capture the structure of the file.
 
         :param node: starting node, leave free to go through whole file
@@ -594,7 +621,7 @@
             self._refresh_file_stats()
             return self.get_metadata(self.h5file, minimal=minimal)
 
-        metadata: Dict[str, dict] = {}
+        metadata: dict[str, dict] = {}
         if isinstance(node, h5py.Dataset) and not minimal:
             metadata["_dataset_info"] = {
                 "datatype": str(node.dtype),
@@ -616,7 +643,7 @@
             with contextlib.suppress(yaml.YAMLError):
                 attr_value = yaml.safe_load(attr_value)
         elif "int" in str(type(attr_value)):
-            # TODO: why not isinstance? can it be List[int] other complex type?
+            # TODO: why not isinstance? can it be list[int] other complex type?
             attr_value = int(attr_value)
         else:
             attr_value = float(attr_value)
@@ -675,7 +702,7 @@
         return data != data_1
 
     def gpio_to_waveforms(self, name: Optional[str] = None) -> dict:
-        waveforms: Dict[str, np.ndarray] = {}
+        waveforms: dict[str, np.ndarray] = {}
         if "gpio" not in self.h5file:
             return waveforms
 
shepherd_core/testbed_client/client_abc_fix.py CHANGED
@@ -17,11 +17,12 @@ TODO: Comfort functions missing
 
 from abc import ABC
 from abc import abstractmethod
-from typing import List
+from typing import Any
 from typing import Optional
 
-from ..data_models.base.shepherd import ShpModel
-from ..data_models.base.wrapper import Wrapper
+from shepherd_core.data_models.base.shepherd import ShpModel
+from shepherd_core.data_models.base.wrapper import Wrapper
+
 from .fixtures import Fixtures
 
 
@@ -40,11 +41,11 @@ class AbcClient(ABC):
     """
 
     @abstractmethod
-    def query_ids(self, model_type: str) -> List[int]:
+    def query_ids(self, model_type: str) -> list[int]:
         pass
 
     @abstractmethod
-    def query_names(self, model_type: str) -> List[str]:
+    def query_names(self, model_type: str) -> list[str]:
         pass
 
     @abstractmethod
@@ -54,11 +55,15 @@
         pass
 
     @abstractmethod
-    def try_inheritance(self, model_type: str, values: dict) -> (dict, list):
+    def try_inheritance(
+        self, model_type: str, values: dict[str, Any]
+    ) -> tuple[dict[str, Any], list[str]]:
         # TODO: maybe internal? yes
         pass
 
-    def try_completing_model(self, model_type: str, values: dict) -> (dict, list):
+    def try_completing_model(
+        self, model_type: str, values: dict[str, Any]
+    ) -> tuple[dict[str, Any], list[str]]:
         """Init by name/id, for none existing instances raise Exception.
 
         This is the main entry-point for querying a model (used be the core-lib).
@@ -67,13 +72,12 @@
         try:
             values = self.query_item(model_type, name=values.get("name"), uid=values.get("id"))
         except ValueError as err:
-            raise ValueError(
-                "Query %s by name / ID failed - %s is unknown!", model_type, values
-            ) from err
+            msg = f"Query {model_type} by name / ID failed - {values} is unknown!"
+            raise ValueError(msg) from err
         return self.try_inheritance(model_type, values)
 
     @abstractmethod
-    def fill_in_user_data(self, values: dict) -> dict:
+    def fill_in_user_data(self, values: dict[str, Any]) -> dict[str, Any]:
         # TODO: is it really helpful and needed?
         pass
 
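Note: the old raise passed printf-style arguments straight to ValueError; unlike logging calls, exception constructors never interpolate them, so the message surfaced as a raw tuple. A short demonstration of the difference (the values are made up):

    model_type, values = "Cape", {"id": 42}
    old = ValueError("Query %s by name / ID failed - %s is unknown!", model_type, values)
    print(old)  # ('Query %s by name / ID failed - %s is unknown!', 'Cape', {'id': 42})
    new = ValueError(f"Query {model_type} by name / ID failed - {values} is unknown!")
    print(new)  # Query Cape by name / ID failed - {'id': 42} is unknown!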
@@ -83,7 +87,7 @@ class FixturesClient(AbcClient):
 
     def __init__(self) -> None:
         super().__init__()
-        self._fixtures: Optional[Fixtures] = Fixtures()
+        self._fixtures: Fixtures = Fixtures()
 
     def insert(self, data: ShpModel) -> bool:
         wrap = Wrapper(
@@ -93,10 +97,10 @@
         self._fixtures.insert_model(wrap)
         return True
 
-    def query_ids(self, model_type: str) -> List[int]:
+    def query_ids(self, model_type: str) -> list[int]:
         return list(self._fixtures[model_type].elements_by_id.keys())
 
-    def query_names(self, model_type: str) -> List[str]:
+    def query_names(self, model_type: str) -> list[str]:
         return list(self._fixtures[model_type].elements_by_name.keys())
 
     def query_item(
@@ -108,10 +112,12 @@
             return self._fixtures[model_type].query_name(name)
         raise ValueError("Query needs either uid or name of object")
 
-    def try_inheritance(self, model_type: str, values: dict) -> (dict, list):
+    def try_inheritance(
+        self, model_type: str, values: dict[str, Any]
+    ) -> tuple[dict[str, Any], list[str]]:
         return self._fixtures[model_type].inheritance(values)
 
-    def fill_in_user_data(self, values: dict) -> dict:
+    def fill_in_user_data(self, values: dict[str, Any]) -> dict[str, Any]:
         """Add fake user-data when offline-client is used.
 
         Hotfix until WebClient is working.
shepherd_core/testbed_client/client_web.py CHANGED
@@ -2,15 +2,16 @@
 
 from importlib import import_module
 from pathlib import Path
-from typing import List
+from typing import Any
 from typing import Optional
 from typing import Union
 
 from pydantic import validate_call
 
-from ..commons import testbed_server_default
-from ..data_models.base.shepherd import ShpModel
-from ..data_models.base.wrapper import Wrapper
+from shepherd_core.commons import TESTBED_SERVER_URI
+from shepherd_core.data_models.base.shepherd import ShpModel
+from shepherd_core.data_models.base.wrapper import Wrapper
+
 from .client_abc_fix import AbcClient
 from .user_model import User
 
@@ -37,7 +38,7 @@ class WebClient(AbcClient):
         if not hasattr(self, "_token"):
             # add default values
             self._token: str = "basic_public_access"  # noqa: S105
-            self._server: str = testbed_server_default
+            self._server: str = TESTBED_SERVER_URI
             self._user: Optional[User] = None
             self._key: Optional[str] = None
             self._connected: bool = False
@@ -49,6 +50,8 @@
     # ABC Functions below
 
     def insert(self, data: ShpModel) -> bool:
+        if self._req is None:
+            return False
         wrap = Wrapper(
             datatype=type(data).__name__,
             parameters=data.model_dump(),
@@ -57,10 +60,10 @@
         r.raise_for_status()
         return True
 
-    def query_ids(self, model_type: str) -> List[int]:
+    def query_ids(self, model_type: str) -> list[int]:
         raise NotImplementedError("TODO")
 
-    def query_names(self, model_type: str) -> List[str]:
+    def query_names(self, model_type: str) -> list[str]:
         raise NotImplementedError("TODO")
 
     def query_item(
@@ -68,10 +71,14 @@
     ) -> dict:
         raise NotImplementedError("TODO")
 
-    def try_inheritance(self, model_type: str, values: dict) -> (dict, list):
+    def try_inheritance(
+        self, model_type: str, values: dict[str, Any]
+    ) -> tuple[dict[str, Any], list[str]]:
         raise NotImplementedError("TODO")
 
-    def fill_in_user_data(self, values: dict) -> dict:
+    def fill_in_user_data(self, values: dict[str, Any]) -> dict[str, Any]:
+        if self._user is None:
+            return values
         if values.get("owner") is None:
             values["owner"] = self._user.name
         if values.get("group") is None:
105
112
  return self._query_user_data()
106
113
 
107
114
  def _query_session_key(self) -> bool:
108
- if self._server:
115
+ if self._server and self._req is not None:
109
116
  r = self._req.get(self._server + "/session_key", timeout=2)
110
117
  r.raise_for_status()
111
118
  self._key = r.json()["value"] # TODO: not finished
@@ -113,7 +120,7 @@
         return False
 
     def _query_user_data(self) -> bool:
-        if self._server:
+        if self._server and self._req is not None:
             r = self._req.get(self._server + "/user?token=" + self._token, timeout=2)
             # TODO: possibly a security nightmare (send via json or encrypted via public key?)
             r.raise_for_status()