mainsequence 2.0.4rc0__py3-none-any.whl → 3.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. mainsequence/cli/cli.py +4 -7
  2. mainsequence/cli/ssh_utils.py +17 -2
  3. mainsequence/client/__init__.py +3 -3
  4. mainsequence/client/base.py +3 -3
  5. mainsequence/client/data_sources_interfaces/timescale.py +20 -19
  6. mainsequence/client/exceptions.py +11 -0
  7. mainsequence/client/models_helpers.py +2 -2
  8. mainsequence/client/models_tdag.py +104 -87
  9. mainsequence/client/models_vam.py +9 -9
  10. mainsequence/dashboards/streamlit/core/theme.py +128 -109
  11. mainsequence/dashboards/streamlit/scaffold.py +3 -0
  12. mainsequence/instruments/__init__.py +1 -1
  13. mainsequence/instruments/data_interface/__init__.py +1 -1
  14. mainsequence/instruments/data_interface/data_interface.py +31 -11
  15. mainsequence/instruments/instruments/bond.py +8 -0
  16. mainsequence/instruments/pricing_models/indices.py +26 -14
  17. mainsequence/instruments/settings.py +2 -162
  18. mainsequence/tdag/config.py +2 -2
  19. mainsequence/tdag/data_nodes/build_operations.py +3 -3
  20. mainsequence/tdag/data_nodes/data_nodes.py +23 -23
  21. mainsequence/tdag/data_nodes/persist_managers.py +121 -121
  22. mainsequence/tdag/data_nodes/run_operations.py +25 -25
  23. mainsequence/virtualfundbuilder/contrib/apps/portfolio_report_app.py +1 -1
  24. mainsequence/virtualfundbuilder/contrib/prices/data_nodes.py +2 -2
  25. mainsequence/virtualfundbuilder/data_nodes.py +1 -1
  26. mainsequence/virtualfundbuilder/portfolio_interface.py +7 -7
  27. mainsequence/virtualfundbuilder/utils.py +2 -2
  28. {mainsequence-2.0.4rc0.dist-info → mainsequence-3.0.2.dist-info}/METADATA +1 -1
  29. {mainsequence-2.0.4rc0.dist-info → mainsequence-3.0.2.dist-info}/RECORD +33 -32
  30. {mainsequence-2.0.4rc0.dist-info → mainsequence-3.0.2.dist-info}/WHEEL +0 -0
  31. {mainsequence-2.0.4rc0.dist-info → mainsequence-3.0.2.dist-info}/entry_points.txt +0 -0
  32. {mainsequence-2.0.4rc0.dist-info → mainsequence-3.0.2.dist-info}/licenses/LICENSE +0 -0
  33. {mainsequence-2.0.4rc0.dist-info → mainsequence-3.0.2.dist-info}/top_level.txt +0 -0
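
Note on the hunks below: they show mainsequence/tdag/data_nodes/persist_managers.py (+121 -121), where the dominant change is a consistent rename of the persistence API: LocalTimeSerie becomes DataNodeUpdate, DynamicTableMetaData becomes DataNodeStorage, LocalTimeSerieUpdateDetails becomes DataNodeUpdateDetails, and the metadata/local_metadata accessors become data_node_storage/data_node_update. A rough before/after sketch for downstream callers (the manager variable is hypothetical; only the class and attribute names come from this diff):

    # 2.0.4rc0 (old names)
    table_meta = manager.metadata          # DynamicTableMetaData
    local_meta = manager.local_metadata    # LocalTimeSerie

    # 3.0.2 (new names)
    storage = manager.data_node_storage    # DataNodeStorage
    update = manager.data_node_update      # DataNodeUpdate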
@@ -5,12 +5,12 @@ import os
 from mainsequence.logconf import logger
 
 
-from mainsequence.client import (LocalTimeSerie, UniqueIdentifierRangeMap,
+from mainsequence.client import (DataNodeUpdate, UniqueIdentifierRangeMap,
                                  LocalTimeSeriesDoesNotExist,
-                                 DynamicTableDoesNotExist, DynamicTableDataSource, TDAG_CONSTANTS as CONSTANTS, DynamicTableMetaData,
+                                 DynamicTableDoesNotExist, DynamicTableDataSource, TDAG_CONSTANTS as CONSTANTS, DataNodeStorage,
                                  UpdateStatistics, DoesNotExist)
 
-from mainsequence.client.models_tdag import LocalTimeSerieUpdateDetails
+from mainsequence.client.models_tdag import DataNodeUpdateDetails
 import mainsequence.client as ms_client
 import json
 import threading
@@ -74,7 +74,7 @@ def get_data_node_source_code_git_hash(DataNodeClass: "DataNode") -> str:
 class APIPersistManager:
     """
     Manages persistence for time series data accessed via an API.
-    It handles asynchronous fetching of metadata to avoid blocking operations.
+    It handles asynchronous fetching of data_node_storage to avoid blocking operations.
     """
 
     def __init__(self, data_source_id: int, storage_hash: str):
@@ -91,40 +91,40 @@ class APIPersistManager:
         logger.debug(f"Initializing Time Serie {self.storage_hash} as APIDataNode")
 
         # Create a Future to hold the local metadata when ready.
-        self._metadata_future = Future()
+        self._data_node_storage_future = Future()
         # Register the future globally.
-        future_registry.add_future(self._metadata_future)
+        future_registry.add_future(self._data_node_storage_future)
         # Launch the REST request in a separate, non-daemon thread.
-        thread = threading.Thread(target=self._init_metadata,
-                                  name=f"ApiMetaDataThread-{self.storage_hash}",
+        thread = threading.Thread(target=self._init_data_node_storage,
+                                  name=f"ApiDataNodeStorageThread-{self.storage_hash}",
                                   daemon=False)
         thread.start()
 
 
     @property
-    def metadata(self) -> DynamicTableMetaData:
+    def data_node_storage(self) -> DataNodeStorage:
         """Lazily block and cache the result if needed."""
-        if not hasattr(self, '_metadata_cached'):
+        if not hasattr(self, '_data_node_storage_cached'):
             # This call blocks until the future is resolved.
-            self._metadata_cached = self._metadata_future.result()
-        return self._metadata_cached
+            self._data_node_storage_cached = self._data_node_storage_future.result()
+        return self._data_node_storage_cached
 
-    def _init_metadata(self) -> None:
+    def _init_data_node_storage(self) -> None:
         """
-        Performs the REST request to fetch local metadata asynchronously.
+        Performs the REST request to fetch local data_node_storage asynchronously.
         Sets the result or exception on the future object.
         """
         try:
-            result = DynamicTableMetaData.get_or_none(storage_hash=self.storage_hash,
+            result = DataNodeStorage.get_or_none(storage_hash=self.storage_hash,
                                                  data_source__id=self.data_source_id,
                                                  include_relations_detail=True
                                                  )
-            self._metadata_future.set_result(result)
+            self._data_node_storage_future.set_result(result)
         except Exception as exc:
-            self._metadata_future.set_exception(exc)
+            self._data_node_storage_future.set_exception(exc)
         finally:
            # Remove the future from the global registry once done.
-            future_registry.remove_future(self._metadata_future)
+            future_registry.remove_future(self._data_node_storage_future)
 
     def get_df_between_dates(self, *args, **kwargs) -> pd.DataFrame:
         """
@@ -133,12 +133,12 @@ class APIPersistManager:
         Returns:
             A pandas DataFrame with the requested data.
         """
-        filtered_data = self.metadata.get_data_between_dates_from_api(*args, **kwargs)
+        filtered_data = self.data_node_storage.get_data_between_dates_from_api(*args, **kwargs)
         if filtered_data.empty:
             return filtered_data
 
         # fix types
-        stc = self.metadata.sourcetableconfiguration
+        stc = self.data_node_storage.sourcetableconfiguration
         filtered_data[stc.time_index_name] = pd.to_datetime(filtered_data[stc.time_index_name], utc=True)
         column_filter = kwargs.get("columns") or stc.column_dtypes_map.keys()
         for c in column_filter:
@@ -158,8 +158,8 @@ class PersistManager:
                  update_hash: str,
                  description: Optional[str] = None,
                  class_name: Optional[str] = None,
-                 metadata: Optional[Dict] = None,
-                 local_metadata: Optional[LocalTimeSerie] = None
+                 data_node_storage: Optional[Dict] = None,
+                 data_node_update: Optional[DataNodeUpdate] = None
                  ):
         """
         Initializes the PersistManager.
@@ -169,14 +169,14 @@ class PersistManager:
             update_hash: The local hash identifier for the time series.
             description: An optional description for the time series.
             class_name: The name of the DataNode class.
-            metadata: Optional remote metadata dictionary.
-            local_metadata: Optional local metadata object.
+            data_node_storage: Optional remote data_node_storage dictionary.
+            data_node_update: Optional local data_node_storage object.
         """
         self.data_source: DynamicTableDataSource = data_source
         self.update_hash: str = update_hash
-        if local_metadata is not None and metadata is None:
+        if data_node_update is not None and data_node_storage is None:
             # query remote storage_hash
-            metadata = local_metadata.remote_table
+            data_node_storage = data_node_update.data_node_storage
         self.description: Optional[str] = description
         self.logger = logger
 
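
Per the updated constructor, callers may pass either object: when only data_node_update is supplied, the remote record is derived from it via data_node_update.data_node_storage. A hypothetical call showing the two equivalent entry points (ds, storage_dict, and update_obj are placeholders):

    # Supply the remote record directly...
    pm = PersistManager(data_source=ds, update_hash="abc123",
                        data_node_storage=storage_dict)
    # ...or let the manager derive it from the local update object.
    pm = PersistManager(data_source=ds, update_hash="abc123",
                        data_node_update=update_obj)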
@@ -184,19 +184,19 @@ class PersistManager:
         self.class_name: Optional[str] = class_name
 
         # Private members for managing lazy asynchronous retrieval.
-        self._local_metadata_future: Optional[Future] = None
-        self._local_metadata_cached: Optional[LocalTimeSerie] = None
-        self._local_metadata_lock = threading.Lock()
-        self._metadata_cached: Optional[DynamicTableMetaData] = None
+        self._data_node_update_future: Optional[Future] = None
+        self._data_node_update_cached: Optional[DataNodeUpdate] = None
+        self._data_node_update_lock = threading.Lock()
+        self._data_node_storage_cached: Optional[DataNodeStorage] = None
 
         if self.update_hash is not None:
-            self.synchronize_metadata(local_metadata=local_metadata)
+            self.synchronize_data_node_update(data_node_update=data_node_update)
 
-    def synchronize_metadata(self, local_metadata: Optional[LocalTimeSerie]) -> None:
-        if local_metadata is not None:
-            self.set_local_metadata(local_metadata)
+    def synchronize_data_node_update(self, data_node_update: Optional[DataNodeUpdate]) -> None:
+        if data_node_update is not None:
+            self.set_data_node_update(data_node_update)
         else:
-            self.set_local_metadata_lazy(force_registry=True, include_relations_detail=True)
+            self.set_data_node_update_lazy(force_registry=True, include_relations_detail=True)
 
     @classmethod
     def get_from_data_type(cls, data_source: DynamicTableDataSource, *args, **kwargs) -> 'PersistManager':
@@ -215,53 +215,53 @@ class PersistManager:
         else:
             return TimeScaleLocalPersistManager(data_source=data_source, *args, **kwargs)
 
-    def set_local_metadata(self, local_metadata: LocalTimeSerie) -> None:
+    def set_data_node_update(self, data_node_update: DataNodeUpdate) -> None:
         """
-        Caches the local metadata object for lazy queries
+        Caches the local data_node_storage object for lazy queries
 
         Args:
-            local_metadata: The LocalTimeSerie object to cache.
+            data_node_update: The DataNodeUpdate object to cache.
         """
-        self._local_metadata_cached = local_metadata
+        self._data_node_update_cached = data_node_update
 
     @property
-    def local_metadata(self) -> LocalTimeSerie:
+    def data_node_update(self) -> DataNodeUpdate:
         """Lazily block and retrieve the local metadata, caching the result."""
-        with self._local_metadata_lock:
-            if self._local_metadata_cached is None:
-                if self._local_metadata_future is None:
+        with self._data_node_update_lock:
+            if self._data_node_update_cached is None:
+                if self._data_node_update_future is None:
                     # If no future is running, start one.
-                    self.set_local_metadata_lazy(force_registry=True)
+                    self.set_data_node_update_lazy(force_registry=True)
                 # Block until the future completes and cache its result.
-                local_metadata = self._local_metadata_future.result()
-                self.set_local_metadata(local_metadata)
-            return self._local_metadata_cached
+                data_node_update = self._data_node_update_future.result()
+                self.set_data_node_update(data_node_update)
+            return self._data_node_update_cached
 
-    # Define a callback that will launch set_local_metadata_lazy after the remote update is complete.
+    # Define a callback that will launch set_local_data_node_lazy after the remote update is complete.
     @property
-    def metadata(self) -> Optional[DynamicTableMetaData]:
+    def data_node_storage(self) -> Optional[DataNodeStorage]:
         """
-        Lazily retrieves and returns the remote metadata.
+        Lazily retrieves and returns the remote data_node_storage.
         """
-        if self.local_metadata is None:
+        if self.data_node_update is None:
             return None
-        if self.local_metadata.remote_table is not None:
-            if self.local_metadata.remote_table.sourcetableconfiguration is not None:
-                if self.local_metadata.remote_table.build_meta_data.get("initialize_with_default_partitions",True) == False:
-                    if self.local_metadata.remote_table.data_source.related_resource_class_type in CONSTANTS.DATA_SOURCE_TYPE_TIMESCALEDB:
+        if self.data_node_update.data_node_storage is not None:
+            if self.data_node_update.data_node_storage.sourcetableconfiguration is not None:
+                if self.data_node_update.data_node_storage.build_meta_data.get("initialize_with_default_partitions",True) == False:
+                    if self.data_node_update.data_node_storage.data_source.related_resource_class_type in CONSTANTS.DATA_SOURCE_TYPE_TIMESCALEDB:
                         self.logger.warning("Default Partitions will not be initialized ")
 
-        return self.local_metadata.remote_table
+        return self.data_node_update.data_node_storage
 
     @property
     def local_build_configuration(self) -> Dict:
-        return self.local_metadata.build_configuration
+        return self.data_node_update.build_configuration
 
     @property
     def local_build_metadata(self) -> Dict:
-        return self.local_metadata.build_meta_data
+        return self.data_node_update.build_meta_data
 
-    def set_local_metadata_lazy_callback(self, fut: Future) -> None:
+    def set_data_node_update_lazy_callback(self, fut: Future) -> None:
         """
         Callback to handle the result of an asynchronous task and trigger a metadata refresh.
         """
@@ -273,29 +273,29 @@ class PersistManager:
             # For example: logger.error("Remote build update failed: %s", exc)
             raise exc
         # Launch the local metadata update regardless of the outcome.
-        self.set_local_metadata_lazy(force_registry=True)
+        self.set_data_node_update_lazy(force_registry=True)
 
-    def set_local_metadata_lazy(self, force_registry: bool = True, include_relations_detail: bool = True) -> None:
+    def set_data_node_update_lazy(self, force_registry: bool = True, include_relations_detail: bool = True) -> None:
         """
-        Initiates a lazy, asynchronous fetch of the local metadata.
+        Initiates a lazy, asynchronous fetch of the local data_node_update.
 
         Args:
             force_registry: If True, forces a refresh even if cached data exists.
             include_relations_detail: If True, includes relationship details in the fetch.
         """
-        with self._local_metadata_lock:
+        with self._data_node_update_lock:
             if force_registry:
-                self._local_metadata_cached = None
+                self._data_node_update_cached = None
             # Capture the new future in a local variable.
             new_future = Future()
-            self._local_metadata_future = new_future
+            self._data_node_update_future = new_future
             # Register the new future.
             future_registry.add_future(new_future)
 
-            def _get_or_none_local_metadata():
+            def _get_or_none_data_node_update():
                 """Perform the REST request asynchronously."""
                 try:
-                    result = LocalTimeSerie.get_or_none(
+                    result = DataNodeUpdate.get_or_none(
                         update_hash=self.update_hash,
                         remote_table__data_source__id=self.data_source.id,
                         include_relations_detail=include_relations_detail
@@ -309,8 +309,8 @@ class PersistManager:
                     # Remove the future from the global registry once done.
                     future_registry.remove_future(new_future)
 
-            thread = threading.Thread(target=_get_or_none_local_metadata,
-                                      name=f"LocalMetadataThreadPM-{self.update_hash}",
+            thread = threading.Thread(target=_get_or_none_data_node_update,
+                                      name=f"LocalDataNodeStorageThreadPM-{self.update_hash}",
                                       daemon=False)
             thread.start()
 
@@ -325,10 +325,10 @@ class PersistManager:
             is_api: True if the target is an APIDataNode
         """
         if not is_api:
-            self.local_metadata.depends_on_connect(target_time_serie_id=new_ts.local_time_serie.id)
+            self.data_node_update.depends_on_connect(target_time_serie_id=new_ts.data_node_update.id)
         else:
             try:
-                self.local_metadata.depends_on_connect_to_api_table(target_table_id=new_ts.local_persist_manager.metadata.id)
+                self.data_node_update.depends_on_connect_to_api_table(target_table_id=new_ts.local_persist_manager.data_node_storage.id)
             except Exception as exc:
                 raise exc
 
@@ -435,21 +435,21 @@ class PersistManager:
         Returns:
             A pandas DataFrame with dependency and priority information.
         """
-        depth_df = self.local_metadata.get_all_dependencies_update_priority()
+        depth_df = self.data_node_update.get_all_dependencies_update_priority()
         return depth_df
 
     def set_ogm_dependencies_linked(self) -> None:
-        self.local_metadata.patch(ogm_dependencies_linked=True)
+        self.data_node_update.patch(ogm_dependencies_linked=True)
 
     @property
-    def update_details(self) -> Optional[LocalTimeSerieUpdateDetails]:
+    def update_details(self) -> Optional[DataNodeUpdateDetails]:
         """Returns the update details associated with the local time series."""
-        return self.local_metadata.localtimeserieupdatedetails
+        return self.data_node_update.update_details
 
     @property
     def run_configuration(self) -> Optional[Dict]:
         """Returns the run configuration from the local metadata."""
-        return self.local_metadata.run_configuration
+        return self.data_node_update.run_configuration
 
     @property
     def source_table_configuration(self) -> Optional[Dict]:
@@ -462,7 +462,7 @@ class PersistManager:
         """
         Updates the source code and git hash for the remote table.
         """
-        self.local_metadata.remote_table = self.metadata.patch(
+        self.data_node_update.data_node_storage = self.metadata.patch(
             time_serie_source_code_git_hash=git_hash_id,
             time_serie_source_code=source_code,
         )
@@ -471,8 +471,8 @@ class PersistManager:
 
     def add_tags(self, tags: List[str]) -> None:
         """Adds tags to the local time series metadata if they don't already exist."""
-        if any([t not in self.local_metadata.tags for t in tags]) == True:
-            self.local_metadata.add_tags(tags=tags)
+        if any([t not in self.data_node_update.tags for t in tags]) == True:
+            self.data_node_update.add_tags(tags=tags)
 
     @property
     def persist_size(self) -> int:
@@ -498,16 +498,16 @@ class PersistManager:
             remote_configuration: The build configuration for the remote table.
             remote_build_metadata: The build metadata for the remote table.
         """
-        # This ensures that later accesses to local_metadata will block for the new value.
-        with self._local_metadata_lock:
-            self._local_metadata_future = Future()
-            future_registry.add_future(self._local_metadata_future)
+        # This ensures that later accesses to data_node_update will block for the new value.
+        with self._data_node_update_lock:
+            self._data_node_update_future = Future()
+            future_registry.add_future(self._data_node_update_future)
 
         kwargs = dict(
             build_configuration=remote_configuration, )
 
 
-        local_metadata_kwargs = dict(update_hash=self.update_hash,
+        data_node_update_kwargs = dict(update_hash=self.update_hash,
                                      build_configuration=local_configuration,
                                      )
 
@@ -518,12 +518,12 @@ class PersistManager:
         def _patch_build_configuration():
             """Helper function for patching build configuration asynchronously."""
             try:
-                # Execute the patch operation; this method is expected to return a LocalTimeSerie-like instance.
-                result = DynamicTableMetaData.patch_build_configuration(
+                # Execute the patch operation; this method is expected to return a DataNodeUpdate-like instance.
+                result = DataNodeStorage.patch_build_configuration(
                     remote_table_patch=kwargs,
                     data_source_id=self.data_source.id,
                     build_meta_data=remote_build_metadata,
-                    local_table_patch=local_metadata_kwargs,
+                    local_table_patch=data_node_update_kwargs,
                 )
                 patch_future.set_result(True)  # success
             except Exception as exc:
@@ -539,7 +539,7 @@ class PersistManager:
         )
         thread.start()
 
-        patch_future.add_done_callback(self.set_local_metadata_lazy_callback)
+        patch_future.add_done_callback(self.set_data_node_update_lazy_callback)
 
 
     def local_persist_exist_set_config(
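
The add_done_callback call in this hunk is how the file chains a cache refresh onto an asynchronous patch: when the worker thread resolves patch_future, the callback re-raises any worker exception and then re-triggers the lazy fetch, replacing the cached data_node_update. A reduced sketch of the chain (refresh_cache stands in for set_data_node_update_lazy_callback):

    import threading
    from concurrent.futures import Future

    def refresh_cache(fut: Future) -> None:
        # The future is already resolved inside a done-callback.
        exc = fut.exception()
        if exc is not None:
            raise exc  # surface worker failures, as the diffed callback does
        print("patch done -> refetch data_node_update")

    patch_future = Future()
    patch_future.add_done_callback(refresh_cache)
    threading.Thread(target=lambda: patch_future.set_result(True)).start()

Done-callbacks run on the thread that completes the future, which is consistent with the diffed code doing the follow-up fetch on yet another background thread rather than blocking in the callback.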
@@ -579,13 +579,13 @@ class PersistManager:
                 )
 
 
-                dtd_metadata = DynamicTableMetaData.get_or_create(**kwargs)
+                dtd_metadata = DataNodeStorage.get_or_create(**kwargs)
                 storage_hash = dtd_metadata.storage_hash
             except Exception as e:
                 self.logger.exception(f"{storage_hash} Could not set meta data in DB for P")
                 raise e
         else:
-            self.set_local_metadata_lazy(force_registry=True, include_relations_detail=True)
+            self.set_data_node_update_lazy(force_registry=True, include_relations_detail=True)
             storage_hash = self.metadata.storage_hash
 
         local_table_exist = self._verify_local_ts_exists(storage_hash=storage_hash, local_configuration=local_configuration)
@@ -597,12 +597,12 @@ class PersistManager:
         Verifies that the local time series exists in the ORM, creating it if necessary.
         """
         local_build_configuration = None
-        if self.local_metadata is not None:
+        if self.data_node_update is not None:
             local_build_configuration, local_build_metadata = self.local_build_configuration, self.local_build_metadata
         if local_build_configuration is None:
 
-            logger.debug(f"local_metadata {self.update_hash} does not exist creating")
-            local_update = LocalTimeSerie.get_or_none(update_hash=self.update_hash,
+            logger.debug(f"data_node_update {self.update_hash} does not exist creating")
+            local_update = DataNodeUpdate.get_or_none(update_hash=self.update_hash,
                                                       remote_table__data_source__id=self.data_source.id)
             if local_update is None:
                 local_build_metadata = local_configuration[
@@ -615,11 +615,11 @@ class PersistManager:
                                        data_source_id=self.data_source.id
                                        )
 
-                local_metadata = LocalTimeSerie.get_or_create(**metadata_kwargs,)
+                data_node_update = DataNodeUpdate.get_or_create(**metadata_kwargs,)
             else:
-                local_metadata = local_update
+                data_node_update = local_update
 
-            self.set_local_metadata(local_metadata=local_metadata)
+            self.set_data_node_update(data_node_update=data_node_update)
 
 
     def _verify_insertion_format(self, temp_df: pd.DataFrame) -> None:
@@ -634,12 +634,12 @@ class PersistManager:
         Asynchronously builds or updates the update details for the time series.
         """
         update_kwargs=dict(source_class_name=source_class_name,
-                           local_metadata=json.loads(self.local_metadata.model_dump_json())
+                           local_metadata=json.loads(self.data_node_update.model_dump_json())
                            )
-        # This ensures that later accesses to local_metadata will block for the new value.
-        with self._local_metadata_lock:
-            self._local_metadata_future = Future()
-            future_registry.add_future(self._local_metadata_future)
+        # This ensures that later accesses to data_node_update will block for the new value.
+        with self._data_node_update_lock:
+            self._data_node_update_future = Future()
+            future_registry.add_future(self._data_node_update_future)
 
         # Create a future for the remote update task and register it.
         future = Future()
@@ -648,7 +648,7 @@ class PersistManager:
         def _update_task():
             try:
                 # Run the remote build/update details task.
-                self.local_metadata.remote_table.build_or_update_update_details(**update_kwargs)
+                self.data_node_update.data_node_storage.build_or_update_update_details(**update_kwargs)
                 future.set_result(True)  # Signal success
             except Exception as exc:
                 future.set_exception(exc)
@@ -664,7 +664,7 @@ class PersistManager:
         thread.start()
 
         # Attach the callback to the future.
-        future.add_done_callback(self.set_local_metadata_lazy_callback)
+        future.add_done_callback(self.set_data_node_update_lazy_callback)
 
     def patch_table(self, **kwargs) -> None:
         """Patches the remote metadata table with the given keyword arguments."""
@@ -676,8 +676,8 @@ class PersistManager:
 
     def open_for_everyone(self, open_for_everyone: bool = True) -> None:
         """Sets the 'open_for_everyone' flag on local, remote, and source table configurations."""
-        if not self.local_metadata.open_for_everyone:
-            self.local_metadata.patch(open_for_everyone=open_for_everyone)
+        if not self.data_node_update.open_for_everyone:
+            self.data_node_update.patch(open_for_everyone=open_for_everyone)
 
         if not self.metadata.open_for_everyone:
             self.metadata.patch(open_for_everyone=open_for_everyone)
@@ -694,7 +694,7 @@ class PersistManager:
         Retrieves a DataFrame from the data source between specified dates.
         """
         filtered_data = self.data_source.get_data_by_time_index(
-            local_metadata=self.local_metadata,
+            data_node_update=self.data_node_update,
             *args, **kwargs
         )
         return filtered_data
@@ -702,14 +702,14 @@ class PersistManager:
     def set_column_metadata(self,
                             columns_metadata: Optional[List[ms_client.ColumnMetaData]]
                             ) -> None:
-        if self.metadata:
-            if self.metadata.sourcetableconfiguration != None:
-                if self.metadata.sourcetableconfiguration.columns_metadata is not None:
+        if self.data_node_storage:
+            if self.data_node_storage.sourcetableconfiguration != None:
+                if self.data_node_storage.sourcetableconfiguration.columns_metadata is not None:
                     if columns_metadata is None:
                         self.logger.info(f"get_column_metadata method not implemented")
                         return
 
-                    self.metadata.sourcetableconfiguration.set_or_update_columns_metadata(
+                    self.data_node_storage.sourcetableconfiguration.set_or_update_columns_metadata(
                         columns_metadata=columns_metadata)
 
     def set_table_metadata(self,
@@ -722,7 +722,7 @@ class PersistManager:
         including its description, frequency, and associated assets, based on the
         configuration returned by `_get_time_series_meta_details`.
         """
-        if not (self.metadata):
+        if not (self.data_node_storage):
             self.logger.warning("metadata not set")
             return
 
@@ -731,15 +731,15 @@ class PersistManager:
             return
 
         # 2. Get or create the MarketsTimeSeriesDetails object in the backend.
-        source_table_id = self.metadata.patch(**table_metadata.model_dump())
+        source_table_id = self.data_node_storage.patch(**table_metadata.model_dump())
 
     def delete_table(self) -> None:
         if self.data_source.related_resource.class_type == "duck_db":
             from mainsequence.client.data_sources_interfaces.duckdb import DuckDBInterface
             db_interface = DuckDBInterface()
-            db_interface.drop_table(self.metadata.storage_hash)
+            db_interface.drop_table(self.data_node_storage.storage_hash)
 
-        self.metadata.delete()
+        self.data_node_storage.delete()
 
     @tracer.start_as_current_span("TS: Persist Data")
     def persist_updated_data(self,
@@ -760,7 +760,7 @@ class PersistManager:
         if overwrite == True:
             self.logger.warning(f"Values will be overwritten")
 
-        self._local_metadata_cached = self.local_metadata.upsert_data_into_table(
+        self._data_node_update_cached = self.data_node_update.upsert_data_into_table(
             data=temp_df,
             data_source=self.data_source,
 
@@ -780,17 +780,17 @@ class PersistManager:
         Returns:
             A UpdateStatistics object with the latest statistics.
         """
-        if isinstance(self.metadata, int):
-            self.set_local_metadata_lazy(force_registry=True, include_relations_detail=True)
+        if isinstance(self.data_node_storage, int):
+            self.set_data_node_update_lazy(force_registry=True, include_relations_detail=True)
 
-        if self.metadata.sourcetableconfiguration is None:
+        if self.data_node_storage.sourcetableconfiguration is None:
             return ms_client.UpdateStatistics()
 
-        update_stats = self.metadata.sourcetableconfiguration.get_data_updates()
+        update_stats = self.data_node_storage.sourcetableconfiguration.get_data_updates()
         return update_stats
 
     def is_local_relation_tree_set(self) -> bool:
-        return self.local_metadata.ogm_dependencies_linked
+        return self.data_node_update.ogm_dependencies_linked
 
 
 
@@ -806,7 +806,7 @@ class TimeScaleLocalPersistManager(PersistManager):
     Main Controler to interacti with backend
     """
     def get_table_schema(self,table_name):
-        return self.metadata["sourcetableconfiguration"]["column_dtypes_map"]
+        return self.data_node_storage["sourcetableconfiguration"]["column_dtypes_map"]
 
 