omnata-plugin-runtime 0.3.28a85__tar.gz → 0.4.0__tar.gz

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: omnata-plugin-runtime
- Version: 0.3.28a85
+ Version: 0.4.0
  Summary: Classes and common runtime components for building and running Omnata Plugins
  Author: James Weakley
  Author-email: james.weakley@omnata.com
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "omnata-plugin-runtime"
- version = "0.3.28-a85"
+ version = "0.4.0"
  description = "Classes and common runtime components for building and running Omnata Plugins"
  authors = ["James Weakley <james.weakley@omnata.com>"]
  readme = "README.md"
@@ -66,8 +66,9 @@ class PluginMessageStreamProgressUpdate(BaseModel):
  message_type: Literal["stream_record_counts"] = "stream_record_counts"
  stream_total_counts: Dict[str, int]
  completed_streams: List[str]
- # older runtime versions didn't have this, so the sync engine can't expect it
+ # older runtime versions didn't have these, so the sync engine can't expect it
  stream_errors: Optional[Dict[str,str]]
+ total_records_estimate: Optional[Dict[str,int]]


  class PluginMessageCancelledStreams(BaseModel):
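A minimal sketch of what the extended progress-update message looks like with the new optional field populated. The model is reproduced from the hunk above; the import path is not shown there, so the class is restated standalone, and the example stream names and counts are invented for illustration.

```python
from typing import Dict, List, Literal, Optional
from pydantic import BaseModel

class PluginMessageStreamProgressUpdate(BaseModel):
    """Progress update sent to the sync engine (fields as per the hunk above)."""
    message_type: Literal["stream_record_counts"] = "stream_record_counts"
    stream_total_counts: Dict[str, int]
    completed_streams: List[str]
    # older runtime versions didn't send these, so they remain optional
    stream_errors: Optional[Dict[str, str]]
    total_records_estimate: Optional[Dict[str, int]]

# Example values are invented; the estimate lets the engine report percentage progress.
update = PluginMessageStreamProgressUpdate(
    stream_total_counts={"contacts": 1200, "orders": 0},
    completed_streams=["contacts"],
    stream_errors=None,
    total_records_estimate={"orders": 50000},
)
print(update.json())
```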
@@ -906,6 +906,7 @@ class InboundSyncRequest(SyncRequest):
  self._stream_record_counts: Dict[str, int] = {
  stream_name: 0 for stream_name in self._streams_dict.keys()
  }
+ self._total_records_estimate: Optional[Dict[str,int]] = {}
  self._stream_change_counts: Dict[str, int] = {
  stream_name: 0 for stream_name in self._streams_dict.keys()
  }
@@ -933,6 +934,7 @@ class InboundSyncRequest(SyncRequest):
  ] = self._stream_record_counts[stream_name] + total_length
  results.extend(non_empty_dfs) # remove any None/empty dataframes
  stream_names.append(stream_name)
+ self._session.sql("begin").collect()
  if len(results) > 0:
  logger.info(
  f"Applying {len(results)} batches of queued results"
@@ -943,6 +945,7 @@ class InboundSyncRequest(SyncRequest):
  self._apply_results_dataframe(stream_names, all_dfs)
  # update the stream state object too
  self._apply_latest_states()
+ self._session.sql("commit").collect()
  for stream_name in stream_names:
  self._apply_results[stream_name] = None
  self._apply_results = {}
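The two added lines above wrap the result writes and the stream-state update in one explicit Snowflake transaction, so the sync engine never sees applied record batches without the matching latest state. A sketch of the pattern, assuming a Snowpark Session; the rollback handling is an assumption, since the diff only shows the begin and commit.

```python
from snowflake.snowpark import Session

def apply_atomically(session: Session, write_results, update_state):
    """Run result writes and the state update inside one transaction."""
    session.sql("begin").collect()
    try:
        write_results()   # e.g. the _apply_results_dataframe(...) step above
        update_state()    # e.g. the _apply_latest_states() step above
        session.sql("commit").collect()
    except Exception:
        # Assumed behaviour: undo the partial batch if anything fails mid-apply.
        session.sql("rollback").collect()
        raise
```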
@@ -975,7 +978,8 @@ class InboundSyncRequest(SyncRequest):
  new_progress_update = PluginMessageStreamProgressUpdate(
  stream_total_counts=self._stream_record_counts,
  completed_streams=self._completed_streams,
- stream_errors=self._omnata_log_handler.stream_global_errors
+ stream_errors=self._omnata_log_handler.stream_global_errors,
+ total_records_estimate=self._total_records_estimate
  )
  if self._last_stream_progress_update is None or new_progress_update != self._last_stream_progress_update:
  self._plugin_message(
@@ -1109,6 +1113,12 @@ class InboundSyncRequest(SyncRequest):
  self._completed_streams.append(stream_name)
  # dedup just in case it's called twice
  self._completed_streams = list(set(self._completed_streams))
+
+ def set_stream_record_count(self, stream_name: str, count: int):
+ """
+ Sets the record count for a stream, used to provide progress updates.
+ """
+ self._stream_record_counts[stream_name] = count

  def _enqueue_state(self, stream_name: str, new_state: Any):
  """
@@ -132,19 +132,15 @@ class ApiLimits(SubscriptableBaseModel):
  ):
  longest_wait = rate_limit_state.wait_until
  for request_rate in self.request_rates:
- print(request_rate)
  if rate_limit_state.previous_request_timestamps is not None and len(rate_limit_state.previous_request_timestamps) > 0:
  previous_request_timestamps = rate_limit_state.get_relevant_history(request_rate)
- print(f"previous_request_timestamps: {len(previous_request_timestamps)}")
  request_index = request_rate.request_count - 1
  if request_index > len(previous_request_timestamps) - 1:
  continue # we have not yet made enough requests to hit this rate limit
  request_index = len(previous_request_timestamps) - 1
  timestamp_at_horizon = previous_request_timestamps[request_index]
- print("timestamp_at_horizon: " + str(timestamp_at_horizon))
  now = datetime.datetime.now().astimezone(datetime.timezone.utc)
  seconds_since_horizon = (timestamp_at_horizon - now).total_seconds()
- print("seconds_since_horizon: " + str(seconds_since_horizon))
  next_allowed_request = timestamp_at_horizon + datetime.timedelta(
  seconds=request_rate.number_of_seconds()
  )
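This hunk only removes debug print() calls from the rate-limit wait calculation. If that diagnostic detail is still wanted, a sketch of the equivalent at debug level, assuming the same module-level logger used elsewhere in this file:

```python
logger.debug("evaluating request_rate: %s", request_rate)
logger.debug("previous_request_timestamps: %d", len(previous_request_timestamps))
logger.debug("timestamp_at_horizon: %s, seconds_since_horizon: %s",
             timestamp_at_horizon, seconds_since_horizon)
```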