omnata-plugin-runtime 0.4.4.tar.gz → 0.4.5a100.tar.gz

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: omnata-plugin-runtime
-Version: 0.4.4
+Version: 0.4.5a100
 Summary: Classes and common runtime components for building and running Omnata Plugins
 Author: James Weakley
 Author-email: james.weakley@omnata.com
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "omnata-plugin-runtime"
-version = "0.4.4"
+version = "0.4.5-a100"
 description = "Classes and common runtime components for building and running Omnata Plugins"
 authors = ["James Weakley <james.weakley@omnata.com>"]
 readme = "README.md"
@@ -12,7 +12,7 @@ from pydantic import BaseModel, parse_obj_as # pylint: disable=no-name-in-modul
 import pydantic.json
 from snowflake.snowpark import Session
 
-from .api import SyncRequestPayload, handle_proc_result
+from .api import PluginMessageStreamProgressUpdate, SyncRequestPayload, handle_proc_result
 from .configuration import (
     ConnectionConfigurationParameters,
     InboundSyncConfigurationParameters,
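
Note: the newly imported PluginMessageStreamProgressUpdate is constructed in the next hunk with four keyword arguments. Its definition lives in .api and is not shown in this diff; a minimal sketch of what it presumably looks like, with field types assumed from how the values are populated, would be:

    from typing import Dict, List, Optional
    from pydantic import BaseModel  # pydantic v1 style, matching the imports above

    class PluginMessageStreamProgressUpdate(BaseModel):
        # per-stream record counts gathered during the inbound sync (assumed Dict[str, int])
        stream_total_counts: Dict[str, int]
        # names of streams that finished successfully (assumed List[str])
        completed_streams: List[str]
        # stream name -> error message captured by the log handler (assumed Dict[str, str])
        stream_errors: Dict[str, str]
        # overall estimate of records to process, if known (assumed Optional[int])
        total_records_estimate: Optional[int] = None
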
@@ -218,11 +218,22 @@ class PluginEntrypoint:
                 inbound_sync_request.update_activity("Staging remaining records")
                 logger.info("Calling apply_results_queue")
                 inbound_sync_request.apply_results_queue()
-                logger.info("Calling apply_rate_limit_state")
-                inbound_sync_request.apply_rate_limit_state()
-                logger.info("Calling apply_progress_updates")
-                # we can't ignore errors here, because the sync engine needs to get the progress updates before we return
-                inbound_sync_request.apply_progress_updates(ignore_errors=False)
+                try:
+                    # this is not critical, we wouldn't fail the sync over rate limit usage capture
+                    logger.info("Calling apply_rate_limit_state")
+                    inbound_sync_request.apply_rate_limit_state()
+                except Exception as e:
+                    logger.error(f"Error applying rate limit state: {str(e)}")
+                # here we used to do a final inbound_sync_request.apply_progress_updates(ignore_errors=False)
+                # but it was erroring too much since there was usually a lot of DDL activity on the Snowflake side
+                # so instead, we'll provide a final progress update via a return value from the proc
+                final_progress_update = PluginMessageStreamProgressUpdate(
+                    stream_total_counts=inbound_sync_request._stream_record_counts,
+                    completed_streams=inbound_sync_request._completed_streams,
+                    stream_errors=omnata_log_handler.stream_global_errors,
+                    total_records_estimate=inbound_sync_request._total_records_estimate
+                )
+                return_dict["final_progress_update"] = final_progress_update.dict()
                 if inbound_sync_request.deadline_reached:
                     # if we actually hit the deadline, this is flagged by the cancellation checking worker and the cancellation
                     # token is set. We throw it here as an error since that's currently how it flows back to the engine with a DELAYED state
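
With this change, the final progress snapshot travels back to the sync engine inside the stored procedure's return dictionary rather than being applied to Snowflake directly before returning. A sketch of how a consumer of the proc result might rehydrate it; variable names and sample values are illustrative, and the import path assumes the package module is omnata_plugin_runtime:

    from omnata_plugin_runtime.api import PluginMessageStreamProgressUpdate

    # illustrative proc result, shaped like return_dict after this change
    proc_result = {
        "final_progress_update": {
            "stream_total_counts": {"contacts": 1200, "companies": 300},
            "completed_streams": ["contacts", "companies"],
            "stream_errors": {},
            "total_records_estimate": 1500,
        }
    }
    # parse_obj is the pydantic v1 way to rebuild the model from a plain dict
    final_update = PluginMessageStreamProgressUpdate.parse_obj(
        proc_result["final_progress_update"]
    )
    print(final_update.completed_streams)  # ['contacts', 'companies']
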
@@ -239,11 +250,6 @@ class PluginEntrypoint:
                     inbound_sync_request._rate_limit_update_task.join() # pylint: disable=protected-access
                 except Exception as e:
                     logger.error(f"Error cleaning up threading: {str(e)}")
-                return_dict["errored_streams"] = list(
-                    omnata_log_handler.stream_global_errors.keys()
-                )
-                # we need to calculate counts for:
-                # CHANGED_COUNT by counting up the records in INBOUND_STREAM_RECORD_COUNTS
                 logger.info("Finished applying records")
                 return return_dict
 
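
The last hunk drops return_dict["errored_streams"], which duplicates information now carried in the final progress update: the errored stream names are the keys of its stream_errors mapping. Reusing the illustrative proc_result above, the old value could be derived as:

    # equivalent of the removed return_dict["errored_streams"] value (illustrative)
    errored_streams = list(proc_result["final_progress_update"]["stream_errors"].keys())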