omnata-plugin-runtime 0.5.10a162__py3-none-any.whl → 0.6.0a163__py3-none-any.whl
- omnata_plugin_runtime/omnata_plugin.py +56 -65
- omnata_plugin_runtime/plugin_entrypoints.py +1 -1
- omnata_plugin_runtime/rate_limiting.py +1 -1
- {omnata_plugin_runtime-0.5.10a162.dist-info → omnata_plugin_runtime-0.6.0a163.dist-info}/METADATA +1 -1
- omnata_plugin_runtime-0.6.0a163.dist-info/RECORD +12 -0
- omnata_plugin_runtime-0.5.10a162.dist-info/RECORD +0 -12
- {omnata_plugin_runtime-0.5.10a162.dist-info → omnata_plugin_runtime-0.6.0a163.dist-info}/LICENSE +0 -0
- {omnata_plugin_runtime-0.5.10a162.dist-info → omnata_plugin_runtime-0.6.0a163.dist-info}/WHEEL +0 -0
omnata_plugin_runtime/omnata_plugin.py
CHANGED

@@ -343,7 +343,7 @@ class SyncRequest(ABC):
         self._thread_exception_thrown = args
         logger.error("Thread exception", exc_info=True)
         self._thread_cancellation_token.set() # this will tell the other threads to stop working
-        logger.info(
+        logger.debug(
             f"thread_cancellation_token: {self._thread_cancellation_token.is_set()}"
         )

@@ -384,7 +384,7 @@ class SyncRequest(ABC):
         Designed to be run in a thread, this method polls the results every 20 seconds and sends them back to Snowflake.
         """
         while not cancellation_token.is_set():
-            logger.info("apply results worker checking for results")
+            logger.debug("apply results worker checking for results")
             self.apply_results_queue()
             cancellation_token.wait(20)
         logger.info("apply results worker exiting")
@@ -411,7 +411,7 @@ class SyncRequest(ABC):
         workers, but that meant it's not being checked when the plugin doesn't use those decorators.
         """
         while not cancellation_token.is_set():
-            logger.info("cancel checking worker checking for cancellation")
+            logger.debug("cancel checking worker checking for cancellation")
             if (datetime.datetime.now(datetime.timezone.utc) > self._run_deadline): # pylint: disable=protected-access
                 # if we've reached the deadline for the run, end it
                 self.deadline_reached = True
@@ -454,7 +454,7 @@ class SyncRequest(ABC):
         api_limits_for_category = [x for x in self.api_limits if x.endpoint_category == endpoint_category]
         if len(api_limits_for_category) > 0:
             self.rate_limit_state_this_sync_and_branch[endpoint_category].prune_history(api_limits_for_category[0].request_rates)
-        logger.info(f"Updating rate limit state for sync {self._run_id}")
+        logger.debug(f"Updating rate limit state for sync {self._run_id}")
         update_rate_limit_result = self._plugin_message(
             PluginMessageRateLimitState(rate_limit_state=self.rate_limit_state_this_sync_and_branch)
         )
@@ -466,7 +466,7 @@ class SyncRequest(ABC):
             self.rate_limit_state_all = rate_limit_state_all
             self.rate_limit_state_this_sync_and_branch = rate_limit_state_this_branch
         else:
-            logger.info("No rate limit state to update")
+            logger.debug("No rate limit state to update")

     @abstractmethod
     def apply_cancellation(self):
@@ -512,7 +512,7 @@ class SyncRequest(ABC):
                 # if the rate limiting is going to require us to wait past the run deadline, we bail out now
                 raise DeadlineReachedException()
             time_now = datetime.datetime.now(datetime.timezone.utc)
-            logger.info(
+            logger.debug(
                 f"calculated wait until date was {wait_until}, comparing to {time_now}"
             )

@@ -542,7 +542,7 @@ class SyncRequest(ABC):
         Keep this to a very consise string, like 'Fetching records from API'.
         Avoid lengthy diagnostic messages, anything like this should be logged the normal way.
         """
-        logger.info(f"Activity update: {current_activity}")
+        logger.debug(f"Activity update: {current_activity}")
         return self._plugin_message(
             PluginMessageCurrentActivity(current_activity=current_activity)
         )
@@ -551,7 +551,7 @@ class SyncRequest(ABC):
         """
         Sends a message back to the plugin. This is used to send back the results of a sync run.
         """
-        logger.info(f"Sending plugin message: {message}")
+        logger.debug(f"Sending plugin message: {message}")
         with self._snowflake_query_lock:
             try:
                 # this is not ideal, but "Bind variable in stored procedure is not supported yet"
@@ -687,23 +687,22 @@ class OutboundSyncRequest(SyncRequest):
         """
         Merges all of the queued results and applies them
         """
-        logger.info("OutboundSyncRequest apply_results_queue")
+        logger.debug("OutboundSyncRequest apply_results_queue")
         if self._apply_results is not None:
             with self._apply_results_lock:
                 self._apply_results = [
                     x for x in self._apply_results if x is not None and len(x) > 0
                 ] # remove any None/empty dataframes
                 if len(self._apply_results) > 0:
-                    logger.info(
+                    logger.debug(
                         f"Applying {len(self._apply_results)} batches of queued results"
                     )
                     # upload all cached apply results
                     all_dfs = pandas.concat(self._apply_results)
-                    #logger.info(f"applying: {all_dfs}")
                     self._apply_results_dataframe(all_dfs)
                     self._apply_results.clear()
                 else:
-                    logger.info("No queued results to apply")
+                    logger.debug("No queued results to apply")

     def apply_cancellation(self):
         """
@@ -729,7 +728,7 @@ class OutboundSyncRequest(SyncRequest):
         """
         Adds some results to the queue for applying asynchronously
         """
-        logger.info(f"Enqueueing {len(results)} results for upload")
+        logger.debug(f"Enqueueing {len(results)} results for upload")
         for required_column in ["IDENTIFIER", "RESULT", "SUCCESS"]:
             if required_column not in results.columns:
                 raise ValueError(
@@ -763,7 +762,7 @@ class OutboundSyncRequest(SyncRequest):
             results_df.set_index("IDENTIFIER", inplace=True, drop=False)
             results_df["APPLY_STATE_DATETIME"] = str(datetime.datetime.now().astimezone())
             if results_df is not None:
-                logger.info(
+                logger.debug(
                     f"Applying a queued results dataframe of {len(results_df)} records"
                 )
                 # change the success flag to an appropriate APPLY STATUS
@@ -803,7 +802,7 @@ class OutboundSyncRequest(SyncRequest):
         """
         Applies results for an outbound sync. This involves merging back onto the record state table
         """
-        logger.info("applying results to table")
+        logger.debug("applying results to table")
         # use a random table name with a random string to avoid collisions
         with self._snowflake_query_lock:
             for attempt in Retrying(stop=stop_after_attempt(30),wait=wait_fixed(2),reraise=True,retry=retry_if_exception_message(match=".*(is being|was) committed.*")):
@@ -819,7 +818,7 @@ class OutboundSyncRequest(SyncRequest):
                         raise ValueError(
                             f"Failed to write results to table {self._full_results_table_name}"
                         )
-                    logger.info(
+                    logger.debug(
                         f"Wrote {nrows} rows and {nchunks} chunks to table {self._full_results_table_name}"
                     )

@@ -831,12 +830,7 @@ class OutboundSyncRequest(SyncRequest):
         Parses the JSON in the transformed record column (Snowflake passes it as a string).
         Also when the mapper is a jinja template, renders it.
         """
-
-        # logger.info(
-        #     "Dataframe wrapper skipping pre-processing as dataframe is None"
-        # )
-        #     return None
-        logger.info(
+        logger.debug(
             f"Dataframe wrapper pre-processing {len(data_frame)} records"
         )
         if len(data_frame) > 0:
@@ -865,24 +859,24 @@ class OutboundSyncRequest(SyncRequest):
                 render_jinja
                 and "jinja_template" in data_frame.iloc[0]["TRANSFORMED_RECORD"]
             ):
-                logger.info("Rendering jinja template")
+                logger.debug("Rendering jinja template")
                 env = Environment()
                 # examine the plugin instance for jinja_filter decorated methods
                 if self.plugin_instance is not None:
                     for name in dir(self.plugin_instance):
                         member = getattr(self.plugin_instance, name)
                         if callable(member) and hasattr(member, "is_jinja_filter"):
-                            logger.info(f"Adding jinja filter to environment: {name}")
+                            logger.debug(f"Adding jinja filter to environment: {name}")
                             env.filters[name] = member

                 def do_jinja_render(jinja_env, row_value):
-                    logger.info(f"do_jinja_render: {row_value}")
+                    logger.debug(f"do_jinja_render: {row_value}")
                     jinja_template = jinja_env.from_string(row_value["jinja_template"])
                     try:
                         rendered_result = jinja_template.render(
                             {"row": row_value["source_record"]}
                         )
-                        logger.info(
+                        logger.debug(
                             f"Individual jinja rendering result: {rendered_result}"
                         )
                         return rendered_result
@@ -1043,7 +1037,7 @@ class InboundSyncRequest(SyncRequest):
         """
         Merges all of the queued results and applies them
         """
-        logger.info("InboundSyncRequest apply_results_queue ")
+        logger.debug("InboundSyncRequest apply_results_queue ")
         if self._apply_results is not None:
             with self._apply_results_lock:
                 results:List[pandas.DataFrame] = []
@@ -1061,12 +1055,11 @@ class InboundSyncRequest(SyncRequest):
                     results.extend(non_empty_dfs) # remove any None/empty dataframes
                     stream_names.append(stream_name)
                 if len(results) > 0:
-                    logger.info(
+                    logger.debug(
                         f"Applying {len(results)} batches of queued results"
                     )
                     # upload all cached apply results
                     all_dfs = pandas.concat(results)
-                    #logger.info(f"applying: {all_dfs}")
                     self._apply_results_dataframe(stream_names, all_dfs)
                     # update the stream state object too
                     self._apply_latest_states()
@@ -1083,12 +1076,11 @@ class InboundSyncRequest(SyncRequest):
                     x for x in stream_results if x is not None and len(x) > 0
                 ])
                 if len(results) > 0:
-                    logger.info(
+                    logger.debug(
                         f"Applying {len(results)} batches of queued criteria deletes"
                     )
                     # upload all cached apply results
                     all_dfs = pandas.concat(results)
-                    #logger.info(f"applying: {all_dfs}")
                     self._apply_criteria_deletes_dataframe(all_dfs)
                     # clear the delete requests
                     self._apply_results_criteria_deletes = {}
@@ -1191,7 +1183,7 @@ class InboundSyncRequest(SyncRequest):
         # first, don't bother if the count is less than 10000, since it's unlikely to be even close
         if combined_length > 10000:
             if sum([x.memory_usage(index=True).sum() for x in all_dfs]) > 200000000:
-                logger.info(f"Applying results queue immediately due to combined dataframe size")
+                logger.debug(f"Applying results queue immediately due to combined dataframe size")
                 self.apply_results_queue()

     def delete_by_criteria(self, stream_name: str, criteria: Dict[str, Any]):
@@ -1215,7 +1207,7 @@ class InboundSyncRequest(SyncRequest):
         # STREAM_NAME: string
         # DELETE_CRITERIA: object
         with self._apply_results_lock:
-            logger.info(
+            logger.debug(
                 f"Enqueuing {len(criteria)} delete criteria for stream {stream_name} for upload"
             )
             existing_results: List[pandas.DataFrame] = []
@@ -1237,7 +1229,7 @@ class InboundSyncRequest(SyncRequest):
         # first, don't both if the count is less than 10000, since it's unlikely to be even close
         if combined_length > 10000:
             if sum([x.memory_usage(index=True).sum() for x in all_dfs]) > 200000000:
-                logger.info(f"Applying criteria deletes queue immediately due to combined dataframe size")
+                logger.debug(f"Applying criteria deletes queue immediately due to combined dataframe size")
                 self.apply_results_queue()

     def mark_stream_complete(self, stream_name: str):
@@ -1332,7 +1324,7 @@ class InboundSyncRequest(SyncRequest):
             raise ValueError(
                 f"Cannot preprocess results for stream {stream_name} as its configuration doesn't exist"
             )
-        logger.info(f"preprocessing for stream: {self._streams_dict[stream_name]}")
+        logger.debug(f"preprocessing for stream: {self._streams_dict[stream_name]}")
         if len(results) > 0:
             if isinstance(is_delete, list):
                 if len(results) != len(is_delete):
@@ -1469,7 +1461,7 @@ class InboundSyncRequest(SyncRequest):
         with self._snowflake_query_lock:
             for attempt in Retrying(stop=stop_after_attempt(30),wait=wait_fixed(2),reraise=True,retry=retry_if_exception_message(match=".*(is being|was) committed.*")):
                 with attempt:
-                    logger.info(
+                    logger.debug(
                         f"Applying {len(results_df)} results to {self._full_results_table_name}"
                     )
                     # try setting parquet engine here, since the engine parameter does not seem to make it through to the write_pandas function
@@ -1485,7 +1477,7 @@ class InboundSyncRequest(SyncRequest):
                         raise ValueError(
                             f"Failed to write results to table {self._full_results_table_name}"
                         )
-                    logger.info(
+                    logger.debug(
                         f"Wrote {nrows} rows and {nchunks} chunks to table {self._full_results_table_name}"
                     )
                     # temp tables aren't allowed
@@ -1498,7 +1490,7 @@ class InboundSyncRequest(SyncRequest):
             for stream_name in stream_names:
                 self._results_exist[stream_name] = True
         else:
-            logger.info("Results dataframe is empty, not applying")
+            logger.debug("Results dataframe is empty, not applying")

     def _apply_latest_states(self):
         """
@@ -1516,7 +1508,7 @@ class InboundSyncRequest(SyncRequest):
         with self._snowflake_query_lock:
             for attempt in Retrying(stop=stop_after_attempt(30),wait=wait_fixed(2),reraise=True,retry=retry_if_exception_message(match=".*(is being|was) committed.*")):
                 with attempt:
-                    logger.info(
+                    logger.debug(
                         f"Applying {len(results_df)} criteria deletes to {self._criteria_deletes_table_name}"
                     )
                     # try setting parquet engine here, since the engine parameter does not seem to make it through to the write_pandas function
@@ -1531,12 +1523,12 @@ class InboundSyncRequest(SyncRequest):
                     raise ValueError(
                         f"Failed to write results to table {self._criteria_deletes_table_name}"
                     )
-                    logger.info(
+                    logger.debug(
                         f"Wrote {nrows} rows and {nchunks} chunks to table {self._criteria_deletes_table_name}"
                     )
                     return
         else:
-            logger.info("Results dataframe is empty, not applying")
+            logger.debug("Results dataframe is empty, not applying")


 class ConnectResponse(SubscriptableBaseModel):
@@ -1870,7 +1862,7 @@ class FixedSizeGenerator:
         self.generator = generator
         # handle dataframe as well as a dataframe generator, just to be more flexible
         if self.generator.__class__.__name__ == "DataFrame":
-            logger.info(
+            logger.debug(
                 f"Wrapping a dataframe of length {len(self.generator)} in a map so it acts as a generator"
             )
             self.generator = map(lambda x: x, [self.generator])
@@ -1880,14 +1872,14 @@ class FixedSizeGenerator:

     def __next__(self):
         with self.thread_lock:
-            logger.info(f"initial leftovers: {self.leftovers}")
+            logger.debug(f"initial leftovers: {self.leftovers}")
             records_df = self.leftovers
             self.leftovers = None
             try:
                 # build up a dataframe until we reach the batch size
                 while records_df is None or len(records_df) < self.batch_size:
                     current_count = 0 if records_df is None else len(records_df)
-                    logger.info(
+                    logger.debug(
                         f"fetching another dataframe from the generator, got {current_count} out of a desired {self.batch_size}"
                     )
                     next_df = next(self.generator)
@@ -1901,32 +1893,31 @@ class FixedSizeGenerator:
                             f"Dataframe generator provided an unexpected object, type {next_df.__class__.__name__}"
                         )
                     if next_df is None and records_df is None:
-                        logger.info(
+                        logger.debug(
                             "Original and next dataframes were None, returning None"
                         )
                         return None
                     records_df = pandas.concat([records_df, next_df])
-                    logger.info(
+                    logger.debug(
                         f"after concatenation, dataframe has {len(records_df)} records"
                     )
             except StopIteration:
-                logger.info("FixedSizeGenerator consumed the last pandas batch")
+                logger.debug("FixedSizeGenerator consumed the last pandas batch")

             if records_df is None:
-                logger.info("No records left, returning None")
+                logger.debug("No records left, returning None")
                 return None
             elif records_df is not None and len(records_df) > self.batch_size:
-                logger.info(
+                logger.debug(
                     f"putting {len(records_df[self.batch_size:])} records back ({len(records_df)} > {self.batch_size})"
                 )
                 self.leftovers = records_df[self.batch_size :].reset_index(drop=True)
                 records_df = records_df[0 : self.batch_size].reset_index(drop=True)
             else:
                 current_count = 0 if records_df is None else len(records_df)
-                logger.info(
+                logger.debug(
                     f"{current_count} records does not exceed batch size, not putting any back"
                 )
-            #logger.info(f"FixedSizeGenerator about to return dataframe {records_df}")
             return records_df

     def __iter__(self):
@@ -1948,7 +1939,7 @@ def __managed_outbound_processing_worker(
     Consumes a fixed sized set of records by passing them to the wrapped function,
     while adhering to the defined API constraints.
     """
-    logger.info(
+    logger.debug(
        f"worker {worker_index} processing. Cancelled: {cancellation_token.is_set()}"
    )
    while not cancellation_token.is_set():
@@ -1959,13 +1950,13 @@ def __managed_outbound_processing_worker(
         records_df = next(dataframe_generator)
         #logger.info(f"records returned from dataframe generator: {records_df}")
         if records_df is None:
-            logger.info(f"worker {worker_index} has no records left to process")
+            logger.debug(f"worker {worker_index} has no records left to process")
             return
         elif len(records_df) == 0:
-            logger.info(f"worker {worker_index} has 0 records left to process")
+            logger.debug(f"worker {worker_index} has 0 records left to process")
             return

-        logger.info(
+        logger.debug(
             f"worker {worker_index} fetched {len(records_df)} records for processing"
         )
         # threads block while waiting for their allocation of records, it's possible there's been
@@ -1975,7 +1966,7 @@ def __managed_outbound_processing_worker(
                 f"worker {worker_index} exiting before applying records, due to cancellation"
             )
             return
-        logger.info(f"worker {worker_index} processing {len(records_df)} records")
+        logger.debug(f"worker {worker_index} processing {len(records_df)} records")
         # restore the first argument, was originally the dataframe/generator but now it's the appropriately sized dataframe
         try:
             results_df = method(
@@ -1988,7 +1979,7 @@ def __managed_outbound_processing_worker(
                 f"worker {worker_index} interrupted while waiting for rate limiting, exiting"
             )
             return
-        logger.info(
+        logger.debug(
             f"worker {worker_index} received {len(results_df)} results, enqueueing"
         )

@@ -2000,7 +1991,7 @@ def __managed_outbound_processing_worker(
         outbound_sync_request.enqueue_results(
             results_df
         ) # pylint: disable=protected-access
-        logger.info(
+        logger.debug(
             f"worker {worker_index} enqueueing results"
         )

@@ -2058,7 +2049,7 @@ def managed_outbound_processing(concurrency: int, batch_size: int):
             # put the record iterator on the queue, ready for the first task to read it
             fixed_size_generator = FixedSizeGenerator(dataframe_arg, batch_size=batch_size)
             tasks:List[threading.Thread] = []
-            logger.info(f"Creating {concurrency} worker(s) for applying records")
+            logger.debug(f"Creating {concurrency} worker(s) for applying records")
             # just in case
             threading.excepthook = self._sync_request.thread_exception_hook
             for i in range(concurrency):
@@ -2132,17 +2123,17 @@ def __managed_inbound_processing_worker(
     """
     while not cancellation_token.is_set():
         # Get our generator object out of the queue
-        logger.info(
+        logger.debug(
             f"worker {worker_index} processing. Cancelled: {cancellation_token.is_set()}. Method args: {len(method_args)}. Method kwargs: {len(method_kwargs.keys())} ({','.join(method_kwargs.keys())})"
         )
         try:
             stream: StoredStreamConfiguration = streams_queue.get_nowait()
-            logger.info(f"stream returned from queue: {stream}")
+            logger.debug(f"stream returned from queue: {stream}")
             # restore the first argument, was originally the dataframe/generator but now it's the appropriately sized dataframe
             try:
-                logger.info(f"worker {worker_index} processing stream {stream.stream_name}, invoking plugin class method {method.__name__}")
+                logger.debug(f"worker {worker_index} processing stream {stream.stream_name}, invoking plugin class method {method.__name__}")
                 result = method(plugin_class_obj, *(stream, *method_args), **method_kwargs)
-                logger.info(f"worker {worker_index} completed processing stream {stream.stream_name}")
+                logger.debug(f"worker {worker_index} completed processing stream {stream.stream_name}")
                 if result is not None and result is False:
                     logger.info(f"worker {worker_index} requested that {stream.stream_name} be not marked as complete")
                 else:
@@ -2168,7 +2159,7 @@ def __managed_inbound_processing_worker(
                     exc_info=True,
                     extra={'stream_name':stream.stream_name})
         except queue.Empty:
-            logger.info("streams queue is empty")
+            logger.debug("streams queue is empty")
             return


@@ -2196,7 +2187,7 @@ def managed_inbound_processing(concurrency: int):
             raise ValueError(
                 "To use the managed_inbound_processing decorator, you must attach an apply request to the plugin instance (via the _sync_request property)"
             )
-        logger.info(f"managed_inbound_processing invoked with {len(method_args)} positional arguments and {len(method_kwargs)} named arguments ({','.join(method_kwargs.keys())})")
+        logger.debug(f"managed_inbound_processing invoked with {len(method_args)} positional arguments and {len(method_kwargs)} named arguments ({','.join(method_kwargs.keys())})")
         if self._sync_request.development_mode is True:
             concurrency_to_use = 1 # disable concurrency when running in development mode, it interferes with pyvcr
         else:
omnata_plugin_runtime/plugin_entrypoints.py
CHANGED

@@ -322,7 +322,7 @@ class PluginEntrypoint:
         sync_parameters: Dict,
         selected_streams: Optional[List[str]], # None to return all streams without requiring schema
     ):
-        logger.info("Entered list_streams method")
+        logger.debug("Entered list_streams method")
         oauth_secret_name = normalise_nulls(oauth_secret_name)
         other_secrets_name = normalise_nulls(other_secrets_name)
         connection_secrets = get_secrets(oauth_secret_name, other_secrets_name)
omnata_plugin_runtime/rate_limiting.py
CHANGED

@@ -122,7 +122,7 @@ class ApiLimits(SubscriptableBaseModel):
         The resulting timestamp is when the next request can be made (if it's in the past, it can be done immediately)
         If multiple rate limits exist, the maximum timestamp is used (i.e. the most restrictive rate limit applies)
         """
-        logger.info(
+        logger.debug(
             f"calculating wait time, given previous requests as {rate_limit_state.previous_request_timestamps}"
         )
         if self.request_rates is None:
omnata_plugin_runtime-0.6.0a163.dist-info/RECORD
ADDED

@@ -0,0 +1,12 @@
+omnata_plugin_runtime/__init__.py,sha256=MS9d1whnfT_B3-ThqZ7l63QeC_8OEKTuaYV5wTwRpBA,1576
+omnata_plugin_runtime/api.py,sha256=FxzTqri4no8ClkOm7vZADG8aD47jcGBCTTQDEORmOJM,6326
+omnata_plugin_runtime/configuration.py,sha256=TI6GaVFhewVawBCaYN34GujY57qEP6q2nik4YpSEk5s,38100
+omnata_plugin_runtime/forms.py,sha256=GzSPEwcijsoPCXEO1mHiE8ylvX_KSE5TkhwqkymA2Ss,19755
+omnata_plugin_runtime/logging.py,sha256=bn7eKoNWvtuyTk7RTwBS9UARMtqkiICtgMtzq3KA2V0,3272
+omnata_plugin_runtime/omnata_plugin.py,sha256=_vcDgenjpbSyUc4LVdwjCG_iZbJ815GM12lapidAchs,128574
+omnata_plugin_runtime/plugin_entrypoints.py,sha256=-mkIpfB_pTl1yDBMmTnioW44RPj-V8dvlhBiU7ekvkQ,27976
+omnata_plugin_runtime/rate_limiting.py,sha256=JukA0l7x7Klqz2b54mR-poP7NRxpUHgWSGp6h0B8u6Q,25612
+omnata_plugin_runtime-0.6.0a163.dist-info/LICENSE,sha256=IMF9i4xIpgCADf0U-V1cuf9HBmqWQd3qtI3FSuyW4zE,26526
+omnata_plugin_runtime-0.6.0a163.dist-info/METADATA,sha256=IkxT_gXso4RzRQraD302cEH2sxzpGVMwLmIAhgeRLyk,1985
+omnata_plugin_runtime-0.6.0a163.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+omnata_plugin_runtime-0.6.0a163.dist-info/RECORD,,
omnata_plugin_runtime-0.5.10a162.dist-info/RECORD
DELETED

@@ -1,12 +0,0 @@
-omnata_plugin_runtime/__init__.py,sha256=MS9d1whnfT_B3-ThqZ7l63QeC_8OEKTuaYV5wTwRpBA,1576
-omnata_plugin_runtime/api.py,sha256=FxzTqri4no8ClkOm7vZADG8aD47jcGBCTTQDEORmOJM,6326
-omnata_plugin_runtime/configuration.py,sha256=TI6GaVFhewVawBCaYN34GujY57qEP6q2nik4YpSEk5s,38100
-omnata_plugin_runtime/forms.py,sha256=GzSPEwcijsoPCXEO1mHiE8ylvX_KSE5TkhwqkymA2Ss,19755
-omnata_plugin_runtime/logging.py,sha256=bn7eKoNWvtuyTk7RTwBS9UARMtqkiICtgMtzq3KA2V0,3272
-omnata_plugin_runtime/omnata_plugin.py,sha256=PDPaUHbYuhbWfY3xj0g__EejO9bOGv7xt3weuUDM_bo,128957
-omnata_plugin_runtime/plugin_entrypoints.py,sha256=PFSLsYEVnWHVvSoOYTtTK2JY6pp6_8_eYP53WqLRiPE,27975
-omnata_plugin_runtime/rate_limiting.py,sha256=DVQ_bc-mVLBkrU1PTns1MWXhHiLpSB5HkWCcdePtJ2A,25611
-omnata_plugin_runtime-0.5.10a162.dist-info/LICENSE,sha256=IMF9i4xIpgCADf0U-V1cuf9HBmqWQd3qtI3FSuyW4zE,26526
-omnata_plugin_runtime-0.5.10a162.dist-info/METADATA,sha256=YPmxPXisjonM8SbFPgoWzzugZca9J-dp99CPJn98wZo,1986
-omnata_plugin_runtime-0.5.10a162.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-omnata_plugin_runtime-0.5.10a162.dist-info/RECORD,,
{omnata_plugin_runtime-0.5.10a162.dist-info → omnata_plugin_runtime-0.6.0a163.dist-info}/LICENSE
RENAMED
File without changes

{omnata_plugin_runtime-0.5.10a162.dist-info → omnata_plugin_runtime-0.6.0a163.dist-info}/WHEEL
RENAMED
File without changes