omnata-plugin-runtime 0.4.6a102.tar.gz → 0.4.7.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: omnata-plugin-runtime
-Version: 0.4.6a102
+Version: 0.4.7
 Summary: Classes and common runtime components for building and running Omnata Plugins
 Author: James Weakley
 Author-email: james.weakley@omnata.com
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "omnata-plugin-runtime"
-version = "0.4.6-a102"
+version = "0.4.7"
 description = "Classes and common runtime components for building and running Omnata Plugins"
 authors = ["James Weakley <james.weakley@omnata.com>"]
 readme = "README.md"
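Note that the old version string differs between the two files above: 0.4.6a102 in the built package metadata versus 0.4.6-a102 in pyproject.toml. That is PEP 440 normalization at work, which drops the separator before a pre-release segment. A quick check with the packaging library illustrates this:

```python
# Sketch: PEP 440 normalization explains why pyproject.toml's "0.4.6-a102"
# appears as "0.4.6a102" in the built package metadata.
from packaging.version import Version

assert str(Version("0.4.6-a102")) == "0.4.6a102"  # separator is normalized away
assert Version("0.4.7") > Version("0.4.6a102")    # the final release sorts after the alpha
```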
@@ -706,7 +706,7 @@ class OutboundSyncRequest(SyncRequest):
         logger.info("applying results to table")
         # use a random table name with a random string to avoid collisions
         with self._snowflake_query_lock:
-            for attempt in Retrying(stop=stop_after_attempt(30),wait=wait_fixed(2),reraise=True,retry=retry_if_exception_message(match=".*is being committed.*")):
+            for attempt in Retrying(stop=stop_after_attempt(30),wait=wait_fixed(2),reraise=True,retry=retry_if_exception_message(match=".*(is being|was) committed.*")):
                 with attempt:
                     success, nchunks, nrows, _ = write_pandas(
                         conn=self._session._conn._cursor.connection, # pylint: disable=protected-access
@@ -1345,7 +1345,7 @@ class InboundSyncRequest(SyncRequest):
         """
         if len(results_df) > 0:
             with self._snowflake_query_lock:
-                for attempt in Retrying(stop=stop_after_attempt(30),wait=wait_fixed(2),reraise=True,retry=retry_if_exception_message(match=".*is being committed.*")):
+                for attempt in Retrying(stop=stop_after_attempt(30),wait=wait_fixed(2),reraise=True,retry=retry_if_exception_message(match=".*(is being|was) committed.*")):
                     with attempt:
                         logger.info(
                             f"Applying {len(results_df)} results to {self._full_results_table_name}"
@@ -1392,7 +1392,7 @@ class InboundSyncRequest(SyncRequest):
         """
         if len(results_df) > 0:
             with self._snowflake_query_lock:
-                for attempt in Retrying(stop=stop_after_attempt(30),wait=wait_fixed(2),reraise=True,retry=retry_if_exception_message(match=".*is being committed.*")):
+                for attempt in Retrying(stop=stop_after_attempt(30),wait=wait_fixed(2),reraise=True,retry=retry_if_exception_message(match=".*(is being|was) committed.*")):
                     with attempt:
                         logger.info(
                             f"Applying {len(results_df)} criteria deletes to {self._criteria_deletes_table_name}"
@@ -2097,50 +2097,61 @@ def managed_inbound_processing(concurrency: int):
 
             tasks:List[threading.Thread] = []
             logger.info(f"Creating {concurrency_to_use} worker(s) for retrieving records")
-
-            for i in range(concurrency_to_use):
-                # the dataframe/generator was put on the queue, so we remove it from the method args
-                task = threading.Thread(
-                    target=__managed_inbound_processing_worker,
-                    name=f"managed_inbound_processing_worker_{i}",
-                    args=(
-                        self,
-                        method,
-                        i,
-                        streams_queue,
-                        self._sync_request._thread_cancellation_token,
-                        method_args,
-                        method_kwargs,
-                    ),
+            # if concurrency is set to 1, we don't need to use threads at all
+            if concurrency_to_use == 1:
+                __managed_inbound_processing_worker(
+                    self,
+                    method,
+                    0,
+                    streams_queue,
+                    self._sync_request._thread_cancellation_token,
+                    method_args,
+                    method_kwargs,
                 )
-                tasks.append(task)
-                task.start()
-
-            # wait for workers to finish
-            while tasks:
-                for task in tasks[:]: # shallow copy so we can remove items from the list while iterating
-                    if not task.is_alive():
-                        task.join() # Ensure the thread is fully finished
-                        tasks.remove(task)
-                        logger.info(f"Thread {task.name} has completed processing")
-                time.sleep(1) # Avoid busy waiting
-            logger.info("All workers completed processing")
-
-            # it's possible that some records weren't applied, since they are processed asynchronously on a timer
-            #if self._sync_request.development_mode is False:
-            #    self._sync_request.apply_results_queue()
-            #self._sync_request._thread_cancellation_token.set()
-            ## the thread cancellation should be detected by the apply results tasks, so it finishes gracefully
-            #if (
-            #    self._sync_request.development_mode is False
-            #    and self._sync_request._apply_results_task is not None
-            #):
-            #    self._sync_request._apply_results_task.join()
-            if self._sync_request._thread_exception_thrown:
-                logger.info("Raising thread exception")
-                raise self._sync_request._thread_exception_thrown.exc_value
             else:
-                logger.info("No thread exception thrown")
+                for i in range(concurrency_to_use):
+                    # the dataframe/generator was put on the queue, so we remove it from the method args
+                    task = threading.Thread(
+                        target=__managed_inbound_processing_worker,
+                        name=f"managed_inbound_processing_worker_{i}",
+                        args=(
+                            self,
+                            method,
+                            i,
+                            streams_queue,
+                            self._sync_request._thread_cancellation_token,
+                            method_args,
+                            method_kwargs,
+                        ),
+                    )
+                    tasks.append(task)
+                    task.start()
+
+                # wait for workers to finish
+                while tasks:
+                    for task in tasks[:]: # shallow copy so we can remove items from the list while iterating
+                        if not task.is_alive():
+                            task.join() # Ensure the thread is fully finished
+                            tasks.remove(task)
+                            logger.info(f"Thread {task.name} has completed processing")
+                    time.sleep(1) # Avoid busy waiting
+                logger.info("All workers completed processing")
+
+                # it's possible that some records weren't applied, since they are processed asynchronously on a timer
+                #if self._sync_request.development_mode is False:
+                #    self._sync_request.apply_results_queue()
+                #self._sync_request._thread_cancellation_token.set()
+                ## the thread cancellation should be detected by the apply results tasks, so it finishes gracefully
+                #if (
+                #    self._sync_request.development_mode is False
+                #    and self._sync_request._apply_results_task is not None
+                #):
+                #    self._sync_request._apply_results_task.join()
+                if self._sync_request._thread_exception_thrown:
+                    logger.info("Raising thread exception")
+                    raise self._sync_request._thread_exception_thrown.exc_value
+                else:
+                    logger.info("No thread exception thrown")
             logger.info("Main managed_inbound_processing thread completing")
             return
 
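The restructured block above adds a fast path: when concurrency_to_use is 1, the worker function is invoked directly on the calling thread instead of spawning a single thread and then polling it in a join loop. A standalone sketch of the shape of this change, with hypothetical names standing in for the plugin runtime's internals:

```python
# Standalone sketch (hypothetical names) of the single-worker fast path.
import queue
import threading
import time
from typing import List

def worker(index: int, q: "queue.Queue[str]") -> None:
    # drain the shared work queue until it is empty
    while True:
        try:
            item = q.get_nowait()
        except queue.Empty:
            return
        print(f"worker {index} processed {item}")

def run_workers(concurrency_to_use: int, q: "queue.Queue[str]") -> None:
    if concurrency_to_use == 1:
        worker(0, q)  # run inline: no thread startup, no join/poll loop
        return
    tasks: List[threading.Thread] = []
    for i in range(concurrency_to_use):
        task = threading.Thread(target=worker, name=f"worker_{i}", args=(i, q))
        tasks.append(task)
        task.start()
    while tasks:
        for task in tasks[:]:  # shallow copy so we can remove while iterating
            if not task.is_alive():
                task.join()
                tasks.remove(task)
        time.sleep(1)  # avoid busy waiting
```

Besides skipping thread startup and the one-second polling sleep, running inline means exceptions propagate directly to the caller instead of going through the _thread_exception_thrown hand-off.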
@@ -354,10 +354,15 @@ class RetryWithLogging(Retry):
     """
     Adding extra logs before making a retry request
     """
-    def __init__(self, *args: Any, thread_cancellation_token:threading.Event, **kwargs: Any) -> Any:
-        self.thread_cancellation_token = thread_cancellation_token
+    def __init__(self, *args: Any, **kwargs: Any) -> Any:
+        self.thread_cancellation_token:Optional[threading.Event] = None
         return super().__init__(*args, **kwargs)
 
+    def new(self, **kw):
+        new_retry = super().new(**kw)
+        new_retry.thread_cancellation_token = self.thread_cancellation_token
+        return new_retry
+
     def sleep_for_retry(self, response=None):
         retry_after = self.get_retry_after(response)
         if retry_after:
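The new() override matters because urllib3 never reuses a Retry instance across attempts: on every increment it calls new() to build a fresh copy from its own known kwargs, so the old required thread_cancellation_token constructor argument would make that copy raise TypeError, and an attribute set only in __init__ would be lost after the first retry. A sketch of the mechanism, with an illustrative class name standing in for RetryWithLogging:

```python
# Sketch: carrying a custom attribute across urllib3's Retry copies.
import threading
from typing import Any, Optional
from urllib3.util.retry import Retry

class TokenCarryingRetry(Retry):  # hypothetical stand-in for RetryWithLogging
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self.thread_cancellation_token: Optional[threading.Event] = None
        super().__init__(*args, **kwargs)

    def new(self, **kw: Any) -> "TokenCarryingRetry":
        # urllib3 calls new() on every increment; copy the token forward
        new_retry = super().new(**kw)
        new_retry.thread_cancellation_token = self.thread_cancellation_token
        return new_retry

token = threading.Event()
retry = TokenCarryingRetry(total=3)
retry.thread_cancellation_token = token
assert retry.new(total=2).thread_cancellation_token is token  # survives the copy
```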
@@ -406,13 +411,13 @@ class RateLimitedSession(requests.Session):
         self.statuses_to_include = statuses_to_include
 
         retry_strategy = RetryWithLogging(
-            thread_cancellation_token=thread_cancellation_token,
             total=max_retries,
             backoff_factor=backoff_factor,
             status_forcelist=statuses_to_include,
             allowed_methods=["HEAD", "GET", "OPTIONS", "POST", "PUT", "DELETE"],
             respect_retry_after_header=respect_retry_after_header
         )
+        retry_strategy.thread_cancellation_token = thread_cancellation_token
         adapter = HTTPAdapter(max_retries=retry_strategy)
         self.mount("https://", adapter)
         self.mount("http://", adapter)