omnata-plugin-runtime 0.3.26__tar.gz → 0.3.27a80__tar.gz

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: omnata-plugin-runtime
-Version: 0.3.26
+Version: 0.3.27a80
 Summary: Classes and common runtime components for building and running Omnata Plugins
 Author: James Weakley
 Author-email: james.weakley@omnata.com
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "omnata-plugin-runtime"
-version = "0.3.26"
+version = "0.3.27-a80"
 description = "Classes and common runtime components for building and running Omnata Plugins"
 authors = ["James Weakley <james.weakley@omnata.com>"]
 readme = "README.md"
@@ -289,7 +289,7 @@ class SyncRequest(ABC):
                 self.deadline_reached = True
                 self.apply_deadline_reached() # pylint: disable=protected-access
                 return
-
+            is_cancelled:bool = False
             with self._snowflake_query_lock:
                 try:
                     # this is not ideal, but "Bind variable in stored procedure is not supported yet"
@@ -297,11 +297,11 @@ class SyncRequest(ABC):
                         f"call {self._source_app_name}.API.PLUGIN_CANCELLATION_CHECK({self._run_id})"
                     ).collect()
                     cancellation_result = handle_proc_result(query_result)
-                    is_cancelled: bool = cancellation_result["is_cancelled"]
-                    if is_cancelled:
-                        self.apply_cancellation()
+                    is_cancelled = cancellation_result["is_cancelled"]
                 except Exception as e:
                     logger.error(f"Error checking cancellation: {e}")
+            if is_cancelled:
+                self.apply_cancellation()
             cancellation_token.wait(20)
         logger.info("cancel checking worker exiting")
 
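Taken together, the two hunks above rework the cancel-checking worker: the is_cancelled flag is now initialised before the query lock is taken, and apply_cancellation() runs only after the with block exits. This means a failure in the stored-procedure call no longer swallows the cancellation handling inside the try block, and the cancellation is applied without still holding _snowflake_query_lock. A minimal sketch of the resulting pattern, assuming an illustrative class with a check_cancellation() stand-in for the PLUGIN_CANCELLATION_CHECK call (not the runtime's real API):

import logging
import threading

logger = logging.getLogger(__name__)

class CancellationPoller:
    """Illustrative stand-in for SyncRequest's cancel-checking worker."""

    def __init__(self):
        self._query_lock = threading.Lock()

    def check_cancellation(self) -> bool:
        # stand-in for the PLUGIN_CANCELLATION_CHECK stored-procedure call
        return False

    def apply_cancellation(self):
        logger.info("applying cancellation")

    def run(self, cancellation_token: threading.Event):
        while not cancellation_token.is_set():
            # initialise before the lock/try so the flag is defined even
            # if the check below raises
            is_cancelled: bool = False
            with self._query_lock:
                try:
                    is_cancelled = self.check_cancellation()
                except Exception as e:
                    logger.error(f"Error checking cancellation: {e}")
            # act on the flag only after the lock has been released
            if is_cancelled:
                self.apply_cancellation()
            cancellation_token.wait(20)
        logger.info("cancel checking worker exiting")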
@@ -1029,7 +1029,7 @@ class InboundSyncRequest(SyncRequest):
             # flatten
             all_dfs:List[pandas.DataFrame] = [x for sublist in all_df_lists for x in sublist]
             combined_length = sum([len(x) for x in all_dfs])
-            # first, don't both if the count is less than 10000, since it's unlikely to be even close
+            # first, don't bother if the count is less than 10000, since it's unlikely to be even close
             if combined_length > 10000:
                 if sum([x.memory_usage(index=True).sum() for x in all_dfs]) > 200000000:
                     logger.info(f"Applying results queue immediately due to combined dataframe size")