omnata-plugin-runtime 0.12.1__py3-none-any.whl → 0.12.2a347__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -693,6 +693,9 @@ class ConnectionConfigurationParameters(SubscriptableBaseModel):
     _snowflake: Optional[Any] = PrivateAttr( # or use Any to annotate the type and use Field to initialize
         default=None
     )
+    _plugin_instance: Optional[Any] = PrivateAttr( # Reference to OmnataPlugin instance for accessing sync_request
+        default=None
+    )
 
     @model_validator(mode='after')
     def validate_ngrok_tunnel_settings(self) -> Self:
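
For context, a Pydantic v2 PrivateAttr like the one added above is excluded from validation and serialisation but can be assigned after construction, which is how the entrypoint wires the plugin instance in later. A minimal sketch with a hypothetical model (not from the package):

from typing import Any, Optional
from pydantic import BaseModel, PrivateAttr

class Example(BaseModel):
    name: str
    _plugin_instance: Optional[Any] = PrivateAttr(default=None)  # hidden from dumps

m = Example(name="demo")
m._plugin_instance = object()   # assignable post-construction, as plugin_entrypoints.py does
print(m.model_dump())           # {'name': 'demo'} - the private attribute is excluded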
@@ -739,6 +742,22 @@ class ConnectionConfigurationParameters(SubscriptableBaseModel):
         """
         if parameter_name=='access_token' and self.access_token_secret_name is not None:
             import _snowflake # pylint: disable=import-error, import-outside-toplevel # type: ignore
+            from .threading_utils import is_managed_worker_thread
+
+            # Check if we're in a worker thread using the explicit flag
+            # This is more reliable than checking thread names
+            if is_managed_worker_thread() and self._plugin_instance is not None and self._plugin_instance._sync_request is not None:
+                logger.debug(f"Worker thread requesting access_token via OAuth token service for secret: {self.access_token_secret_name}")
+                try:
+                    access_token = self._plugin_instance._sync_request.request_access_token_from_main_thread(
+                        self.access_token_secret_name
+                    )
+                    return StoredConfigurationValue(value=access_token)
+                except Exception as e:
+                    logger.error(f"Error requesting access_token from main thread: {e}")
+                    raise
+
+            # Otherwise, call _snowflake directly (main thread)
             return StoredConfigurationValue(
                 value=_snowflake.get_oauth_access_token(self.access_token_secret_name)
             )
@@ -1005,14 +1024,41 @@ StoredFieldMappings.model_rebuild()
 OutboundSyncConfigurationParameters.model_rebuild()
 
 @tracer.start_as_current_span("get_secrets")
-def get_secrets(oauth_secret_name: Optional[str], other_secrets_name: Optional[str]
+def get_secrets(oauth_secret_name: Optional[str], other_secrets_name: Optional[str],
+                sync_request: Optional[Any] = None
 ) -> Dict[str, StoredConfigurationValue]:
+    """
+    Get secrets from Snowflake. This function can be called from the main thread or worker threads.
+    When called from worker threads (e.g., within @managed_inbound_processing) for OAuth access tokens,
+    it will automatically route the OAuth token request through the main thread to avoid threading issues
+    with _snowflake.get_oauth_access_token. Other secrets can be fetched directly.
+
+    :param oauth_secret_name: The name of the OAuth secret to retrieve
+    :param other_secrets_name: The name of other secrets to retrieve
+    :param sync_request: Optional SyncRequest instance for worker threads. If not provided, will attempt to detect.
+    :return: Dictionary of StoredConfigurationValue objects
+    """
+    from .threading_utils import is_managed_worker_thread
     connection_secrets = {}
     import _snowflake # pylint: disable=import-error, import-outside-toplevel # type: ignore
+
+    # OAuth token needs special handling in worker threads
     if oauth_secret_name is not None:
-        connection_secrets["access_token"] = StoredConfigurationValue(
-            value=_snowflake.get_oauth_access_token(oauth_secret_name)
-        )
+        if is_managed_worker_thread() and sync_request is not None:
+            logger.debug(f"Worker thread requesting OAuth access token via main thread for secret: {oauth_secret_name}")
+            try:
+                access_token = sync_request.request_access_token_from_main_thread(oauth_secret_name)
+                connection_secrets["access_token"] = StoredConfigurationValue(value=access_token)
+            except Exception as e:
+                logger.error(f"Error requesting OAuth access token from main thread: {e}")
+                raise
+        else:
+            # Main thread - call _snowflake directly
+            connection_secrets["access_token"] = StoredConfigurationValue(
+                value=_snowflake.get_oauth_access_token(oauth_secret_name)
+            )
+
+    # Other secrets can be fetched directly from any thread
     if other_secrets_name is not None:
         try:
             secret_string_content = _snowflake.get_generic_secret_string(
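
A hedged usage sketch of the new parameter: a plugin method running inside a managed worker passes its SyncRequest so the OAuth fetch is brokered through the main thread. The secret names and the _sync_request attribute access below are illustrative, not from the package:

# inside a plugin method decorated with @managed_inbound_processing
secrets = get_secrets(
    oauth_secret_name="MY_OAUTH_SECRET",    # hypothetical secret names
    other_secrets_name="OTHER_SECRETS",
    sync_request=self._sync_request,        # routes the token call via the main thread
)
access_token = secrets["access_token"].value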
@@ -886,8 +886,9 @@ def prune(view_part: SnowflakeViewPart, joined_parts: List[SnowflakeViewPart]) -
                 deps.append(dep_key)
             else:
                 logger.warning(
-                    f"Column {column.original_name} in {part.stream_name} references "
-                    f"{ref_field} in {resolved_stream}, which doesn't exist"
+                    msg=f"Column {column.original_name} in {part.stream_name} references "
+                    f"{ref_field} in {resolved_stream}, which doesn't exist",
+                    stack_info=True
                 )
                 has_invalid_dep = True
 
@@ -906,8 +907,9 @@ def prune(view_part: SnowflakeViewPart, joined_parts: List[SnowflakeViewPart]) -
             for dep_key in deps:
                 if dep_key in columns_with_invalid_deps:
                     logger.warning(
-                        f"Column {col_key[1]} in {col_key[0]} depends on "
-                        f"{dep_key[1]} in {dep_key[0]}, which has invalid dependencies"
+                        msg=f"Column {col_key[1]} in {col_key[0]} depends on "
+                        f"{dep_key[1]} in {dep_key[0]}, which has invalid dependencies",
+                        stack_info=True
                     )
                     columns_with_invalid_deps.add(col_key)
                     changed = True
@@ -995,8 +997,9 @@ def prune(view_part: SnowflakeViewPart, joined_parts: List[SnowflakeViewPart]) -
 
             if missing_refs:
                 logger.warning(
-                    f"Removing column {col.original_name} from {part.stream_name} because it references "
-                    f"non-existent column(s): {', '.join(missing_refs)}"
+                    msg=f"Removing column {col.original_name} from {part.stream_name} because it references "
+                    f"non-existent column(s): {', '.join(missing_refs)}",
+                    stack_info=True
                 )
             else:
                 # Column is not needed (not referenced by main part)
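
All three warnings in these prune() hunks gain stack_info=True, a standard library logging feature: it appends the current call stack to the emitted record, showing which pruning path produced the warning. A quick illustration (message text is made up):

import logging

logging.basicConfig(level=logging.WARNING)
logging.getLogger("prune").warning(
    msg="Column A references B, which doesn't exist",
    stack_info=True,  # output gains "Stack (most recent call last): ..."
)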
@@ -101,6 +101,7 @@ from .rate_limiting import (
 from .json_schema import (
     FullyQualifiedTable
 )
+from .threading_utils import is_managed_worker_thread, set_managed_worker_thread
 
 SortDirectionType = Literal["asc", "desc"]
 
@@ -375,6 +376,11 @@ class SyncRequest(ABC):
         self._last_states_update = None
         # store the opentelemetry context so that it can be attached inside threads
         self.opentelemetry_context = context.get_current()
+
+        # Secrets service for thread-safe access to _snowflake.get_oauth_access_token
+        # which can only be called from the main thread
+        # The main thread (in decorator wait loops) will service these requests
+        self._secrets_request_queue: queue.Queue = queue.Queue()
 
         threading.excepthook = self.thread_exception_hook
         if self.development_mode is False:
@@ -499,6 +505,82 @@ class SyncRequest(ABC):
             cancellation_token.wait(20)
         logger.info("cancel checking worker exiting")
 
+    def _service_oauth_token_request(self):
+        """
+        Services any pending OAuth token requests from worker threads.
+        This should be called periodically from the main thread while waiting for workers.
+        Returns True if any requests were serviced, False otherwise.
+        """
+        import _snowflake # pylint: disable=import-error, import-outside-toplevel # type: ignore
+
+        serviced_any = False
+        # Process all pending requests (non-blocking)
+        while not self._secrets_request_queue.empty():
+            try:
+                request = self._secrets_request_queue.get_nowait()
+            except queue.Empty:
+                break
+
+            serviced_any = True
+            oauth_secret_name = request.get('oauth_secret_name')
+            response_queue = request['response_queue']
+
+            logger.debug(f"Main thread servicing OAuth token request for secret: {oauth_secret_name}")
+
+            try:
+                # Call _snowflake.get_oauth_access_token directly (we're on the main thread now)
+                access_token = _snowflake.get_oauth_access_token(oauth_secret_name)
+
+                # Send successful response
+                response_queue.put({
+                    'success': True,
+                    'result': access_token
+                })
+            except Exception as e:
+                logger.error(f"Error servicing OAuth token request: {e}")
+                # Send error response
+                response_queue.put({
+                    'success': False,
+                    'error': str(e)
+                })
+            finally:
+                self._secrets_request_queue.task_done()
+
+        return serviced_any
+
+    def request_access_token_from_main_thread(self, oauth_secret_name: str, timeout: int = 30) -> str:
+        """
+        Request OAuth access token from the main thread. This should be called from worker threads
+        when they need to access the OAuth token via _snowflake.get_oauth_access_token.
+        The main thread services these requests while waiting for workers to complete.
+
+        :param oauth_secret_name: The name of the OAuth secret to retrieve
+        :param timeout: Maximum time to wait for the response in seconds
+        :return: The OAuth access token string
+        :raises TimeoutError: if the request times out
+        :raises ValueError: if the secrets service returns an error
+        """
+        # Create a response queue for this specific request
+        response_queue: queue.Queue = queue.Queue()
+
+        logger.debug(f"Requesting OAuth access token from main thread for secret: {oauth_secret_name}")
+
+        # Put the request in the queue with its own response queue
+        self._secrets_request_queue.put({
+            'oauth_secret_name': oauth_secret_name,
+            'response_queue': response_queue
+        })
+
+        # Block on the response queue with timeout
+        try:
+            response = response_queue.get(timeout=timeout)
+            if response['success']:
+                return response['result']
+            else:
+                raise ValueError(f"Error getting OAuth access token: {response['error']}")
+        except queue.Empty:
+            raise TimeoutError(f"Timeout waiting for OAuth access token request after {timeout} seconds")
+
     @abstractmethod
     def apply_results_queue(self):
         """
@@ -1344,18 +1426,23 @@ class InboundSyncRequest(SyncRequest):
             logger.debug(f"Applying results queue immediately due to combined dataframe size")
             self.apply_results_queue()
 
-    def delete_by_criteria(self, stream_name: str, criteria: Dict[str, Any]):
+    def delete_by_criteria(self, stream_name: str, criteria: Dict[str, Any], new_state: Any):
         """
-        Submits some criteria (field→value dict) which will cause matching records to be marked as deleted at the end of the run.
+        Submits some criteria (field→value dict) which will cause matching records to be marked as deleted
+        during checkpointing or at the end of the run.
         This feature was created primarily for array fields that become child streams.
         The parent record is updated, which means there is a set of new children, but we need to delete the previously sync'd records and we don't know their identifiers.
 
-        The criteria is applied before the new records for the current run are applied. In other words, it will not delete any records from the current run.
+        The criteria is applied before the new records for the current run/checkpoint are applied.
 
         For a record to be deleted, it must match fields with all the criteria supplied. At least one field value must be provided.
+
+        If you pass in None for new_state, then the criteria delete will not apply unless you also enqueue record state for the same stream. This provides the ability to do an atomic delete-and-replace.
+        If you pass in some new state, then the criteria deletes will be applied in isolation along with the new state in a transaction.
         """
         if len(criteria) == 0:
             raise ValueError("At least one field value must be provided for deletion criteria")
+
         if stream_name not in self._streams_dict:
             raise ValueError(
                 f"Cannot delete records for stream {stream_name} as its configuration doesn't exist"
@@ -1374,6 +1461,10 @@ class InboundSyncRequest(SyncRequest):
             existing_results.append(
                 CriteriaDeleteResult(
                     criteria_deletes=pandas.DataFrame([{"STREAM_NAME":stream_name,"DELETE_CRITERIA": criteria}])))
+            if new_state is not None:
+                existing_results.append(
+                    StateResult(new_state=new_state)
+                ) # append the new state at the end
             self._apply_results[stream_name] = existing_results
             if self.development_mode is False:
                 self._apply_results_if_size_exceeded()
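
A hedged usage sketch of the new signature; the stream name, criteria and state values below are illustrative:

# Atomic delete-and-replace: with new_state=None, the delete is held back until
# record state is enqueued for the same stream in this run.
sync_request.delete_by_criteria(
    stream_name="order_lines",
    criteria={"ORDER_ID": "12345"},            # match previously synced children
    new_state=None,
)

# Or apply the delete in isolation, committed in a transaction with this state:
sync_request.delete_by_criteria(
    stream_name="order_lines",
    criteria={"ORDER_ID": "12345"},
    new_state={"last_order_id": "12345"},      # hypothetical state payload
)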
@@ -1450,7 +1541,7 @@ class InboundSyncRequest(SyncRequest):
         with self._apply_results_lock:
             if stream_name in self._apply_results:
                 if len(self._apply_results[stream_name]) > 0:
-                    self._apply_results[stream_name].append(new_state)
+                    self._apply_results[stream_name].append(StateResult(new_state=new_state))
                     return
 
         self._directly_insert_to_state_register(
@@ -2184,6 +2275,9 @@ def __managed_outbound_processing_worker(
     Consumes a fixed sized set of records by passing them to the wrapped function,
     while adhering to the defined API constraints.
     """
+    # Mark this thread as a managed worker thread
+    set_managed_worker_thread(True)
+
     context.attach(plugin_class_obj.opentelemetry_context)
     logger.debug(
         f"worker {worker_index} processing. Cancelled: {cancellation_token.is_set()}"
@@ -2323,6 +2417,8 @@ def managed_outbound_processing(concurrency: int, batch_size: int):
                     task.join() # Ensure the thread is fully finished
                     tasks.remove(task)
                     logger.info(f"Thread {task.name} has completed processing")
+                # Service any OAuth token requests from worker threads while we wait
+                self._sync_request._service_oauth_token_request()
                 time.sleep(1) # Avoid busy waiting
             logger.info("All workers completed processing")
 
@@ -2367,6 +2463,9 @@ def __managed_inbound_processing_worker(
     A worker thread for the managed_inbound_processing annotation.
     Passes single streams at a time to the wrapped function, adhering to concurrency constraints.
     """
+    # Mark this thread as a managed worker thread
+    set_managed_worker_thread(True)
+
     context.attach(plugin_class_obj.opentelemetry_context)
     while not cancellation_token.is_set():
         # Get our generator object out of the queue
@@ -2505,6 +2604,8 @@ def managed_inbound_processing(concurrency: int):
                     task.join() # Ensure the thread is fully finished
                     tasks.remove(task)
                     logger.info(f"Thread {task.name} has completed processing")
+                # Service any OAuth token requests from worker threads while we wait
+                self._sync_request._service_oauth_token_request()
                 time.sleep(1) # Avoid busy waiting
             logger.info("All workers completed processing")
 
@@ -2762,7 +2863,7 @@ def omnata_udf(
 
     return decorator
 
-def find_udf_functions(path:str = '.',top_level_modules:Optional[List[str]] = None) -> List[UDFDefinition]:
+def find_udf_functions(path:str = '.',top_level_modules:Optional[List[str]] = None, exclude_top_level_modules:Optional[List[str]] = None) -> List[UDFDefinition]:
     """
     Finds all functions in the specified directory which have the 'omnata_udf' decorator applied
     """
@@ -2778,6 +2879,9 @@ def find_udf_functions(path:str = '.',top_level_modules:Optional[List[str]] = No
         if top_level_modules is not None:
             if len([x for x in top_level_modules if module_name.startswith(x)]) == 0:
                 continue
+        if exclude_top_level_modules is not None:
+            if any(module_name.startswith(y) for y in exclude_top_level_modules):
+                continue
         module = importlib.import_module(module_name)
 
         # Iterate over all members of the module
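
A hedged usage sketch of the new exclusion filter; both filters are prefix matches against the discovered module names, and the path and module prefixes below are illustrative:

udfs = find_udf_functions(
    path="./src",
    top_level_modules=["my_plugin"],                 # only scan my_plugin.*
    exclude_top_level_modules=["my_plugin.tests"],   # ...while skipping its tests
)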
@@ -188,6 +188,8 @@ class PluginEntrypoint:
             sync_id=request.sync_id,
             branch_name=request.sync_branch_name
         )
+        # Store plugin_instance reference in parameters for worker thread OAuth token access
+        parameters._plugin_instance = self._plugin_instance # pylint: disable=protected-access
         try:
             self._plugin_instance._configuration_parameters = parameters
             with tracer.start_as_current_span("invoke_plugin") as span:
@@ -246,6 +248,8 @@ class PluginEntrypoint:
             sync_id=request.sync_id,
             branch_name=request.sync_branch_name
         )
+        # Store plugin_instance reference in parameters for worker thread OAuth token access
+        parameters._plugin_instance = self._plugin_instance # pylint: disable=protected-access
         try:
             self._plugin_instance._configuration_parameters = parameters
 
@@ -0,0 +1,27 @@
+"""
+Utilities for thread management in the plugin runtime.
+"""
+import threading
+
+# Thread-local storage to track if we're in a managed worker thread
+# This is more reliable than checking thread names
+_thread_local = threading.local()
+
+
+def is_managed_worker_thread() -> bool:
+    """
+    Check if the current thread is a managed worker thread.
+    Returns True if running in a @managed_inbound_processing or @managed_outbound_processing worker.
+
+    This is set by the decorator worker functions and is more reliable than checking thread names.
+    """
+    return getattr(_thread_local, 'is_managed_worker', False)
+
+
+def set_managed_worker_thread(is_worker: bool):
+    """
+    Set the flag indicating whether the current thread is a managed worker thread.
+
+    This should only be called by the managed processing decorator worker functions.
+    """
+    _thread_local.is_managed_worker = is_worker
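
Because the flag lives in threading.local() storage, setting it in a worker is invisible to every other thread. A quick sketch, using the module path recorded in the wheel's RECORD below:

import threading
from omnata_plugin_runtime.threading_utils import (
    is_managed_worker_thread,
    set_managed_worker_thread,
)

def worker():
    set_managed_worker_thread(True)      # what the decorator workers do on startup
    assert is_managed_worker_thread()

t = threading.Thread(target=worker)
t.start()
t.join()
assert not is_managed_worker_thread()    # the main thread's flag is untouched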
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: omnata-plugin-runtime
-Version: 0.12.1
+Version: 0.12.2a347
 Summary: Classes and common runtime components for building and running Omnata Plugins
 License-File: LICENSE
 Author: James Weakley
@@ -36,8 +36,8 @@ Requires-Dist: pytz (<=2025.2)
 Requires-Dist: pyyaml (<=6.0.3)
 Requires-Dist: requests (>=2,<=2.32.5)
 Requires-Dist: setuptools (<=80.9.0)
-Requires-Dist: snowflake-connector-python (>=3,<4)
-Requires-Dist: snowflake-snowpark-python (>=1.20.0,<=1.43.0)
+Requires-Dist: snowflake-connector-python (>=3.0.0,<=4.2.0)
+Requires-Dist: snowflake-snowpark-python (>=1.20.0,<=1.44.0)
 Requires-Dist: snowflake-telemetry-python (<=0.5.0)
 Requires-Dist: tenacity (>=8,<9)
 Requires-Dist: tomlkit (<=0.13.3)
@@ -0,0 +1,14 @@
+omnata_plugin_runtime/__init__.py,sha256=MS9d1whnfT_B3-ThqZ7l63QeC_8OEKTuaYV5wTwRpBA,1576
+omnata_plugin_runtime/api.py,sha256=5gbjbnFy72Xjf0E3kbG23G0V2J3CorvD5kpBn_BkdlI,8084
+omnata_plugin_runtime/configuration.py,sha256=-tN0yztdi-trgzKhLxSsPU0Ar3EBOmsNNLWFIIg3Bbc,49714
+omnata_plugin_runtime/forms.py,sha256=Lrbr3otsFDrvHWJw7v-slsW4PvEHJ6BG1Yl8oaJfiDo,20529
+omnata_plugin_runtime/json_schema.py,sha256=Wu0rByO8pFSZ3ugKqfs_yWMU24PwiC2jmoO83n9fycM,59852
+omnata_plugin_runtime/logging.py,sha256=qUtRA9syQNnjfJZHA2W18K282voXX6vHwrBIPOBo1n8,4521
+omnata_plugin_runtime/omnata_plugin.py,sha256=AJZFi9PIykFNKCWsM9mZgmXss4JyI5VWDPe2m4jnYqA,148592
+omnata_plugin_runtime/plugin_entrypoints.py,sha256=9vN1m0w7-z3qu9up1qZokfncvJlQL6tYPh5ASAuY5VQ,33023
+omnata_plugin_runtime/rate_limiting.py,sha256=qpr5esU4Ks8hMzuMpSR3gLFdor2ZUXYWCjmsQH_K6lQ,25882
+omnata_plugin_runtime/threading_utils.py,sha256=fqlKLCPTEPVYdMinf8inPKLYxwD4d4WWVMLB3a2mNqk,906
+omnata_plugin_runtime-0.12.2a347.dist-info/METADATA,sha256=8g-oocptHhzfjF_DyFkNfyVZSQus3JeA5NdofJlD99U,2235
+omnata_plugin_runtime-0.12.2a347.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+omnata_plugin_runtime-0.12.2a347.dist-info/licenses/LICENSE,sha256=rGaMQG3R3F5-JGDp_-rlMKpDIkg5n0SI4kctTk8eZSI,56
+omnata_plugin_runtime-0.12.2a347.dist-info/RECORD,,
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 2.2.1
+Generator: poetry-core 2.3.1
 Root-Is-Purelib: true
 Tag: py3-none-any
@@ -1,13 +0,0 @@
-omnata_plugin_runtime/__init__.py,sha256=MS9d1whnfT_B3-ThqZ7l63QeC_8OEKTuaYV5wTwRpBA,1576
-omnata_plugin_runtime/api.py,sha256=5gbjbnFy72Xjf0E3kbG23G0V2J3CorvD5kpBn_BkdlI,8084
-omnata_plugin_runtime/configuration.py,sha256=SffokJfgvy6V3kUsoEjXcK3GdNgHo6U3mgBEs0qBv4I,46972
-omnata_plugin_runtime/forms.py,sha256=Lrbr3otsFDrvHWJw7v-slsW4PvEHJ6BG1Yl8oaJfiDo,20529
-omnata_plugin_runtime/json_schema.py,sha256=ZfHMG-XSJBE9Smt33Y6GPpl5skF7pB1TRCf9AvWuw-Y,59705
-omnata_plugin_runtime/logging.py,sha256=qUtRA9syQNnjfJZHA2W18K282voXX6vHwrBIPOBo1n8,4521
-omnata_plugin_runtime/omnata_plugin.py,sha256=8FT3XNdZzty76OldvcxdKpbKrPENKjAIbwa_rxceVyg,143564
-omnata_plugin_runtime/plugin_entrypoints.py,sha256=_1pDLov3iQorGmfcae8Sw2bVjxw1vYeowBaKKNzRclQ,32629
-omnata_plugin_runtime/rate_limiting.py,sha256=qpr5esU4Ks8hMzuMpSR3gLFdor2ZUXYWCjmsQH_K6lQ,25882
-omnata_plugin_runtime-0.12.1.dist-info/METADATA,sha256=SCl6ee1e3Q8DN0wa47snuMAOBABw387hC54HXuYSTcs,2222
-omnata_plugin_runtime-0.12.1.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
-omnata_plugin_runtime-0.12.1.dist-info/licenses/LICENSE,sha256=rGaMQG3R3F5-JGDp_-rlMKpDIkg5n0SI4kctTk8eZSI,56
-omnata_plugin_runtime-0.12.1.dist-info/RECORD,,