omnata-plugin-runtime 0.11.6__py3-none-any.whl → 0.12.2a347__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- omnata_plugin_runtime/configuration.py +50 -4
- omnata_plugin_runtime/json_schema.py +9 -6
- omnata_plugin_runtime/omnata_plugin.py +240 -107
- omnata_plugin_runtime/plugin_entrypoints.py +4 -0
- omnata_plugin_runtime/threading_utils.py +27 -0
- omnata_plugin_runtime-0.12.2a347.dist-info/METADATA +56 -0
- omnata_plugin_runtime-0.12.2a347.dist-info/RECORD +14 -0
- {omnata_plugin_runtime-0.11.6.dist-info → omnata_plugin_runtime-0.12.2a347.dist-info}/WHEEL +1 -1
- omnata_plugin_runtime-0.11.6.dist-info/METADATA +0 -55
- omnata_plugin_runtime-0.11.6.dist-info/RECORD +0 -13
- {omnata_plugin_runtime-0.11.6.dist-info → omnata_plugin_runtime-0.12.2a347.dist-info}/licenses/LICENSE +0 -0
omnata_plugin_runtime/configuration.py:

@@ -693,6 +693,9 @@ class ConnectionConfigurationParameters(SubscriptableBaseModel):
     _snowflake: Optional[Any] = PrivateAttr( # or use Any to annotate the type and use Field to initialize
         default=None
     )
+    _plugin_instance: Optional[Any] = PrivateAttr( # Reference to OmnataPlugin instance for accessing sync_request
+        default=None
+    )
 
     @model_validator(mode='after')
     def validate_ngrok_tunnel_settings(self) -> Self:
@@ -739,6 +742,22 @@ class ConnectionConfigurationParameters(SubscriptableBaseModel):
         """
         if parameter_name=='access_token' and self.access_token_secret_name is not None:
             import _snowflake # pylint: disable=import-error, import-outside-toplevel # type: ignore
+            from .threading_utils import is_managed_worker_thread
+
+            # Check if we're in a worker thread using the explicit flag
+            # This is more reliable than checking thread names
+            if is_managed_worker_thread() and self._plugin_instance is not None and self._plugin_instance._sync_request is not None:
+                logger.debug(f"Worker thread requesting access_token via OAuth token service for secret: {self.access_token_secret_name}")
+                try:
+                    access_token = self._plugin_instance._sync_request.request_access_token_from_main_thread(
+                        self.access_token_secret_name
+                    )
+                    return StoredConfigurationValue(value=access_token)
+                except Exception as e:
+                    logger.error(f"Error requesting access_token from main thread: {e}")
+                    raise
+
+            # Otherwise, call _snowflake directly (main thread)
             return StoredConfigurationValue(
                 value=_snowflake.get_oauth_access_token(self.access_token_secret_name)
             )
@@ -1005,14 +1024,41 @@ StoredFieldMappings.model_rebuild()
 OutboundSyncConfigurationParameters.model_rebuild()
 
 @tracer.start_as_current_span("get_secrets")
-def get_secrets(oauth_secret_name: Optional[str], other_secrets_name: Optional[str]
+def get_secrets(oauth_secret_name: Optional[str], other_secrets_name: Optional[str],
+                sync_request: Optional[Any] = None
 ) -> Dict[str, StoredConfigurationValue]:
+    """
+    Get secrets from Snowflake. This function can be called from the main thread or worker threads.
+    When called from worker threads (e.g., within @managed_inbound_processing) for OAuth access tokens,
+    it will automatically route the OAuth token request through the main thread to avoid threading issues
+    with _snowflake.get_oauth_access_token. Other secrets can be fetched directly.
+
+    :param oauth_secret_name: The name of the OAuth secret to retrieve
+    :param other_secrets_name: The name of other secrets to retrieve
+    :param sync_request: Optional SyncRequest instance for worker threads. If not provided, will attempt to detect.
+    :return: Dictionary of StoredConfigurationValue objects
+    """
+    from .threading_utils import is_managed_worker_thread
     connection_secrets = {}
     import _snowflake # pylint: disable=import-error, import-outside-toplevel # type: ignore
+
+    # OAuth token needs special handling in worker threads
    if oauth_secret_name is not None:
-
-
-
+        if is_managed_worker_thread() and sync_request is not None:
+            logger.debug(f"Worker thread requesting OAuth access token via main thread for secret: {oauth_secret_name}")
+            try:
+                access_token = sync_request.request_access_token_from_main_thread(oauth_secret_name)
+                connection_secrets["access_token"] = StoredConfigurationValue(value=access_token)
+            except Exception as e:
+                logger.error(f"Error requesting OAuth access token from main thread: {e}")
+                raise
+        else:
+            # Main thread - call _snowflake directly
+            connection_secrets["access_token"] = StoredConfigurationValue(
+                value=_snowflake.get_oauth_access_token(oauth_secret_name)
+            )
+
+    # Other secrets can be fetched directly from any thread
     if other_secrets_name is not None:
         try:
             secret_string_content = _snowflake.get_generic_secret_string(
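For plugin authors, the practical effect of this change is that `get_secrets` can now be called safely from inside a managed worker, provided the `SyncRequest` is passed through so the OAuth call is routed to the main thread. A hedged sketch (not taken from the package; the secret name is hypothetical):

```python
# Minimal sketch of calling the new get_secrets signature from a worker.
# Assumptions: omnata_plugin_runtime is installed, and `sync_request` is the
# SyncRequest instance the plugin already holds (e.g. via self._sync_request).
from typing import Any, Dict
from omnata_plugin_runtime.configuration import StoredConfigurationValue, get_secrets

def fetch_token_in_worker(sync_request: Any) -> str:
    secrets: Dict[str, StoredConfigurationValue] = get_secrets(
        oauth_secret_name="MY_OAUTH_SECRET",  # hypothetical Snowflake secret name
        other_secrets_name=None,
        sync_request=sync_request,            # enables main-thread routing when called from a worker
    )
    return secrets["access_token"].value
```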
omnata_plugin_runtime/json_schema.py:

@@ -886,8 +886,9 @@ def prune(view_part: SnowflakeViewPart, joined_parts: List[SnowflakeViewPart]) -
                     deps.append(dep_key)
                 else:
                     logger.warning(
-                        f"Column {column.original_name} in {part.stream_name} references "
-                        f"{ref_field} in {resolved_stream}, which doesn't exist"
+                        msg=f"Column {column.original_name} in {part.stream_name} references "
+                        f"{ref_field} in {resolved_stream}, which doesn't exist",
+                        stack_info=True
                     )
                     has_invalid_dep = True
 
@@ -906,8 +907,9 @@ def prune(view_part: SnowflakeViewPart, joined_parts: List[SnowflakeViewPart]) -
         for dep_key in deps:
             if dep_key in columns_with_invalid_deps:
                 logger.warning(
-                    f"Column {col_key[1]} in {col_key[0]} depends on "
-                    f"{dep_key[1]} in {dep_key[0]}, which has invalid dependencies"
+                    msg=f"Column {col_key[1]} in {col_key[0]} depends on "
+                    f"{dep_key[1]} in {dep_key[0]}, which has invalid dependencies",
+                    stack_info=True
                 )
                 columns_with_invalid_deps.add(col_key)
                 changed = True
@@ -995,8 +997,9 @@ def prune(view_part: SnowflakeViewPart, joined_parts: List[SnowflakeViewPart]) -
 
             if missing_refs:
                 logger.warning(
-                    f"Removing column {col.original_name} from {part.stream_name} because it references "
-                    f"non-existent column(s): {', '.join(missing_refs)}"
+                    msg=f"Removing column {col.original_name} from {part.stream_name} because it references "
+                    f"non-existent column(s): {', '.join(missing_refs)}",
+                    stack_info=True
                 )
             else:
                 # Column is not needed (not referenced by main part)
omnata_plugin_runtime/omnata_plugin.py:

@@ -15,7 +15,7 @@ if tuple(sys.version_info[:2]) >= (3, 9):
 else:
     # Python 3.8 and below
     from typing_extensions import Annotated
-
+from dataclasses import dataclass
 import zipfile
 import datetime
 import http
@@ -49,6 +49,11 @@ from snowflake.snowpark.functions import col
 from tenacity import Retrying, stop_after_attempt, wait_fixed, retry_if_exception_message
 
 from .logging import OmnataPluginLogHandler, logger, tracer, meter
+stream_duration_gauge = meter.create_gauge(
+    name="omnata.sync_run.stream_duration",
+    description="The duration of stream processing",
+    unit="s",
+)
 from opentelemetry import context
 import math
 import numpy as np
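The stream-duration metric is now created once at module level as a synchronous OpenTelemetry gauge. A standalone sketch of the same API, assuming only `opentelemetry-api` (1.23 or later, where `Meter.create_gauge` exists) is installed; with no SDK configured the calls are no-ops:

```python
# Illustrative only: shows the gauge creation/recording pattern used above.
import time
from opentelemetry import metrics

meter = metrics.get_meter("example.meter")
stream_duration_gauge = meter.create_gauge(
    name="omnata.sync_run.stream_duration",
    description="The duration of stream processing",
    unit="s",
)

start = time.time()
# ... process a stream ...
stream_duration_gauge.set(time.time() - start, attributes={"stream_name": "accounts"})  # attribute value is made up
```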
@@ -96,6 +101,7 @@ from .rate_limiting import (
 from .json_schema import (
     FullyQualifiedTable
 )
+from .threading_utils import is_managed_worker_thread, set_managed_worker_thread
 
 SortDirectionType = Literal["asc", "desc"]
 
@@ -265,6 +271,29 @@ def jinja_filter(func):
     func.is_jinja_filter = True
     return func
 
+@dataclass
+class StateResult:
+    """
+    Represents the current cursor state of a stream. This simple wrapper just helps us identify what type of
+    object is in the apply_results list.
+    """
+    new_state: Any
+
+@dataclass
+class RecordsToUploadResult:
+    """
+    Represents the records to upload for a stream. This simple wrapper just helps us identify what type of
+    object is in the apply_results list.
+    """
+    records: pandas.DataFrame
+
+@dataclass
+class CriteriaDeleteResult:
+    """
+    Represents the result of processing criteria deletes for a stream. This simple wrapper just helps us identify what type of
+    object is in the apply_results list.
+    """
+    criteria_deletes: pandas.DataFrame
 
 class SyncRequest(ABC):
     """
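These wrappers exist so that a single, append-only per-stream list can hold record batches, state checkpoints and criteria deletes, and later be partitioned with `isinstance` checks. An illustrative, self-contained demo of that pattern (it redefines two of the wrappers locally so it runs without the package; only pandas is required):

```python
from dataclasses import dataclass
from typing import Any
import pandas

@dataclass
class StateResult:
    new_state: Any

@dataclass
class RecordsToUploadResult:
    records: pandas.DataFrame

apply_results = [
    RecordsToUploadResult(records=pandas.DataFrame({"id": [1, 2]})),
    RecordsToUploadResult(records=pandas.DataFrame({"id": [3]})),
    StateResult(new_state={"cursor": "2024-01-01"}),  # checkpoint after the first two batches
]

# Partition the mixed list by type, exactly as the runtime does with isinstance checks.
record_batches = [x.records for x in apply_results if isinstance(x, RecordsToUploadResult)]
latest_state = next(x.new_state for x in reversed(apply_results) if isinstance(x, StateResult))
print(len(pandas.concat(record_batches)), latest_state)  # 3 {'cursor': '2024-01-01'}
```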
@@ -347,6 +376,11 @@ class SyncRequest(ABC):
         self._last_states_update = None
         # store the opentelemetry context so that it can be attached inside threads
         self.opentelemetry_context = context.get_current()
+
+        # Secrets service for thread-safe access to _snowflake.get_oauth_access_token
+        # which can only be called from the main thread
+        # The main thread (in decorator wait loops) will service these requests
+        self._secrets_request_queue: queue.Queue = queue.Queue()
 
         threading.excepthook = self.thread_exception_hook
         if self.development_mode is False:
@@ -471,6 +505,82 @@ class SyncRequest(ABC):
             cancellation_token.wait(20)
         logger.info("cancel checking worker exiting")
 
+    def _service_oauth_token_request(self):
+        """
+        Services any pending OAuth token requests from worker threads.
+        This should be called periodically from the main thread while waiting for workers.
+        Returns True if any requests were serviced, False otherwise.
+        """
+        import _snowflake # pylint: disable=import-error, import-outside-toplevel # type: ignore
+
+        serviced_any = False
+        # Process all pending requests (non-blocking)
+        while not self._secrets_request_queue.empty():
+            try:
+                request = self._secrets_request_queue.get_nowait()
+            except queue.Empty:
+                break
+
+            serviced_any = True
+            oauth_secret_name = request.get('oauth_secret_name')
+            response_queue = request['response_queue']
+
+            logger.debug(f"Main thread servicing OAuth token request for secret: {oauth_secret_name}")
+
+            try:
+                # Call _snowflake.get_oauth_access_token directly (we're on the main thread now)
+                access_token = _snowflake.get_oauth_access_token(oauth_secret_name)
+
+                # Send successful response
+                response_queue.put({
+                    'success': True,
+                    'result': access_token
+                })
+            except Exception as e:
+                logger.error(f"Error servicing OAuth token request: {e}")
+                # Send error response
+                response_queue.put({
+                    'success': False,
+                    'error': str(e)
+                })
+            finally:
+                self._secrets_request_queue.task_done()
+
+        return serviced_any
+
+    def request_access_token_from_main_thread(self, oauth_secret_name: str, timeout: int = 30) -> str:
+        """
+        Request OAuth access token from the main thread. This should be called from worker threads
+        when they need to access the OAuth token via _snowflake.get_oauth_access_token.
+        The main thread services these requests while waiting for workers to complete.
+
+        :param oauth_secret_name: The name of the OAuth secret to retrieve
+        :param timeout: Maximum time to wait for the response in seconds
+        :return: The OAuth access token string
+        :raises TimeoutError: if the request times out
+        :raises ValueError: if the secrets service returns an error
+        """
+        # Create a response queue for this specific request
+        response_queue: queue.Queue = queue.Queue()
+
+        logger.debug(f"Requesting OAuth access token from main thread for secret: {oauth_secret_name}")
+
+        # Put the request in the queue with its own response queue
+        self._secrets_request_queue.put({
+            'oauth_secret_name': oauth_secret_name,
+            'response_queue': response_queue
+        })
+
+        # Block on the response queue with timeout
+        try:
+            response = response_queue.get(timeout=timeout)
+            if response['success']:
+                return response['result']
+            else:
+                raise ValueError(f"Error getting OAuth access token: {response['error']}")
+        except queue.Empty:
+            raise TimeoutError(f"Timeout waiting for OAuth access token request after {timeout} seconds")
+
     @abstractmethod
     def apply_results_queue(self):
         """
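The two methods above form a request/response pattern over `queue.Queue`: a worker enqueues a request containing its own private response queue, and the main thread drains the shared request queue while it waits for the workers. A generic, self-contained sketch of the same pattern (names here are illustrative, not the package's):

```python
import queue
import threading

request_queue: queue.Queue = queue.Queue()

def worker():
    response_queue: queue.Queue = queue.Queue()       # private channel for this request
    request_queue.put({"secret_name": "MY_SECRET", "response_queue": response_queue})
    response = response_queue.get(timeout=30)         # block until the main thread answers
    print("worker got:", response["result"])

def main_thread_fetch(secret_name: str) -> str:
    return f"token-for-{secret_name}"                 # stands in for a main-thread-only call

t = threading.Thread(target=worker)
t.start()
while t.is_alive():
    try:
        request = request_queue.get_nowait()          # service pending requests without blocking
        request["response_queue"].put({"success": True, "result": main_thread_fetch(request["secret_name"])})
    except queue.Empty:
        pass
    t.join(timeout=0.1)                               # equivalent of the decorator's wait loop
```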
@@ -1057,7 +1167,6 @@ class InboundSyncRequest(SyncRequest):
         }
 
         # These are similar to the results, but represent requests to delete records by some criteria
-        self._apply_results_criteria_deletes: Dict[str, List[pandas.DataFrame]] = {}
         self._temp_tables = {}
         self._temp_table_lock = threading.Lock()
         self._results_exist: Dict[
@@ -1096,7 +1205,9 @@ class InboundSyncRequest(SyncRequest):
         self._criteria_deletes_table_name = results_table.get_fully_qualified_criteria_deletes_table_name()
         self.state_register_table_name = results_table.get_fully_qualified_state_register_table_name()
         # this is keyed on stream name, each containing a list of dataframes and state updates mixed
-        self._apply_results: Dict[str, List[
+        self._apply_results: Dict[str, List[RecordsToUploadResult | StateResult | CriteriaDeleteResult]] = {}
+        # track the start times of each stream, so we can calculate durations. The int is a epoch (time.time()) value
+        self._stream_start_times: Dict[str, int] = {}
 
     def apply_results_queue(self):
         """
@@ -1105,7 +1216,8 @@ class InboundSyncRequest(SyncRequest):
         logger.debug("InboundSyncRequest apply_results_queue")
         if self._apply_results is not None:
             with self._apply_results_lock:
-
+                records_to_upload:List[pandas.DataFrame] = []
+                criteria_deletes_to_upload:List[pandas.DataFrame] = []
                 stream_states_for_upload:Dict[str, Dict[str, Any]] = {}
                 for stream_name, stream_results in self._apply_results.items():
                     # the stream results contains an ordered sequence of dataframes and state updates (append only)
@@ -1113,9 +1225,9 @@ class InboundSyncRequest(SyncRequest):
                     # so first, we iterate backwards to find the last state update
                     last_state_index = -1
                     for i in range(len(stream_results) - 1, -1, -1):
-                        if isinstance(stream_results[i],
+                        if isinstance(stream_results[i], StateResult):
                             last_state_index = i
-                            stream_states_for_upload[stream_name] = stream_results[i]
+                            stream_states_for_upload[stream_name] = stream_results[i].new_state
                             break
                     # if there are no state updates, we can't do anything with this stream
                     if last_state_index == -1:
@@ -1124,56 +1236,54 @@ class InboundSyncRequest(SyncRequest):
                         )
                         continue
                     assert isinstance(stream_states_for_upload[stream_name], dict), "Latest state must be a dictionary"
-                    # now we can take the dataframes up to the last state update
-
-
-                        x for x in
+                    # now we can take the record dataframes up to the last state update
+                    results_subset = stream_results[:last_state_index]
+                    non_empty_record_dfs:List[pandas.DataFrame] = [
+                        x.records for x in results_subset
+                        if x is not None and isinstance(x, RecordsToUploadResult) and len(x.records) > 0
                     ]
                     # get the total length of all the dataframes
-                    total_length = sum([len(x) for x in
+                    total_length = sum([len(x) for x in non_empty_record_dfs])
                     # add the count of this batch to the total for this stream
                     self._stream_record_counts[
                         stream_name
                     ] = self._stream_record_counts[stream_name] + total_length
-
+                    records_to_upload.extend(non_empty_record_dfs)
+                    # also handle any criteria deletes
+                    criteria_deletes_to_upload.extend([
+                        x.criteria_deletes for x in results_subset
+                        if x is not None and isinstance(x, CriteriaDeleteResult) and len(x.criteria_deletes) > 0
+                    ])
                     # now remove everything up to the last state update
                     # we do this so that we don't apply the same state update multiple times
+                    # keep everything after the last state update
                     self._apply_results[stream_name] = stream_results[
                         last_state_index + 1 :
-                    ]
-
-
-
-
-
-
-
-
-
+                    ]
+
+                if len(records_to_upload) > 0 or len(criteria_deletes_to_upload) > 0:
+                    if len(records_to_upload) > 0:
+                        logger.debug(
+                            f"Applying {len(records_to_upload)} batches of queued results"
+                        )
+                        # upload all cached apply results
+                        records_to_upload_combined = pandas.concat(records_to_upload)
+                        self._apply_results_dataframe(list(stream_states_for_upload.keys()), records_to_upload_combined)
+                    # now that the results have been updated, we need to insert records into the state register table
+                    # we do this by inserting the latest state for each stream
+                    if len(criteria_deletes_to_upload) > 0:
+                        logger.debug(
+                            f"Applying {len(criteria_deletes_to_upload)} batches of queued criteria deletes"
+                        )
+                        # upload all cached apply results
+                        all_criteria_deletes = pandas.concat(criteria_deletes_to_upload)
+                        self._apply_criteria_deletes_dataframe(all_criteria_deletes)
+
+                    query_id = self._get_query_id_for_now()
                     self._directly_insert_to_state_register(
                         stream_states_for_upload, query_id=query_id
                     )
 
-                # also take care of uploading delete requests
-                # technically these should be managed along with the state, however there aren't any scenarios where checkpointing is done
-                # and deletes have an impact. This is because we only checkpoint in scenarios where the target table is empty first
-                if hasattr(self,'_apply_results_criteria_deletes') and self._apply_results_criteria_deletes is not None:
-                    with self._apply_results_lock:
-                        results:List[pandas.DataFrame] = []
-                        for stream_name, stream_results in self._apply_results_criteria_deletes.items():
-                            results.extend([
-                                x for x in stream_results if x is not None and len(x) > 0
-                            ])
-                        if len(results) > 0:
-                            logger.debug(
-                                f"Applying {len(results)} batches of queued criteria deletes"
-                            )
-                            # upload all cached apply results
-                            all_dfs = pandas.concat(results)
-                            self._apply_criteria_deletes_dataframe(all_dfs)
-                            # clear the delete requests
-                            self._apply_results_criteria_deletes = {}
-
 
             # update the inbound stream record counts, so we can see progress
             # we do this last, because marking a stream as completed will cause the sync engine to process it
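The reworked checkpointing logic above reduces to: find the most recent state update, flush everything enqueued before it together with that state, and retain whatever was enqueued afterwards for the next flush. A trivial stand-in illustration of the slicing:

```python
# Illustrative only: string stand-ins for the wrapper objects in the per-stream list.
results = ["batch1", "batch2", "STATE", "batch3"]

last_state_index = -1
for i in range(len(results) - 1, -1, -1):      # iterate backwards to find the last state update
    if results[i] == "STATE":
        last_state_index = i
        break

to_flush = results[:last_state_index]          # uploaded along with the state
remaining = results[last_state_index + 1:]     # retained, re-checked at the next flush
print(to_flush, remaining)                     # ['batch1', 'batch2'] ['batch3']
```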
@@ -1281,42 +1391,58 @@ class InboundSyncRequest(SyncRequest):
         if stream_name is None or len(stream_name) == 0:
             raise ValueError("Stream name cannot be empty")
         with self._apply_results_lock:
-            existing_results: List[
+            existing_results: List[RecordsToUploadResult | StateResult | CriteriaDeleteResult] = []
             if stream_name in self._apply_results:
                 existing_results = self._apply_results[stream_name]
-            existing_results.append(
+            existing_results.append(RecordsToUploadResult(
+                records=self._preprocess_results_list(stream_name, results, is_delete)
+            ))
             if new_state is not None:
-                existing_results.append(
+                existing_results.append(
+                    StateResult(new_state=new_state)
+                ) # append the new state at the end
             self._apply_results[stream_name] = existing_results
-            # if the total size of all the dataframes exceeds 200MB, apply the results immediately
-            # we'll use df.memory_usage(index=True) for this
             if self.development_mode is False:
                 # note: we want to do it for all values in self._apply_results, not just the new one
-
-
-
-
-
-
-
-
-
-
-
-
+                self._apply_results_if_size_exceeded()
+
+    def _apply_results_if_size_exceeded(self,):
+        # so first we need to get the list of lists from the dictionary values and flatten it
+        # then we can sum the memory usage of each dataframe
+        # if the total exceeds 200MB, we apply the results immediately
+        all_df_lists:List[List[RecordsToUploadResult | StateResult | CriteriaDeleteResult]] = list(self._apply_results.values())
+        # flatten
+        all_dfs:List[pandas.DataFrame] = []
+        for sublist in all_df_lists:
+            for x in sublist:
+                if isinstance(x, RecordsToUploadResult):
+                    all_dfs.append(x.records)
+                if isinstance(x, CriteriaDeleteResult):
+                    all_dfs.append(x.criteria_deletes)
+        combined_length = sum([len(x) for x in all_dfs])
+        # first, don't bother if the count is less than 10000, since it's unlikely to be even close
+        if combined_length > 10000:
+            if sum([x.memory_usage(index=True).sum() for x in all_dfs]) > 200000000:
+                logger.debug(f"Applying results queue immediately due to combined dataframe size")
+                self.apply_results_queue()
 
-    def delete_by_criteria(self, stream_name: str, criteria: Dict[str, Any]):
+    def delete_by_criteria(self, stream_name: str, criteria: Dict[str, Any], new_state: Any):
         """
-        Submits some critera (field→value dict) which will cause matching records to be marked as deleted
+        Submits some critera (field→value dict) which will cause matching records to be marked as deleted
+        during checkpointing or at the end of the run.
         This feature was created primarily for array fields that become child streams.
         The parent record is updated, which means there is a set of new children, but we need to delete the previously sync'd records and we don't know their identifiers.
 
-        The criteria is applied before the new records for the current run are applied.
+        The criteria is applied before the new records for the current run/checkpoint are applied.
 
         For a record to be deleted, it must match fields with all the criteria supplied. At least one field value must be provided.
+
+        If you pass in None for new_state, then the criteria delete will not apply unless you also enqueue record state for the same stream. This provides the ability to do an atomic delete-and-replace.
+        If you pass in some new state, then the criteria deletes will be applied in isolation along with the new state in a transaction.
         """
         if len(criteria) == 0:
             raise ValueError("At least one field value must be provided for deletion criteria")
+
         if stream_name not in self._streams_dict:
             raise ValueError(
                 f"Cannot delete records for stream {stream_name} as its configuration doesn't exist"
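A hedged usage sketch of the new `delete_by_criteria` signature; `inbound_sync_request` stands for the `InboundSyncRequest` available to an inbound plugin, and the stream name, criteria and cursor values are made up:

```python
# Sketch only: inbound_sync_request, "order_lines", "ORDER_ID" and the cursor value are assumptions.
new_state = {"parent_updated_at": "2024-06-01T00:00:00Z"}  # hypothetical cursor for the child stream
inbound_sync_request.delete_by_criteria(
    stream_name="order_lines",          # hypothetical child stream
    criteria={"ORDER_ID": "12345"},     # remove previously synced children of this parent
    new_state=new_state,                # applied together with this state in a transaction
)
```

Passing `new_state=None` instead defers the delete until record state is enqueued for the same stream, giving the atomic delete-and-replace described in the docstring.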
@@ -1329,27 +1455,26 @@ class InboundSyncRequest(SyncRequest):
         logger.debug(
             f"Enqueuing {len(criteria)} delete criteria for stream {stream_name} for upload"
         )
-        existing_results: List[
-        if stream_name in self.
-        existing_results = self.
-        existing_results.append(
-
-
-
+        existing_results: List[RecordsToUploadResult | StateResult | CriteriaDeleteResult] = []
+        if stream_name in self._apply_results:
+            existing_results = self._apply_results[stream_name]
+        existing_results.append(
+            CriteriaDeleteResult(
+                criteria_deletes=pandas.DataFrame([{"STREAM_NAME":stream_name,"DELETE_CRITERIA": criteria}])))
+        if new_state is not None:
+            existing_results.append(
+                StateResult(new_state=new_state)
+            ) # append the new state at the end
+        self._apply_results[stream_name] = existing_results
         if self.development_mode is False:
-
-
-
-
-
-
-
-
-            # first, don't both if the count is less than 10000, since it's unlikely to be even close
-            if combined_length > 10000:
-                if sum([x.memory_usage(index=True).sum() for x in all_dfs if isinstance(x, pandas.DataFrame)]) > 200000000:
-                    logger.debug(f"Applying criteria deletes queue immediately due to combined dataframe size")
-                    self.apply_results_queue()
+            self._apply_results_if_size_exceeded()
+
+    def mark_stream_started(self, stream_name: str):
+        """
+        Marks a stream as started, this is called automatically per stream when using @managed_inbound_processing.
+        """
+        logger.debug(f"Marking stream {stream_name} as started locally")
+        self._stream_start_times[stream_name] = time.time()
 
     def mark_stream_complete(self, stream_name: str):
         """
@@ -1357,6 +1482,20 @@ class InboundSyncRequest(SyncRequest):
         If @managed_inbound_processing is not used, call this whenever a stream has finished recieving records.
         """
         logger.debug(f"Marking stream {stream_name} as completed locally")
+        if stream_name in self._stream_start_times:
+            start_time = self._stream_start_times[stream_name]
+            duration = time.time() - start_time
+            stream_duration_gauge.set(
+                amount=duration,
+                attributes={
+                    "stream_name": stream_name,
+                    "sync_run_id": str(self._run_id),
+                    "sync_id": str(self._sync_id),
+                    "branch_name": str(self._branch_name) if self._branch_name is not None else 'main',
+                    "sync_direction": "inbound",
+                    "plugin_id": self.plugin_instance.get_manifest().plugin_id,
+                },
+            )
         with self._apply_results_lock:
             self._completed_streams.append(stream_name)
             # dedup just in case it's called twice
@@ -1402,7 +1541,7 @@ class InboundSyncRequest(SyncRequest):
         with self._apply_results_lock:
             if stream_name in self._apply_results:
                 if len(self._apply_results[stream_name]) > 0:
-                    self._apply_results[stream_name].append(new_state)
+                    self._apply_results[stream_name].append(StateResult(new_state=new_state))
                     return
 
         self._directly_insert_to_state_register(
@@ -1463,7 +1602,7 @@ class InboundSyncRequest(SyncRequest):
             logger.debug(f"Failure to convert inbound data: {str(exception)}")
             return data
 
-    def _preprocess_results_list(self, stream_name: str, results: List[Dict],is_delete:Union[bool,List[bool]]):
+    def _preprocess_results_list(self, stream_name: str, results: List[Dict],is_delete:Union[bool,List[bool]]) -> pandas.DataFrame:
         """
         Creates a dataframe from the enqueued list, ready to upload.
         The result is a dataframe contain all (and only):
@@ -1608,7 +1747,7 @@ class InboundSyncRequest(SyncRequest):
         hash_object = hashlib.sha256(key_string.encode())
         return hash_object.hexdigest()
 
-    def _apply_results_dataframe(self, stream_names: List[str], results_df: pandas.DataFrame)
+    def _apply_results_dataframe(self, stream_names: List[str], results_df: pandas.DataFrame):
         """
         Applies results for an inbound sync. The results are staged into a temporary
         table in Snowflake, so that we can make an atomic commit at the end.
@@ -1635,7 +1774,6 @@ class InboundSyncRequest(SyncRequest):
                 raise ValueError(
                     f"Failed to write results to table {self._full_results_table_name}"
                 )
-            query_id = self._get_query_id_for_now()
             logger.debug(
                 f"Wrote {nrows} rows and {nchunks} chunks to table {self._full_results_table_name}"
             )
@@ -1648,7 +1786,6 @@ class InboundSyncRequest(SyncRequest):
             # )
             for stream_name in stream_names:
                 self._results_exist[stream_name] = True
-            return query_id
         else:
             logger.debug("Results dataframe is empty, not applying")
 
@@ -2138,6 +2275,9 @@ def __managed_outbound_processing_worker(
     Consumes a fixed sized set of records by passing them to the wrapped function,
     while adhering to the defined API constraints.
     """
+    # Mark this thread as a managed worker thread
+    set_managed_worker_thread(True)
+
     context.attach(plugin_class_obj.opentelemetry_context)
     logger.debug(
         f"worker {worker_index} processing. Cancelled: {cancellation_token.is_set()}"
@@ -2277,6 +2417,8 @@ def managed_outbound_processing(concurrency: int, batch_size: int):
                     task.join() # Ensure the thread is fully finished
                     tasks.remove(task)
                     logger.info(f"Thread {task.name} has completed processing")
+                # Service any OAuth token requests from worker threads while we wait
+                self._sync_request._service_oauth_token_request()
                 time.sleep(1) # Avoid busy waiting
             logger.info("All workers completed processing")
 
@@ -2321,6 +2463,9 @@ def __managed_inbound_processing_worker(
     A worker thread for the managed_inbound_processing annotation.
     Passes single streams at a time to the wrapped function, adhering to concurrency constraints.
     """
+    # Mark this thread as a managed worker thread
+    set_managed_worker_thread(True)
+
     context.attach(plugin_class_obj.opentelemetry_context)
     while not cancellation_token.is_set():
         # Get our generator object out of the queue
@@ -2333,12 +2478,8 @@ def __managed_inbound_processing_worker(
             sync_request: InboundSyncRequest = cast(
                 InboundSyncRequest, plugin_class_obj._sync_request
             ) # pylint: disable=protected-access
-
-
-                description="The duration of stream processing",
-                unit="s",
-            )
-            start_time = time.time()
+            if stream.stream_name not in sync_request._stream_start_times:
+                sync_request.mark_stream_started(stream.stream_name)
             # restore the first argument, was originally the dataframe/generator but now it's the appropriately sized dataframe
             try:
                 with tracer.start_as_current_span("managed_inbound_processing") as managed_inbound_processing_span:
@@ -2370,19 +2511,6 @@ def __managed_inbound_processing_worker(
                     omnata_plugin_logger.error(f"{type(e).__name__} syncing stream {stream.stream_name}",
                         exc_info=True,
                         extra={'stream_name':stream.stream_name})
-            finally:
-                duration = time.time() - start_time
-                stream_duration_counter.record(
-                    duration,
-                    attributes={
-                        "stream_name": stream.stream_name,
-                        "sync_run_id": str(sync_request._run_id),
-                        "sync_id": str(sync_request._sync_id),
-                        "branch_name": str(sync_request._branch_name) if sync_request._branch_name is not None else 'main',
-                        "sync_direction": "inbound",
-                        "plugin_id": plugin_class_obj.get_manifest().plugin_id,
-                    },
-                )
         except queue.Empty:
             logger.debug("streams queue is empty")
             return
@@ -2476,6 +2604,8 @@ def managed_inbound_processing(concurrency: int):
                     task.join() # Ensure the thread is fully finished
                     tasks.remove(task)
                     logger.info(f"Thread {task.name} has completed processing")
+                # Service any OAuth token requests from worker threads while we wait
+                self._sync_request._service_oauth_token_request()
                 time.sleep(1) # Avoid busy waiting
             logger.info("All workers completed processing")
 
@@ -2733,7 +2863,7 @@ def omnata_udf(
 
     return decorator
 
-def find_udf_functions(path:str = '.',top_level_modules:Optional[List[str]] = None) -> List[UDFDefinition]:
+def find_udf_functions(path:str = '.',top_level_modules:Optional[List[str]] = None, exclude_top_level_modules:Optional[List[str]] = None) -> List[UDFDefinition]:
     """
     Finds all functions in the specified directory which have the 'omnata_udf' decorator applied
     """
@@ -2749,6 +2879,9 @@ def find_udf_functions(path:str = '.',top_level_modules:Optional[List[str]] = No
         if top_level_modules is not None:
             if len([x for x in top_level_modules if module_name.startswith(x)]) == 0:
                 continue
+        if exclude_top_level_modules is not None:
+            if any(module_name.startswith(y) for y in exclude_top_level_modules):
+                continue
         module = importlib.import_module(module_name)
 
         # Iterate over all members of the module
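A hedged example of the new exclusion parameter, assuming `find_udf_functions` is imported from `omnata_plugin_runtime.omnata_plugin` as the surrounding hunks suggest; the module prefixes are hypothetical:

```python
from omnata_plugin_runtime.omnata_plugin import find_udf_functions

udfs = find_udf_functions(
    path=".",
    exclude_top_level_modules=["tests", "scripts"],  # skip any module whose name starts with these prefixes
)
print([udf for udf in udfs])
```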
omnata_plugin_runtime/plugin_entrypoints.py:

@@ -188,6 +188,8 @@ class PluginEntrypoint:
             sync_id=request.sync_id,
             branch_name=request.sync_branch_name
         )
+        # Store plugin_instance reference in parameters for worker thread OAuth token access
+        parameters._plugin_instance = self._plugin_instance # pylint: disable=protected-access
         try:
             self._plugin_instance._configuration_parameters = parameters
             with tracer.start_as_current_span("invoke_plugin") as span:
@@ -246,6 +248,8 @@ class PluginEntrypoint:
             sync_id=request.sync_id,
             branch_name=request.sync_branch_name
         )
+        # Store plugin_instance reference in parameters for worker thread OAuth token access
+        parameters._plugin_instance = self._plugin_instance # pylint: disable=protected-access
         try:
             self._plugin_instance._configuration_parameters = parameters
 
omnata_plugin_runtime/threading_utils.py (new file):

@@ -0,0 +1,27 @@
+"""
+Utilities for thread management in the plugin runtime.
+"""
+import threading
+
+# Thread-local storage to track if we're in a managed worker thread
+# This is more reliable than checking thread names
+_thread_local = threading.local()
+
+
+def is_managed_worker_thread() -> bool:
+    """
+    Check if the current thread is a managed worker thread.
+    Returns True if running in a @managed_inbound_processing or @managed_outbound_processing worker.
+
+    This is set by the decorator worker functions and is more reliable than checking thread names.
+    """
+    return getattr(_thread_local, 'is_managed_worker', False)
+
+
+def set_managed_worker_thread(is_worker: bool):
+    """
+    Set the flag indicating whether the current thread is a managed worker thread.
+
+    This should only be called by the managed processing decorator worker functions.
+    """
+    _thread_local.is_managed_worker = is_worker
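A small demo of the new helpers, assuming the package is installed. The flag is stored in thread-local state, so it is `False` everywhere except threads that have explicitly set it (which is what the managed processing workers now do):

```python
import threading
from omnata_plugin_runtime.threading_utils import is_managed_worker_thread, set_managed_worker_thread

def worker():
    set_managed_worker_thread(True)                        # what the managed processing decorators do internally
    print("in worker:", is_managed_worker_thread())        # True

t = threading.Thread(target=worker)
t.start()
t.join()
print("in main thread:", is_managed_worker_thread())      # False
```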
omnata_plugin_runtime-0.12.2a347.dist-info/METADATA (new file):

@@ -0,0 +1,56 @@
+Metadata-Version: 2.4
+Name: omnata-plugin-runtime
+Version: 0.12.2a347
+Summary: Classes and common runtime components for building and running Omnata Plugins
+License-File: LICENSE
+Author: James Weakley
+Author-email: james.weakley@omnata.com
+Requires-Python: >=3.10,<=3.13
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Dist: annotated-types (<=0.6.0)
+Requires-Dist: certifi (<=2025.1.31)
+Requires-Dist: cffi (<=2.0.0)
+Requires-Dist: charset-normalizer (<=3.4.4)
+Requires-Dist: cryptography (<=46.0.3)
+Requires-Dist: filelock (<=3.20.0)
+Requires-Dist: idna (<=3.11)
+Requires-Dist: jinja2 (>=3.1.2,<=3.1.6)
+Requires-Dist: markupsafe (<=3.0.2)
+Requires-Dist: numpy (<=2.3.5)
+Requires-Dist: opentelemetry-api (<=1.38.0)
+Requires-Dist: packaging (<=25.0)
+Requires-Dist: pandas (<=2.3.3)
+Requires-Dist: platformdirs (<=4.5.0)
+Requires-Dist: protobuf (<=6.33.0)
+Requires-Dist: pyarrow (<=21.0.0)
+Requires-Dist: pycparser (<=2.23)
+Requires-Dist: pydantic (>=2,<=2.12.4)
+Requires-Dist: pydantic-core (<=2.41.5)
+Requires-Dist: pyjwt (<=2.10.1)
+Requires-Dist: pyopenssl (<=225.3.0)
+Requires-Dist: pytz (<=2025.2)
+Requires-Dist: pyyaml (<=6.0.3)
+Requires-Dist: requests (>=2,<=2.32.5)
+Requires-Dist: setuptools (<=80.9.0)
+Requires-Dist: snowflake-connector-python (>=3.0.0,<=4.2.0)
+Requires-Dist: snowflake-snowpark-python (>=1.20.0,<=1.44.0)
+Requires-Dist: snowflake-telemetry-python (<=0.5.0)
+Requires-Dist: tenacity (>=8,<9)
+Requires-Dist: tomlkit (<=0.13.3)
+Requires-Dist: urllib3 (<=2.5.0)
+Requires-Dist: wheel (<=0.45.1)
+Requires-Dist: wrapt (<=2.0.1)
+Description-Content-Type: text/markdown
+
+# omnata-plugin-runtime
+This package is a runtime dependency for [Omnata Plugins](https://docs.omnata.com/omnata-product-documentation/omnata-sync-for-snowflake/plugins).
+
+It contains data classes, interfaces and application logic used to perform plugin operations.
+
+For instructions on creating plugins, visit our [docs site](https://docs.omnata.com/omnata-product-documentation/omnata-sync-for-snowflake/plugins/creating-plugins).
+
+
omnata_plugin_runtime-0.12.2a347.dist-info/RECORD (new file):

@@ -0,0 +1,14 @@
+omnata_plugin_runtime/__init__.py,sha256=MS9d1whnfT_B3-ThqZ7l63QeC_8OEKTuaYV5wTwRpBA,1576
+omnata_plugin_runtime/api.py,sha256=5gbjbnFy72Xjf0E3kbG23G0V2J3CorvD5kpBn_BkdlI,8084
+omnata_plugin_runtime/configuration.py,sha256=-tN0yztdi-trgzKhLxSsPU0Ar3EBOmsNNLWFIIg3Bbc,49714
+omnata_plugin_runtime/forms.py,sha256=Lrbr3otsFDrvHWJw7v-slsW4PvEHJ6BG1Yl8oaJfiDo,20529
+omnata_plugin_runtime/json_schema.py,sha256=Wu0rByO8pFSZ3ugKqfs_yWMU24PwiC2jmoO83n9fycM,59852
+omnata_plugin_runtime/logging.py,sha256=qUtRA9syQNnjfJZHA2W18K282voXX6vHwrBIPOBo1n8,4521
+omnata_plugin_runtime/omnata_plugin.py,sha256=AJZFi9PIykFNKCWsM9mZgmXss4JyI5VWDPe2m4jnYqA,148592
+omnata_plugin_runtime/plugin_entrypoints.py,sha256=9vN1m0w7-z3qu9up1qZokfncvJlQL6tYPh5ASAuY5VQ,33023
+omnata_plugin_runtime/rate_limiting.py,sha256=qpr5esU4Ks8hMzuMpSR3gLFdor2ZUXYWCjmsQH_K6lQ,25882
+omnata_plugin_runtime/threading_utils.py,sha256=fqlKLCPTEPVYdMinf8inPKLYxwD4d4WWVMLB3a2mNqk,906
+omnata_plugin_runtime-0.12.2a347.dist-info/METADATA,sha256=8g-oocptHhzfjF_DyFkNfyVZSQus3JeA5NdofJlD99U,2235
+omnata_plugin_runtime-0.12.2a347.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+omnata_plugin_runtime-0.12.2a347.dist-info/licenses/LICENSE,sha256=rGaMQG3R3F5-JGDp_-rlMKpDIkg5n0SI4kctTk8eZSI,56
+omnata_plugin_runtime-0.12.2a347.dist-info/RECORD,,
omnata_plugin_runtime-0.11.6.dist-info/METADATA (removed):

@@ -1,55 +0,0 @@
-Metadata-Version: 2.4
-Name: omnata-plugin-runtime
-Version: 0.11.6
-Summary: Classes and common runtime components for building and running Omnata Plugins
-License-File: LICENSE
-Author: James Weakley
-Author-email: james.weakley@omnata.com
-Requires-Python: >=3.9,<=3.11
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Requires-Dist: annotated-types (<=0.6.0)
-Requires-Dist: certifi (<=2024.8.30)
-Requires-Dist: cffi (<=1.16.0)
-Requires-Dist: charset-normalizer (<=3.3.2)
-Requires-Dist: cryptography (<=43.0.0)
-Requires-Dist: filelock (<=3.13.1)
-Requires-Dist: idna (<=3.7)
-Requires-Dist: jinja2 (>=3.1.2,<=3.1.4)
-Requires-Dist: markupsafe (<=2.1.3)
-Requires-Dist: numpy (<=2.1.3)
-Requires-Dist: opentelemetry-api (<=1.23.0)
-Requires-Dist: packaging (<=24.1)
-Requires-Dist: pandas (<=2.2.3)
-Requires-Dist: platformdirs (<=3.10.0)
-Requires-Dist: protobuf (<=4.25.3)
-Requires-Dist: pyarrow (<=16.1.0)
-Requires-Dist: pycparser (<=2.21)
-Requires-Dist: pydantic (>=2,<=2.8.2)
-Requires-Dist: pydantic-core (<=2.21.0)
-Requires-Dist: pyjwt (<=2.8.0)
-Requires-Dist: pyopenssl (<=24.2.1)
-Requires-Dist: pytz (<=2024.1)
-Requires-Dist: pyyaml (<=6.0.1)
-Requires-Dist: requests (>=2,<=2.32.3)
-Requires-Dist: setuptools (<=72.1.0)
-Requires-Dist: snowflake-connector-python (>=3,<=3.12.0)
-Requires-Dist: snowflake-snowpark-python (>=1.20.0,<=1.24.0)
-Requires-Dist: snowflake-telemetry-python (<=0.5.0)
-Requires-Dist: tenacity (>=8,<=8.2.3)
-Requires-Dist: tomlkit (<=0.11.1)
-Requires-Dist: urllib3 (<=2.2.2)
-Requires-Dist: wheel (<=0.43.0)
-Requires-Dist: wrapt (<=1.14.1)
-Description-Content-Type: text/markdown
-
-# omnata-plugin-runtime
-This package is a runtime dependency for [Omnata Plugins](https://docs.omnata.com/omnata-product-documentation/omnata-sync-for-snowflake/plugins).
-
-It contains data classes, interfaces and application logic used to perform plugin operations.
-
-For instructions on creating plugins, visit our [docs site](https://docs.omnata.com/omnata-product-documentation/omnata-sync-for-snowflake/plugins/creating-plugins).
-
-
omnata_plugin_runtime-0.11.6.dist-info/RECORD (removed):

@@ -1,13 +0,0 @@
-omnata_plugin_runtime/__init__.py,sha256=MS9d1whnfT_B3-ThqZ7l63QeC_8OEKTuaYV5wTwRpBA,1576
-omnata_plugin_runtime/api.py,sha256=5gbjbnFy72Xjf0E3kbG23G0V2J3CorvD5kpBn_BkdlI,8084
-omnata_plugin_runtime/configuration.py,sha256=SffokJfgvy6V3kUsoEjXcK3GdNgHo6U3mgBEs0qBv4I,46972
-omnata_plugin_runtime/forms.py,sha256=Lrbr3otsFDrvHWJw7v-slsW4PvEHJ6BG1Yl8oaJfiDo,20529
-omnata_plugin_runtime/json_schema.py,sha256=ZfHMG-XSJBE9Smt33Y6GPpl5skF7pB1TRCf9AvWuw-Y,59705
-omnata_plugin_runtime/logging.py,sha256=qUtRA9syQNnjfJZHA2W18K282voXX6vHwrBIPOBo1n8,4521
-omnata_plugin_runtime/omnata_plugin.py,sha256=GTFxri5FIkZhOI70FQpeLoKRo3zOQlqo2dUUUTaDppE,143134
-omnata_plugin_runtime/plugin_entrypoints.py,sha256=_1pDLov3iQorGmfcae8Sw2bVjxw1vYeowBaKKNzRclQ,32629
-omnata_plugin_runtime/rate_limiting.py,sha256=qpr5esU4Ks8hMzuMpSR3gLFdor2ZUXYWCjmsQH_K6lQ,25882
-omnata_plugin_runtime-0.11.6.dist-info/METADATA,sha256=u_aYClOOFjKwP3sbJEA0Dp-YEX-p0szbOCYEPkT7tqA,2179
-omnata_plugin_runtime-0.11.6.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
-omnata_plugin_runtime-0.11.6.dist-info/licenses/LICENSE,sha256=rGaMQG3R3F5-JGDp_-rlMKpDIkg5n0SI4kctTk8eZSI,56
-omnata_plugin_runtime-0.11.6.dist-info/RECORD,,
{omnata_plugin_runtime-0.11.6.dist-info → omnata_plugin_runtime-0.12.2a347.dist-info}/licenses/LICENSE: file without changes.