warpzone-sdk 15.0.0__tar.gz → 15.0.0.dev2__tar.gz
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/PKG-INFO +3 -1
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/pyproject.toml +3 -1
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/db/client.py +13 -63
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/integrations.py +1 -1
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/monitor.py +15 -16
- warpzone_sdk-15.0.0.dev2/warpzone/monitor/__init__.py +2 -0
- warpzone_sdk-15.0.0.dev2/warpzone/monitor/logs.py +24 -0
- warpzone_sdk-15.0.0.dev2/warpzone/monitor/traces.py +99 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/servicebus/events/client.py +13 -13
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tablestorage/db/client.py +27 -15
- warpzone_sdk-15.0.0/warpzone/monitor/__init__.py +0 -2
- warpzone_sdk-15.0.0/warpzone/monitor/logs.py +0 -87
- warpzone_sdk-15.0.0/warpzone/monitor/traces.py +0 -128
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/README.md +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/blobstorage/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/blobstorage/client.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/db/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/deltastorage/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/deltastorage/data_types.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/deltastorage/generated_columns.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/deltastorage/lock_client.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/deltastorage/schema.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/deltastorage/slicing.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/deltastorage/store.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/deltastorage/table.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/enums/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/enums/topicenum.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/checks.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/functionize.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/process.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/processors/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/processors/dependencies.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/processors/outputs.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/processors/triggers.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/signature.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/types.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/healthchecks/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/healthchecks/model.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/servicebus/data/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/servicebus/data/client.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/servicebus/events/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/servicebus/events/triggers.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tablestorage/db/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tablestorage/db/base_client.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tablestorage/db/table_config.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tablestorage/tables/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tablestorage/tables/client.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tablestorage/tables/entities.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tablestorage/tables/helpers.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/testing/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/testing/assertions.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/testing/data.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/testing/matchers.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tools/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tools/copy.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/transform/__init__.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/transform/data.py +0 -0
- {warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/transform/schema.py +0 -0
{warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: warpzone-sdk
-Version: 15.0.0
+Version: 15.0.0.dev2
 Summary: The main objective of this package is to centralize logic used to interact with Azure Functions, Azure Service Bus and Azure Table Storage
 Author: Team Enigma
 Author-email: enigma@energinet.dk
@@ -13,9 +13,11 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.14
 Requires-Dist: aiohttp (>=3.8.3)
 Requires-Dist: azure-core (>=1.26.3)
+Requires-Dist: azure-core-tracing-opentelemetry (>=1.0.0b12)
 Requires-Dist: azure-data-tables (>=12.4.0)
 Requires-Dist: azure-functions (>=1.12.0)
 Requires-Dist: azure-identity (>=1.15.0)
+Requires-Dist: azure-monitor-opentelemetry (>=1.8.4)
 Requires-Dist: azure-monitor-opentelemetry-exporter (>=1.0.0b36)
 Requires-Dist: azure-servicebus (>=7.8.0)
 Requires-Dist: azure-storage-blob (>=12.14.1)
{warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "warpzone-sdk"
-version = "15.0.0"
+version = "15.0.0.dev2"
 description = "The main objective of this package is to centralize logic used to interact with Azure Functions, Azure Service Bus and Azure Table Storage"
 authors = [{ name = "Team Enigma", email = "enigma@energinet.dk" }]
 requires-python = ">=3.10"
@@ -23,6 +23,8 @@ azure-identity = ">=1.15.0"
 azure-servicebus = ">=7.8.0"
 azure-storage-blob = ">=12.14.1"
 aiohttp = ">=3.8.3"
+azure-core-tracing-opentelemetry = ">=1.0.0b12"
+azure-monitor-opentelemetry = ">=1.8.4"
 azure-monitor-opentelemetry-exporter = ">=1.0.0b36"
 opentelemetry-sdk = ">=1.32.0"
 azure-functions = ">=1.12.0"
{warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/db/client.py

@@ -32,51 +32,29 @@ class WarpzoneDatabaseClient:
         self,
         path: str,
         storage_options: dict[str, str] | None = None,
-        table_prefix: str = "",
     ):
         self.store = Store(
             path=path,
             storage_options=storage_options,
         )
-        self.table_prefix = table_prefix

     @classmethod
-    def
+    def from_storage_account(
         cls,
         storage_account: str,
         container_name: str = "datasets",
-        sub_path: str = "",
-        table_prefix: str = "",
         credential: (
             AzureNamedKeyCredential | AzureSasCredential | TokenCredential
         ) = DefaultAzureCredential(),
     ):
-        """Create a WarpzoneDatabaseClient from resource name (storage account).
-        This assumes the path of the delta lake is of the form:
-        abfss://{container_name}@{storage_account}.dfs.core.windows.net/{sub_path}
-
-        Args:
-            storage_account (str): Storage account name.
-            container_name (str, optional): Container name. Defaults to "datasets".
-            sub_path (str, optional): Sub-path within the container. Defaults to "".
-            table_prefix (str, optional): Table prefix to use (e.g. `mz_` for archive).
-                Defaults to "".
-            credential (optional): Azure credential to use.
-                Defaults to DefaultAzureCredential().
-        """
         path = f"abfss://{container_name}@{storage_account}.dfs.core.windows.net"
-        if sub_path:
-            path += f"/{sub_path}"
-
         token = credential.get_token("https://storage.azure.com/.default")
         storage_options = {
             "account_name": storage_account,
             "token": token.token,
         }

-        return cls(
-            path=path, storage_options=storage_options, table_prefix=table_prefix
-        )
+        return cls(path=path, storage_options=storage_options)

     def get_unit_and_multiple(self, timedelta: pd.Timedelta) -> tuple[str | None, int]:
         """
@@ -170,35 +148,7 @@
         time_interval: Optional[pdz.TimeInterval] = None,
         time_travel: Optional[pdz.TimeTravel] = None,
         filters: Optional[dict[str, object]] = None,
-        include_validity_period_columns: bool = False,
-        include_generated_columns: bool = False,
     ) -> pd.DataFrame:
-        """Query table.
-        Query defaults are set to match old Table Storage client behavior.
-        Time travel defaults to "as of now"
-        Validity period columns are dropped by default.
-        Generated columns are dropped by default.
-
-        Args:
-            table_name (str): Name of the table
-            time_interval (Optional[pdz.TimeInterval], optional): Time interval for the
-                query. Defaults to None.
-            time_travel (Optional[pdz.TimeTravel], optional): Time travel information.
-                Defaults to None.
-            filters (Optional[dict[str, object]], optional): Filters to apply to the
-                query.
-                Defaults to None.
-            include_validity_period_columns (bool, optional): Whether to include
-                validity period columns in the result;
-                (e.g. `valid_from_time_utc`, `valid_to_time_utc`).
-                Defaults to False.
-            include_generated_columns (bool, optional): Whether to include generated
-                columns in the result; (e.g. `valid_from_time_utc`, `valid_to_time_utc`).
-                Defaults to False.
-
-        Returns:
-            pd.DataFrame: The result of the query.
-        """
         # We do 'camelCaseToSnake_case' conversion here because the old
         # naming convention used in WarpZone was CamelCase, while the new
         # naming convention is snake_case. The goal is to remove this
@@ -241,16 +191,16 @@
         for col in pd_df.select_dtypes(include=["datetime", "datetimetz"]).columns:
             pd_df[col] = pd_df[col].dt.floor("s").dt.as_unit("ns")

-        #
-
-
-
-
-
-
-
-
-
+        # We remove the valid-from and valid-to columns, as well
+        # as any generated columns, as this was not present
+        # in the old solution (Azure Table Stroage)
+        generated_cols = []
+        for field in table.schema().fields:
+            if field.generated_as is not None:
+                generated_cols.append(field.column_name)
+
+        pd_df = pd_df.drop(
+            columns=["valid_from_time_utc", "valid_to_time_utc"] + generated_cols
+        )

         return pd_df
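A minimal caller-side sketch of the slimmed-down factory shown above, assuming only what the hunks show (module path as listed in the file table, the from_storage_account classmethod, and DefaultAzureCredential as the default credential). The storage account name is a placeholder; any sub-path or table prefix now has to be handled by the caller, since the 15.0.0.dev2 signature no longer accepts sub_path or table_prefix:

from warpzone.db.client import WarpzoneDatabaseClient

# Hypothetical storage account; DefaultAzureCredential() is used when no
# credential argument is passed, as in the signature shown above.
client = WarpzoneDatabaseClient.from_storage_account(
    storage_account="examplestorage",
    container_name="datasets",
)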
{warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/function/integrations.py

@@ -62,7 +62,7 @@ def get_db_client() -> WarpzoneDatabaseClient:


 def get_delta_db_client() -> WarpzoneDeltaDatabaseClient:
-    db_client = WarpzoneDeltaDatabaseClient.
+    db_client = WarpzoneDeltaDatabaseClient.from_storage_account(
         os.environ["OPERATIONAL_DATA_STORAGE_ACCOUNT"],
         credential=_credential,
     )
@@ -1,34 +1,33 @@
|
|
|
1
|
-
import
|
|
1
|
+
import inspect
|
|
2
2
|
from contextlib import contextmanager
|
|
3
3
|
from typing import Callable
|
|
4
4
|
|
|
5
5
|
import azure.functions as func
|
|
6
|
+
from azure.monitor.opentelemetry import configure_azure_monitor
|
|
7
|
+
from opentelemetry import trace
|
|
6
8
|
|
|
7
9
|
from warpzone.function.types import SingleArgumentCallable
|
|
8
10
|
from warpzone.monitor import logs, traces
|
|
9
11
|
|
|
12
|
+
# Configure Azure Monitor first
|
|
13
|
+
configure_azure_monitor()
|
|
14
|
+
|
|
15
|
+
# Apply trace filtering to suppress all Azure SDK traces except Service Bus
|
|
16
|
+
tracer_provider = trace.get_tracer_provider()
|
|
17
|
+
if hasattr(tracer_provider, "_active_span_processor"):
|
|
18
|
+
# Wrap the existing span processor with our filter
|
|
19
|
+
original_processor = tracer_provider._active_span_processor
|
|
20
|
+
filtered_processor = traces.AzureSDKTraceFilter(original_processor)
|
|
21
|
+
tracer_provider._active_span_processor = filtered_processor
|
|
22
|
+
|
|
10
23
|
SUBJECT_IDENTIFIER = "<Subject>"
|
|
11
24
|
|
|
12
25
|
tracer = traces.get_tracer(__name__)
|
|
13
26
|
logger = logs.get_logger(__name__)
|
|
14
27
|
|
|
15
28
|
|
|
16
|
-
def configure_monitoring():
|
|
17
|
-
"""
|
|
18
|
-
Configure logging and tracing on Azure Function to
|
|
19
|
-
- export telemetry to App Insights
|
|
20
|
-
"""
|
|
21
|
-
# configure tracer provider
|
|
22
|
-
traces.configure_tracing()
|
|
23
|
-
|
|
24
|
-
# configure logger provider
|
|
25
|
-
logs.configure_logging()
|
|
26
|
-
|
|
27
|
-
|
|
28
29
|
@contextmanager
|
|
29
30
|
def run_in_trace_context(context: func.Context):
|
|
30
|
-
configure_monitoring()
|
|
31
|
-
|
|
32
31
|
trace_context = context.trace_context
|
|
33
32
|
with traces.set_trace_context(
|
|
34
33
|
trace_context.trace_parent, trace_context.trace_state
|
|
@@ -65,7 +64,7 @@ def monitor(main: SingleArgumentCallable) -> Callable:
|
|
|
65
64
|
result = main(arg)
|
|
66
65
|
return result
|
|
67
66
|
|
|
68
|
-
if
|
|
67
|
+
if inspect.iscoroutinefunction(main):
|
|
69
68
|
return wrapper_async
|
|
70
69
|
else:
|
|
71
70
|
return wrapper
|
|
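The module-level wiring above reaches into the provider's private _active_span_processor attribute. The same filtering idea can be reproduced with only public OpenTelemetry SDK APIs; the following is a hedged, minimal standalone sketch with illustrative names, not the package's exact wiring:

from opentelemetry import trace
from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor, TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor


class DropAzureSdkSpans(SpanProcessor):
    """Forward spans to a wrapped processor unless they come from an azure.* scope."""

    def __init__(self, wrapped: SpanProcessor):
        self.wrapped = wrapped

    def on_end(self, span: ReadableSpan) -> None:
        scope = span.instrumentation_scope
        if scope and scope.name and scope.name.startswith("azure."):
            return  # drop Azure SDK internals instead of exporting them
        self.wrapped.on_end(span)


provider = TracerProvider()
provider.add_span_processor(
    DropAzureSdkSpans(SimpleSpanProcessor(ConsoleSpanExporter()))
)
trace.set_tracer_provider(provider)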
warpzone_sdk-15.0.0.dev2/warpzone/monitor/logs.py

@@ -0,0 +1,24 @@
+import logging
+
+# Suppress verbose logging from Azure SDK
+logging.getLogger("azure.core.pipeline.policies.http_logging_policy").setLevel(
+    logging.WARNING
+)
+logging.getLogger("azure.data.tables").setLevel(logging.WARNING)
+logging.getLogger("azure.storage.blob").setLevel(logging.WARNING)
+logging.getLogger("azure.servicebus").setLevel(logging.WARNING)
+logging.getLogger("uamqp").setLevel(logging.WARNING)
+logging.getLogger("azure.identity").setLevel(logging.WARNING)
+# Suppress Azure Functions host/worker logging (e.g., trigger details)
+logging.getLogger("azure_functions_worker").setLevel(logging.WARNING)
+logging.getLogger("azure.functions").setLevel(logging.WARNING)
+# Suppress Azure Monitor exporter logging (e.g., transmission success messages)
+logging.getLogger("azure.monitor.opentelemetry.exporter").setLevel(logging.WARNING)
+
+
+def get_logger(name: str):
+    # set up standard logger
+    logger = logging.getLogger(name)
+    logger.setLevel(logging.INFO)
+
+    return logger
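A short usage sketch of the new, much thinner helper (module path as listed above); export to App Insights is expected to come from the configure_azure_monitor() call in warpzone.function.monitor rather than from this module:

from warpzone.monitor import logs

logger = logs.get_logger(__name__)
logger.info("function handler started")  # plain stdlib logger at INFO level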
warpzone_sdk-15.0.0.dev2/warpzone/monitor/traces.py

@@ -0,0 +1,99 @@
+from contextlib import contextmanager
+
+from azure.core.settings import settings
+from opentelemetry import context, trace
+from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor
+from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+
+settings.tracing_implementation = "opentelemetry"
+
+
+class AzureSDKTraceFilter(SpanProcessor):
+    """Custom SpanProcessor to filter out Azure SDK traces except Service Bus.
+
+    It drops spans from Azure SDK libraries except Service Bus messages,
+    preventing them from being exported to Azure Monitor.
+    """
+
+    def __init__(self, wrapped_processor: SpanProcessor):
+        """Initialize with the actual processor to wrap.
+
+        Args:
+            wrapped_processor: The underlying processor (e.g., BatchSpanProcessor)
+        """
+        self.wrapped_processor = wrapped_processor
+
+    def on_start(
+        self, span: ReadableSpan, parent_context: context.Context = None
+    ) -> None:
+        """Called when a span is started."""
+        self.wrapped_processor.on_start(span, parent_context)
+
+    def on_end(self, span: ReadableSpan) -> None:
+        """Called when a span is ended. Filter based on span attributes."""
+        # Check if service bus span - always allow
+        if "servicebus.message" in span.name.lower():
+            self.wrapped_processor.on_end(span)
+            return
+
+        # Check if this is an Azure SDK span we want to suppress
+        instrumentation_scope = span.instrumentation_scope
+        if instrumentation_scope and instrumentation_scope.name:
+            # Suppress spans from Azure SDK libraries
+            if instrumentation_scope.name.startswith("azure."):
+                return  # Drop this span
+
+        # Pass through all other spans
+        self.wrapped_processor.on_end(span)
+
+    def shutdown(self) -> None:
+        """Shutdown the wrapped processor."""
+        self.wrapped_processor.shutdown()
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Force flush the wrapped processor."""
+        return self.wrapped_processor.force_flush(timeout_millis)
+
+
+@contextmanager
+def set_trace_context(trace_parent: str, trace_state: str = ""):
+    """Context manager for setting the trace context
+
+    Args:
+        trace_parent (str): Trace parent ID
+        trace_state (str, optional): Trace state. Defaults to "".
+    """
+    carrier = {"traceparent": trace_parent, "tracestate": trace_state}
+    ctx = TraceContextTextMapPropagator().extract(carrier=carrier)
+
+    token = context.attach(ctx)  # attach context before run
+    try:
+        yield
+    finally:
+        context.detach(token)  # detach context after run
+
+
+def get_tracer(name: str):
+    tracer = trace.get_tracer(name)
+    return tracer
+
+
+def get_current_diagnostic_id() -> str:
+    """Gets diagnostic id from current span
+
+    The diagnostic id is a concatenation of operation-id and parent-id
+
+    Returns:
+        str: diagnostic id
+    """
+    span = trace.get_current_span()
+
+    if not span.is_recording():
+        return ""
+
+    operation_id = "{:016x}".format(span.context.trace_id)
+    parent_id = "{:016x}".format(span.context.span_id)
+
+    diagnostic_id = f"00-{operation_id}-{parent_id}-01"
+
+    return diagnostic_id
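A hedged usage sketch of the new helpers, assuming an SDK tracer provider is already configured (as warpzone.function.monitor does at import) and using an illustrative W3C traceparent value:

from warpzone.monitor import traces

tracer = traces.get_tracer(__name__)

# Illustrative traceparent handed over from an upstream caller.
incoming = "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"

with traces.set_trace_context(incoming):
    with tracer.start_as_current_span("handle-message"):
        # Returns a "00-<operation id>-<parent id>-01" string while the span records.
        diagnostic_id = traces.get_current_diagnostic_id()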
{warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/servicebus/events/client.py

@@ -125,20 +125,20 @@ class WarpzoneEventClient:
     ):
         typeguard.check_type(value=topic, expected_type=Topic)
         topic_name = topic.value
-        with traces.servicebus_send_span(event_msg.subject):
-            diagnostic_id = traces.get_current_diagnostic_id()
-
-            az_sdk_msg = ServiceBusMessage(
-                body=json.dumps(event_msg.event),
-                subject=event_msg.subject,
-                content_type="application/json",
-                message_id=event_msg.message_id,
-                application_properties={"Diagnostic-Id": diagnostic_id},
-                time_to_live=event_msg.time_to_live,
-            )

-
-
+        diagnostic_id = traces.get_current_diagnostic_id()
+
+        az_sdk_msg = ServiceBusMessage(
+            body=json.dumps(event_msg.event),
+            subject=event_msg.subject,
+            content_type="application/json",
+            message_id=event_msg.message_id,
+            application_properties={"Diagnostic-Id": diagnostic_id},
+            time_to_live=event_msg.time_to_live,
+        )
+
+        with self._get_topic_sender(topic_name) as sender:
+            sender.send_messages(message=az_sdk_msg)

     def list_subscriptions(
         self,
{warpzone_sdk-15.0.0 → warpzone_sdk-15.0.0.dev2}/warpzone/tablestorage/db/client.py

@@ -12,10 +12,13 @@ from azure.identity import DefaultAzureCredential

 from warpzone.blobstorage.client import WarpzoneBlobClient
 from warpzone.healthchecks import HealthCheckResult, check_health_of
+from warpzone.monitor import traces
 from warpzone.tablestorage.db import base_client
 from warpzone.tablestorage.db.table_config import DataType, TableMetadata
 from warpzone.tablestorage.tables.client import WarpzoneTableClient

+tracer = traces.get_tracer(__name__)
+

 class WarpzoneDatabaseClient:
     """Class to interact with Azure Table Storage for database queries
@@ -157,22 +160,31 @@
         filters: Optional[dict[str, object]] = None,
         use_cache: Optional[bool] = True,
     ) -> pd.DataFrame:
-
-
-
-
-
-
-
-
-
-
-
-
+        with tracer.start_as_current_span(
+            "WarpzoneDatabaseClient.query",
+            attributes={
+                "table_name": table_name,
+                "use_cache": use_cache,
+                "has_filters": filters is not None,
+                "has_time_interval": time_interval is not None,
+            },
+        ):
+            table_metadata = self.get_table_metadata(table_name)
+
+            match table_metadata.data_type:
+                case DataType.TIME_SERIES:
+                    df = self._query_time_series(
+                        table_metadata, time_interval, filters, use_cache
                     )
-
-
-
+                case _:
+                    if time_interval:
+                        raise ValueError(
+                            f"Table {table_name} is not a time series table,"
+                            " and cannot be queried with a time interval."
+                        )
+                    df = self._query_generic(table_metadata, None, filters, use_cache)
+
+            return df

     def list_tables(self):
         return self._table_client.list_tables()
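The span added around query uses the standard OpenTelemetry context-manager form; below is a standalone sketch of the same pattern with illustrative names, not the package's code:

from opentelemetry import trace

tracer = trace.get_tracer(__name__)


def query(table_name: str, use_cache: bool = True) -> str:
    # The span measures the whole lookup and carries attributes that
    # App Insights can later filter on (table name, cache usage, ...).
    with tracer.start_as_current_span(
        "ExampleClient.query",
        attributes={"table_name": table_name, "use_cache": use_cache},
    ):
        return f"rows for {table_name}"  # placeholder for the real lookup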
warpzone_sdk-15.0.0/warpzone/monitor/logs.py

@@ -1,87 +0,0 @@
-# NOTE: OpenTelemetry logging to Azure is still in EXPERIMENTAL mode!
-import logging
-import os
-import threading
-from logging import StreamHandler
-
-from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter
-from opentelemetry import _logs as logs
-from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
-from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
-from opentelemetry.sdk.resources import SERVICE_NAME, Resource
-
-logger = logging.getLogger(__name__)
-logger.addHandler(StreamHandler())
-
-# Suppress verbose logging from Azure SDK and infrastructure
-_NOISY_LOGGERS = [
-    "azure.core.pipeline.policies.http_logging_policy",
-    "azure.data.tables",
-    "azure.storage.blob",
-    "azure.servicebus",
-    "azure.identity",
-    "azure.monitor.opentelemetry.exporter",
-    "azure_functions_worker",
-    "azure.functions",
-    "uamqp",
-]
-for _logger_name in _NOISY_LOGGERS:
-    logging.getLogger(_logger_name).setLevel(logging.WARNING)
-
-_LOGGING_LOCK = threading.Lock()
-LOGGING_IS_CONFIGURED = False
-
-
-def configure_logging():
-    global LOGGING_IS_CONFIGURED
-    # Add thread locking to avoid race conditions during setup
-    with _LOGGING_LOCK:
-        if LOGGING_IS_CONFIGURED:
-            # logging should only be set up once
-            # to avoid duplicated log handling.
-            # Global variables is the pattern used
-            # by opentelemetry, so we use the same
-            return
-
-        # set up logger provider based on the Azure Function resource
-        # (this is make sure App Insights can track the log source correctly)
-        # (https://learn.microsoft.com/en-us/azure/azure-monitor/app/opentelemetry-enable?tabs=net#set-the-cloud-role-name-and-the-cloud-role-instance)
-        service_name = os.getenv("WEBSITE_SITE_NAME") or "unknown-service"
-        resource = Resource.create({SERVICE_NAME: service_name})
-        logs.set_logger_provider(
-            LoggerProvider(
-                resource=resource,
-            )
-        )
-
-        # setup azure monitor log exporter to send telemetry to App Insights
-        try:
-            log_exporter = AzureMonitorLogExporter()
-        except ValueError:
-            logger.warning(
-                "Cant set up logging to App Insights,"
-                " as no connection string is set."
-            )
-        else:
-            log_record_processor = BatchLogRecordProcessor(log_exporter)
-            logs.get_logger_provider().add_log_record_processor(log_record_processor)
-
-        LOGGING_IS_CONFIGURED = True
-
-
-def get_logger(name: str):
-    # set up standard logger
-    logger = logging.getLogger(name)
-    logger.setLevel(logging.INFO)
-
-    # Check if OTEL handler is already added to this specific logger
-    # (not using hasHandlers() as it also checks parent/root handlers)
-    has_otel_handler = any(isinstance(h, LoggingHandler) for h in logger.handlers)
-    if not has_otel_handler:
-        # add OTEL handler for trace correlation
-        handler = LoggingHandler()
-        logger.addHandler(handler)
-        # Don't propagate to root logger to avoid duplicate logs
-        logger.propagate = False
-
-    return logger
warpzone_sdk-15.0.0/warpzone/monitor/traces.py

@@ -1,128 +0,0 @@
-import logging
-import os
-import threading
-from contextlib import contextmanager
-from logging import StreamHandler
-
-from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter
-from opentelemetry import context, trace
-from opentelemetry.sdk.resources import SERVICE_NAME, Resource
-from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export import BatchSpanProcessor
-from opentelemetry.sdk.trace.sampling import ALWAYS_ON
-from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
-
-logger = logging.getLogger(__name__)
-logger.addHandler(StreamHandler())
-
-tracer = trace.get_tracer(__name__)
-
-_TRACING_LOCK = threading.Lock()
-TRACING_IS_CONFIGURED = False
-
-
-def configure_tracing():
-    global TRACING_IS_CONFIGURED
-    # Add thread locking to avoid race conditions during setup
-    with _TRACING_LOCK:
-        if TRACING_IS_CONFIGURED:
-            # tracing should only be set up once
-            # to avoid duplicated trace handling.
-            # Global variables is the pattern used
-            # by opentelemetry, so we use the same
-            return
-
-        # set up tracer provider based on the Azure Function resource
-        # (this is make sure App Insights can track the trace source correctly)
-        # (https://learn.microsoft.com/en-us/azure/azure-monitor/app/opentelemetry-enable?tabs=net#set-the-cloud-role-name-and-the-cloud-role-instance).
-        # We use the ALWAYS ON sampler since otherwise spans will not be
-        # recording upon creation
-        # (https://anecdotes.dev/opentelemetry-on-google-cloud-unraveling-the-mystery-f61f044c18be)
-        service_name = os.getenv("WEBSITE_SITE_NAME") or "unknown-service"
-        resource = Resource.create({SERVICE_NAME: service_name})
-        trace.set_tracer_provider(
-            TracerProvider(
-                sampler=ALWAYS_ON,
-                resource=resource,
-            )
-        )
-
-        # setup azure monitor trace exporter to send telemetry to App Insights
-        try:
-            trace_exporter = AzureMonitorTraceExporter()
-        except ValueError:
-            logger.warning(
-                "Cant set up tracing to App Insights,"
-                " as no connection string is set."
-            )
-        else:
-            span_processor = BatchSpanProcessor(trace_exporter)
-            trace.get_tracer_provider().add_span_processor(span_processor)
-
-        TRACING_IS_CONFIGURED = True
-
-
-@contextmanager
-def set_trace_context(trace_parent: str, trace_state: str = ""):
-    """Context manager for setting the trace context
-
-    Args:
-        trace_parent (str): Trace parent ID
-        trace_state (str, optional): Trace state. Defaults to "".
-    """
-    carrier = {"traceparent": trace_parent, "tracestate": trace_state}
-    ctx = TraceContextTextMapPropagator().extract(carrier=carrier)
-
-    token = context.attach(ctx)  # attach context before run
-    try:
-        yield
-    finally:
-        context.detach(token)  # detach context after run
-
-
-def get_tracer(name: str):
-    tracer = trace.get_tracer(name)
-    return tracer
-
-
-def get_current_diagnostic_id() -> str:
-    """Gets diagnostic id from current span
-
-    The diagnostic id is a concatenation of operation-id and parent-id
-
-    Returns:
-        str: diagnostic id
-    """
-    span = trace.get_current_span()
-
-    if not span.is_recording():
-        return ""
-
-    operation_id = "{:016x}".format(span.context.trace_id)
-    parent_id = "{:016x}".format(span.context.span_id)
-
-    diagnostic_id = f"00-{operation_id}-{parent_id}-01"
-
-    return diagnostic_id
-
-
-# Service Bus trace constants (these were removed from azure-servicebus SDK)
-_SB_TRACE_NAMESPACE = "Microsoft.ServiceBus"
-
-
-@contextmanager
-def servicebus_send_span(subject: str) -> trace.Span:
-    """Start span for Service Bus message tracing.
-
-    Args:
-        subject: The message subject (used as span name for easy identification)
-
-    Yields:
-        trace.Span: the span
-    """
-    with tracer.start_as_current_span(
-        subject, kind=trace.SpanKind.PRODUCER
-    ) as msg_span:
-        msg_span.set_attributes({"az.namespace": _SB_TRACE_NAMESPACE})
-
-        yield msg_span
All remaining files listed above with +0 -0 are renamed from warpzone_sdk-15.0.0/ to warpzone_sdk-15.0.0.dev2/ without content changes.