warpzone-sdk 14.2.0__tar.gz → 15.0.0__tar.gz

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (60)
  1. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/PKG-INFO +2 -3
  2. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/pyproject.toml +2 -3
  3. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/db/client.py +63 -13
  4. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/integrations.py +1 -1
  5. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/monitor.py +0 -10
  6. warpzone_sdk-15.0.0/warpzone/monitor/__init__.py +2 -0
  7. warpzone_sdk-15.0.0/warpzone/monitor/logs.py +87 -0
  8. warpzone_sdk-15.0.0/warpzone/monitor/traces.py +128 -0
  9. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/servicebus/events/client.py +4 -6
  10. warpzone_sdk-14.2.0/warpzone/monitor/__init__.py +0 -2
  11. warpzone_sdk-14.2.0/warpzone/monitor/logs.py +0 -63
  12. warpzone_sdk-14.2.0/warpzone/monitor/traces.py +0 -136
  13. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/README.md +0 -0
  14. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/__init__.py +0 -0
  15. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/blobstorage/__init__.py +0 -0
  16. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/blobstorage/client.py +0 -0
  17. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/db/__init__.py +0 -0
  18. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/deltastorage/__init__.py +0 -0
  19. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/deltastorage/data_types.py +0 -0
  20. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/deltastorage/generated_columns.py +0 -0
  21. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/deltastorage/lock_client.py +0 -0
  22. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/deltastorage/schema.py +0 -0
  23. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/deltastorage/slicing.py +0 -0
  24. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/deltastorage/store.py +0 -0
  25. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/deltastorage/table.py +0 -0
  26. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/enums/__init__.py +0 -0
  27. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/enums/topicenum.py +0 -0
  28. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/__init__.py +0 -0
  29. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/checks.py +0 -0
  30. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/functionize.py +0 -0
  31. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/process.py +0 -0
  32. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/processors/__init__.py +0 -0
  33. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/processors/dependencies.py +0 -0
  34. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/processors/outputs.py +0 -0
  35. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/processors/triggers.py +0 -0
  36. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/signature.py +0 -0
  37. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/function/types.py +0 -0
  38. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/healthchecks/__init__.py +0 -0
  39. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/healthchecks/model.py +0 -0
  40. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/servicebus/data/__init__.py +0 -0
  41. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/servicebus/data/client.py +0 -0
  42. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/servicebus/events/__init__.py +0 -0
  43. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/servicebus/events/triggers.py +0 -0
  44. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tablestorage/db/__init__.py +0 -0
  45. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tablestorage/db/base_client.py +0 -0
  46. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tablestorage/db/client.py +0 -0
  47. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tablestorage/db/table_config.py +0 -0
  48. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tablestorage/tables/__init__.py +0 -0
  49. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tablestorage/tables/client.py +0 -0
  50. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tablestorage/tables/entities.py +0 -0
  51. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tablestorage/tables/helpers.py +0 -0
  52. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/testing/__init__.py +0 -0
  53. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/testing/assertions.py +0 -0
  54. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/testing/data.py +0 -0
  55. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/testing/matchers.py +0 -0
  56. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tools/__init__.py +0 -0
  57. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/tools/copy.py +0 -0
  58. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/transform/__init__.py +0 -0
  59. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/transform/data.py +0 -0
  60. {warpzone_sdk-14.2.0 → warpzone_sdk-15.0.0}/warpzone/transform/schema.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: warpzone-sdk
-Version: 14.2.0
+Version: 15.0.0
 Summary: The main objective of this package is to centralize logic used to interact with Azure Functions, Azure Service Bus and Azure Table Storage
 Author: Team Enigma
 Author-email: enigma@energinet.dk
@@ -13,12 +13,11 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.14
 Requires-Dist: aiohttp (>=3.8.3)
 Requires-Dist: azure-core (>=1.26.3)
-Requires-Dist: azure-core-tracing-opentelemetry (>=1.0.0b12)
 Requires-Dist: azure-data-tables (>=12.4.0)
 Requires-Dist: azure-functions (>=1.12.0)
 Requires-Dist: azure-identity (>=1.15.0)
 Requires-Dist: azure-monitor-opentelemetry-exporter (>=1.0.0b36)
-Requires-Dist: azure-servicebus (>=7.8.0,<7.9.0)
+Requires-Dist: azure-servicebus (>=7.8.0)
 Requires-Dist: azure-storage-blob (>=12.14.1)
 Requires-Dist: cryptography (==43.0.3)
 Requires-Dist: datamazing (>=5.1.6)
@@ -1,6 +1,6 @@
 [project]
 name = "warpzone-sdk"
-version = "14.2.0"
+version = "15.0.0"
 description = "The main objective of this package is to centralize logic used to interact with Azure Functions, Azure Service Bus and Azure Table Storage"
 authors = [{ name = "Team Enigma", email = "enigma@energinet.dk" }]
 requires-python = ">=3.10"
@@ -20,10 +20,9 @@ datamazing = ">=5.1.6"
 azure-core = ">=1.26.3"
 azure-data-tables = ">=12.4.0"
 azure-identity = ">=1.15.0"
-azure-servicebus = "~7.8.0" # pin to avoid breaking changes with tracing in 7.9.0
+azure-servicebus = ">=7.8.0"
 azure-storage-blob = ">=12.14.1"
 aiohttp = ">=3.8.3"
-azure-core-tracing-opentelemetry = ">=1.0.0b12"
 azure-monitor-opentelemetry-exporter = ">=1.0.0b36"
 opentelemetry-sdk = ">=1.32.0"
 azure-functions = ">=1.12.0"
@@ -32,29 +32,51 @@ class WarpzoneDatabaseClient:
         self,
         path: str,
         storage_options: dict[str, str] | None = None,
+        table_prefix: str = "",
     ):
         self.store = Store(
             path=path,
             storage_options=storage_options,
         )
+        self.table_prefix = table_prefix
 
     @classmethod
-    def from_storage_account(
+    def from_resource_name(
         cls,
         storage_account: str,
         container_name: str = "datasets",
+        sub_path: str = "",
+        table_prefix: str = "",
         credential: (
             AzureNamedKeyCredential | AzureSasCredential | TokenCredential
         ) = DefaultAzureCredential(),
     ):
+        """Create a WarpzoneDatabaseClient from resource name (storage account).
+        This assumes the path of the delta lake is of the form:
+        abfss://{container_name}@{storage_account}.dfs.core.windows.net/{sub_path}
+
+        Args:
+            storage_account (str): Storage account name.
+            container_name (str, optional): Container name. Defaults to "datasets".
+            sub_path (str, optional): Sub-path within the container. Defaults to "".
+            table_prefix (str, optional): Table prefix to use (e.g. `mz_` for archive).
+                Defaults to "".
+            credential (optional): Azure credential to use.
+                Defaults to DefaultAzureCredential().
+        """
         path = f"abfss://{container_name}@{storage_account}.dfs.core.windows.net"
+        if sub_path:
+            path += f"/{sub_path}"
+
         token = credential.get_token("https://storage.azure.com/.default")
         storage_options = {
             "account_name": storage_account,
             "token": token.token,
         }
 
-        return cls(path=path, storage_options=storage_options)
+        return cls(
+            path=path, storage_options=storage_options, table_prefix=table_prefix
+        )
 
     def get_unit_and_multiple(self, timedelta: pd.Timedelta) -> tuple[str | None, int]:
         """
@@ -148,7 +170,35 @@ class WarpzoneDatabaseClient:
         time_interval: Optional[pdz.TimeInterval] = None,
         time_travel: Optional[pdz.TimeTravel] = None,
         filters: Optional[dict[str, object]] = None,
+        include_validity_period_columns: bool = False,
+        include_generated_columns: bool = False,
     ) -> pd.DataFrame:
+        """Query table.
+        Query defaults are set to match old Table Storage client behavior.
+        Time travel defaults to "as of now"
+        Validity period columns are dropped by default.
+        Generated columns are dropped by default.
+
+        Args:
+            table_name (str): Name of the table
+            time_interval (Optional[pdz.TimeInterval], optional): Time interval for the
+                query. Defaults to None.
+            time_travel (Optional[pdz.TimeTravel], optional): Time travel information.
+                Defaults to None.
+            filters (Optional[dict[str, object]], optional): Filters to apply to the
+                query.
+                Defaults to None.
+            include_validity_period_columns (bool, optional): Whether to include
+                validity period columns in the result;
+                (e.g. `valid_from_time_utc`, `valid_to_time_utc`).
+                Defaults to False.
+            include_generated_columns (bool, optional): Whether to include generated
+                columns in the result; (e.g. `valid_from_time_utc`, `valid_to_time_utc`).
+                Defaults to False.
+
+        Returns:
+            pd.DataFrame: The result of the query.
+        """
         # We do 'camelCaseToSnake_case' conversion here because the old
         # naming convention used in WarpZone was CamelCase, while the new
         # naming convention is snake_case. The goal is to remove this
@@ -191,16 +241,16 @@ class WarpzoneDatabaseClient:
         for col in pd_df.select_dtypes(include=["datetime", "datetimetz"]).columns:
             pd_df[col] = pd_df[col].dt.floor("s").dt.as_unit("ns")
 
-        # We remove the valid-from and valid-to columns, as well
-        # as any generated columns, as this was not present
-        # in the old solution (Azure Table Stroage)
-        generated_cols = []
-        for field in table.schema().fields:
-            if field.generated_as is not None:
-                generated_cols.append(field.column_name)
-
-        pd_df = pd_df.drop(
-            columns=["valid_from_time_utc", "valid_to_time_utc"] + generated_cols
-        )
+        # Drop generated columns
+        if not include_generated_columns:
+            generated_cols = []
+            for field in table.schema().fields:
+                if field.generated_as is not None:
+                    generated_cols.append(field.column_name)
+            pd_df = pd_df.drop(columns=generated_cols)
+
+        # Drop valid-from/to columns
+        if not include_validity_period_columns:
+            pd_df = pd_df.drop(columns=["valid_from_time_utc", "valid_to_time_utc"])
 
         return pd_df
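A hedged sketch of what the new flags mean for callers. The query method's name and its first parameter sit outside the visible hunk, so `query("my_table", ...)` below is only an assumption based on the `table_name` entry in the docstring; `db_client` is the client from the earlier sketch:

    # Defaults reproduce the old Table Storage behaviour:
    # validity-period and generated columns are dropped.
    df = db_client.query("my_table")  # method and table name assumed

    # New in 15.0.0: opt in to keeping those columns.
    df_full = db_client.query(
        "my_table",
        include_validity_period_columns=True,  # keep valid_from_time_utc / valid_to_time_utc
        include_generated_columns=True,  # keep columns defined with generated_as
    )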
@@ -62,7 +62,7 @@ def get_db_client() -> WarpzoneDatabaseClient:
 
 
 def get_delta_db_client() -> WarpzoneDeltaDatabaseClient:
-    db_client = WarpzoneDeltaDatabaseClient.from_storage_account(
+    db_client = WarpzoneDeltaDatabaseClient.from_resource_name(
         os.environ["OPERATIONAL_DATA_STORAGE_ACCOUNT"],
         credential=_credential,
     )
@@ -1,5 +1,4 @@
 import asyncio
-import logging
 from contextlib import contextmanager
 from typing import Callable
 
@@ -18,16 +17,7 @@ def configure_monitoring():
     """
     Configure logging and tracing on Azure Function to
     - export telemetry to App Insights
-    - suppress spamming logs
     """
-    # disable logging for HTTP calls to avoid log spamming
-    logging.getLogger("azure.core.pipeline.policies.http_logging_policy").setLevel(
-        logging.WARNING
-    )
-
-    # disable logging for Service Bus underlying uAMQP library to avoid log spamming
-    logging.getLogger("uamqp").setLevel(logging.WARNING)
-
     # configure tracer provider
     traces.configure_tracing()
 
@@ -0,0 +1,2 @@
+from .logs import get_logger
+from .traces import get_current_diagnostic_id, get_tracer, servicebus_send_span
@@ -0,0 +1,87 @@
+# NOTE: OpenTelemetry logging to Azure is still in EXPERIMENTAL mode!
+import logging
+import os
+import threading
+from logging import StreamHandler
+
+from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter
+from opentelemetry import _logs as logs
+from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
+from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
+from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+
+logger = logging.getLogger(__name__)
+logger.addHandler(StreamHandler())
+
+# Suppress verbose logging from Azure SDK and infrastructure
+_NOISY_LOGGERS = [
+    "azure.core.pipeline.policies.http_logging_policy",
+    "azure.data.tables",
+    "azure.storage.blob",
+    "azure.servicebus",
+    "azure.identity",
+    "azure.monitor.opentelemetry.exporter",
+    "azure_functions_worker",
+    "azure.functions",
+    "uamqp",
+]
+for _logger_name in _NOISY_LOGGERS:
+    logging.getLogger(_logger_name).setLevel(logging.WARNING)
+
+_LOGGING_LOCK = threading.Lock()
+LOGGING_IS_CONFIGURED = False
+
+
+def configure_logging():
+    global LOGGING_IS_CONFIGURED
+    # Add thread locking to avoid race conditions during setup
+    with _LOGGING_LOCK:
+        if LOGGING_IS_CONFIGURED:
+            # logging should only be set up once
+            # to avoid duplicated log handling.
+            # Global variables is the pattern used
+            # by opentelemetry, so we use the same
+            return
+
+        # set up logger provider based on the Azure Function resource
+        # (this is make sure App Insights can track the log source correctly)
+        # (https://learn.microsoft.com/en-us/azure/azure-monitor/app/opentelemetry-enable?tabs=net#set-the-cloud-role-name-and-the-cloud-role-instance)
+        service_name = os.getenv("WEBSITE_SITE_NAME") or "unknown-service"
+        resource = Resource.create({SERVICE_NAME: service_name})
+        logs.set_logger_provider(
+            LoggerProvider(
+                resource=resource,
+            )
+        )
+
+        # setup azure monitor log exporter to send telemetry to App Insights
+        try:
+            log_exporter = AzureMonitorLogExporter()
+        except ValueError:
+            logger.warning(
+                "Cant set up logging to App Insights,"
+                " as no connection string is set."
+            )
+        else:
+            log_record_processor = BatchLogRecordProcessor(log_exporter)
+            logs.get_logger_provider().add_log_record_processor(log_record_processor)
+
+        LOGGING_IS_CONFIGURED = True
+
+
+def get_logger(name: str):
+    # set up standard logger
+    logger = logging.getLogger(name)
+    logger.setLevel(logging.INFO)
+
+    # Check if OTEL handler is already added to this specific logger
+    # (not using hasHandlers() as it also checks parent/root handlers)
+    has_otel_handler = any(isinstance(h, LoggingHandler) for h in logger.handlers)
+    if not has_otel_handler:
+        # add OTEL handler for trace correlation
+        handler = LoggingHandler()
+        logger.addHandler(handler)
+        # Don't propagate to root logger to avoid duplicate logs
+        logger.propagate = False
+
+    return logger
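A short usage sketch of the reworked logging module; the logger name and message are arbitrary, and configure_logging is assumed to run during function startup (e.g. via configure_monitoring, as before):

    from warpzone.monitor import get_logger

    logger = get_logger(__name__)
    # Correlated with the active trace via the OpenTelemetry LoggingHandler and
    # exported to App Insights when a connection string is configured.
    logger.info("processing message")

Two behavioural changes are visible above: the noisy Azure SDK loggers are now silenced when warpzone.monitor.logs is imported (previously done in configure_monitoring), and get_logger checks for an existing LoggingHandler on the specific logger and sets propagate = False, avoiding duplicate records through the root logger.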
@@ -0,0 +1,128 @@
+import logging
+import os
+import threading
+from contextlib import contextmanager
+from logging import StreamHandler
+
+from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter
+from opentelemetry import context, trace
+from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+
+logger = logging.getLogger(__name__)
+logger.addHandler(StreamHandler())
+
+tracer = trace.get_tracer(__name__)
+
+_TRACING_LOCK = threading.Lock()
+TRACING_IS_CONFIGURED = False
+
+
+def configure_tracing():
+    global TRACING_IS_CONFIGURED
+    # Add thread locking to avoid race conditions during setup
+    with _TRACING_LOCK:
+        if TRACING_IS_CONFIGURED:
+            # tracing should only be set up once
+            # to avoid duplicated trace handling.
+            # Global variables is the pattern used
+            # by opentelemetry, so we use the same
+            return
+
+        # set up tracer provider based on the Azure Function resource
+        # (this is make sure App Insights can track the trace source correctly)
+        # (https://learn.microsoft.com/en-us/azure/azure-monitor/app/opentelemetry-enable?tabs=net#set-the-cloud-role-name-and-the-cloud-role-instance).
+        # We use the ALWAYS ON sampler since otherwise spans will not be
+        # recording upon creation
+        # (https://anecdotes.dev/opentelemetry-on-google-cloud-unraveling-the-mystery-f61f044c18be)
+        service_name = os.getenv("WEBSITE_SITE_NAME") or "unknown-service"
+        resource = Resource.create({SERVICE_NAME: service_name})
+        trace.set_tracer_provider(
+            TracerProvider(
+                sampler=ALWAYS_ON,
+                resource=resource,
+            )
+        )
+
+        # setup azure monitor trace exporter to send telemetry to App Insights
+        try:
+            trace_exporter = AzureMonitorTraceExporter()
+        except ValueError:
+            logger.warning(
+                "Cant set up tracing to App Insights,"
+                " as no connection string is set."
+            )
+        else:
+            span_processor = BatchSpanProcessor(trace_exporter)
+            trace.get_tracer_provider().add_span_processor(span_processor)
+
+        TRACING_IS_CONFIGURED = True
+
+
+@contextmanager
+def set_trace_context(trace_parent: str, trace_state: str = ""):
+    """Context manager for setting the trace context
+
+    Args:
+        trace_parent (str): Trace parent ID
+        trace_state (str, optional): Trace state. Defaults to "".
+    """
+    carrier = {"traceparent": trace_parent, "tracestate": trace_state}
+    ctx = TraceContextTextMapPropagator().extract(carrier=carrier)
+
+    token = context.attach(ctx)  # attach context before run
+    try:
+        yield
+    finally:
+        context.detach(token)  # detach context after run
+
+
+def get_tracer(name: str):
+    tracer = trace.get_tracer(name)
+    return tracer
+
+
+def get_current_diagnostic_id() -> str:
+    """Gets diagnostic id from current span
+
+    The diagnostic id is a concatenation of operation-id and parent-id
+
+    Returns:
+        str: diagnostic id
+    """
+    span = trace.get_current_span()
+
+    if not span.is_recording():
+        return ""
+
+    operation_id = "{:016x}".format(span.context.trace_id)
+    parent_id = "{:016x}".format(span.context.span_id)
+
+    diagnostic_id = f"00-{operation_id}-{parent_id}-01"
+
+    return diagnostic_id
+
+
+# Service Bus trace constants (these were removed from azure-servicebus SDK)
+_SB_TRACE_NAMESPACE = "Microsoft.ServiceBus"
+
+
+@contextmanager
+def servicebus_send_span(subject: str) -> trace.Span:
+    """Start span for Service Bus message tracing.
+
+    Args:
+        subject: The message subject (used as span name for easy identification)
+
+    Yields:
+        trace.Span: the span
+    """
+    with tracer.start_as_current_span(
+        subject, kind=trace.SpanKind.PRODUCER
+    ) as msg_span:
+        msg_span.set_attributes({"az.namespace": _SB_TRACE_NAMESPACE})
+
+        yield msg_span
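A hedged sketch of the new send-span helper, which replaces the old two-span (CLIENT send plus PRODUCER message) pattern that imported constants from azure.servicebus._common; the subject string here is a placeholder:

    from warpzone.monitor import get_current_diagnostic_id, servicebus_send_span

    with servicebus_send_span("my-subject"):  # WarpzoneEventClient passes event_msg.subject
        diagnostic_id = get_current_diagnostic_id()
        # attach diagnostic_id to the outgoing Service Bus message properties here

The span is named after the message subject, uses SpanKind.PRODUCER, and carries az.namespace = "Microsoft.ServiceBus". Together with the dropped azure-core-tracing-opentelemetry dependency, this removes the reason for the old azure-servicebus ~7.8.0 pin.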
@@ -56,9 +56,9 @@ class WarpzoneEventClient:
     def from_resource_name(
         cls,
         service_bus_namespace: str,
-        credential: AzureNamedKeyCredential
-        | AzureSasCredential
-        | TokenCredential = DefaultAzureCredential(),
+        credential: (
+            AzureNamedKeyCredential | AzureSasCredential | TokenCredential
+        ) = DefaultAzureCredential(),
     ):
         service_bus_client = ServiceBusClient(
             fully_qualified_namespace=f"{service_bus_namespace}.servicebus.windows.net",
@@ -125,9 +125,7 @@ class WarpzoneEventClient:
     ):
         typeguard.check_type(value=topic, expected_type=Topic)
         topic_name = topic.value
-        with traces.servicebus_send_span(
-            self._service_bus_client.fully_qualified_namespace, topic_name
-        ):
+        with traces.servicebus_send_span(event_msg.subject):
             diagnostic_id = traces.get_current_diagnostic_id()
 
             az_sdk_msg = ServiceBusMessage(
@@ -1,2 +0,0 @@
-from .logs import get_logger
-from .traces import get_current_diagnostic_id, get_tracer
@@ -1,63 +0,0 @@
-# NOTE: OpenTelemetry logging to Azure is still in EXPERIMENTAL mode!
-import logging
-import os
-from logging import StreamHandler
-
-from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter
-from opentelemetry import _logs as logs
-from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
-from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
-from opentelemetry.sdk.resources import SERVICE_NAME, Resource
-
-logger = logging.getLogger(__name__)
-logger.addHandler(StreamHandler())
-
-LOGGING_IS_CONFIGURED = False
-
-
-def configure_logging():
-    global LOGGING_IS_CONFIGURED
-    if LOGGING_IS_CONFIGURED:
-        # logging should only be set up once
-        # to avoid duplicated log handling.
-        # Global variables is the pattern used
-        # by opentelemetry, so we use the same
-        return
-
-    # set up logger provider based on the Azure Function resource
-    # (this is make sure App Insights can track the log source correctly)
-    # (https://learn.microsoft.com/en-us/azure/azure-monitor/app/opentelemetry-enable?tabs=net#set-the-cloud-role-name-and-the-cloud-role-instance)
-    resource = Resource.create({SERVICE_NAME: os.getenv("WEBSITE_SITE_NAME")})
-    logs.set_logger_provider(
-        LoggerProvider(
-            resource=resource,
-        )
-    )
-
-    # setup azure monitor log exporter to send telemetry to App Insights
-    try:
-        log_exporter = AzureMonitorLogExporter()
-    except ValueError:
-        # if no App Insights instrumentation key is set (e.g. when running unit tests),
-        # the exporter creation will fail. In this case we skip it
-        logger.warning(
-            "Cant set up logging to App Insights, as no instrumentation key is set."
-        )
-    else:
-        log_record_processor = BatchLogRecordProcessor(log_exporter)
-        logs.get_logger_provider().add_log_record_processor(log_record_processor)
-
-    LOGGING_IS_CONFIGURED = True
-
-
-def get_logger(name: str):
-    # set up standard logger
-    logger = logging.getLogger(name)
-    logger.setLevel(logging.INFO)
-
-    if not logger.hasHandlers():
-        # add OTEL handler
-        handler = LoggingHandler()
-        logger.addHandler(handler)
-
-    return logger
@@ -1,136 +0,0 @@
-import logging
-import os
-from contextlib import contextmanager
-from logging import StreamHandler
-
-from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter
-from azure.servicebus._common import constants
-from opentelemetry import context, trace
-from opentelemetry.sdk.resources import SERVICE_NAME, Resource
-from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.sdk.trace.export import BatchSpanProcessor
-from opentelemetry.sdk.trace.sampling import ALWAYS_ON
-from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
-
-logger = logging.getLogger(__name__)
-logger.addHandler(StreamHandler())
-
-tracer = trace.get_tracer(__name__)
-
-TRACING_IS_CONFIGURED = False
-
-
-def configure_tracing():
-    global TRACING_IS_CONFIGURED
-    if TRACING_IS_CONFIGURED:
-        # tracing should only be set up once
-        # to avoid duplicated trace handling.
-        # Global variables is the pattern used
-        # by opentelemetry, so we use the same
-        return
-
-    # set up tracer provider based on the Azure Function resource
-    # (this is make sure App Insights can track the trace source correctly)
-    # (https://learn.microsoft.com/en-us/azure/azure-monitor/app/opentelemetry-enable?tabs=net#set-the-cloud-role-name-and-the-cloud-role-instance).
-    # We use the ALWAYS ON sampler since otherwise spans will not be
-    # recording upon creation
-    # (https://anecdotes.dev/opentelemetry-on-google-cloud-unraveling-the-mystery-f61f044c18be)
-    resource = Resource.create({SERVICE_NAME: os.getenv("WEBSITE_SITE_NAME")})
-    trace.set_tracer_provider(
-        TracerProvider(
-            sampler=ALWAYS_ON,
-            resource=resource,
-        )
-    )
-
-    # setup azure monitor trace exporter to send telemetry to App Insights
-    try:
-        trace_exporter = AzureMonitorTraceExporter()
-    except ValueError:
-        # if no App Insights instrumentation key is set (e.g. when running unit tests),
-        # the exporter creation will fail. In this case we skip it
-        logger.warning(
-            "Cant set up tracing to App Insights, as no instrumentation key is set."
-        )
-    else:
-        span_processor = BatchSpanProcessor(trace_exporter)
-        trace.get_tracer_provider().add_span_processor(span_processor)
-
-    TRACING_IS_CONFIGURED = True
-
-
-@contextmanager
-def set_trace_context(trace_parent: str, trace_state: str = ""):
-    """Context manager for setting the trace context
-
-    Args:
-        trace_parent (str): Trace parent ID
-        trace_state (str, optional): Trace state. Defaults to "".
-    """
-    carrier = {"traceparent": trace_parent, "tracestate": trace_state}
-    ctx = TraceContextTextMapPropagator().extract(carrier=carrier)
-
-    token = context.attach(ctx)  # attach context before run
-    try:
-        yield
-    finally:
-        context.detach(token)  # detach context after run
-
-
-def get_tracer(name: str):
-    tracer = trace.get_tracer(name)
-    return tracer
-
-
-def get_current_diagnostic_id() -> str:
-    """Gets diagnostic id from current span
-
-    The diagnostic id is a concatenation of operation-id and parent-id
-
-    Returns:
-        str: diagnostic id
-    """
-    span = trace.get_current_span()
-
-    if not span.is_recording():
-        return ""
-
-    operation_id = "{:016x}".format(span.context.trace_id)
-    parent_id = "{:016x}".format(span.context.span_id)
-
-    diagnostic_id = f"00-{operation_id}-{parent_id}-01"
-
-    return diagnostic_id
-
-
-@contextmanager
-def servicebus_send_span(servicebus_url: str, topic_name: str) -> trace.Span:
-    """Start spans necessary for correct tracing
-
-    Args:
-        servicebus_url (str): The full servicebus url
-        topic_name (str): Name of topic
-
-    Yields:
-        trace.Span: the inner span
-    """
-    with tracer.start_as_current_span(
-        constants.SPAN_NAME_SEND, kind=trace.SpanKind.CLIENT
-    ) as send_span:
-        send_span.set_attributes(
-            {
-                constants.TRACE_COMPONENT_PROPERTY: constants.TRACE_NAMESPACE,
-                constants.TRACE_NAMESPACE_PROPERTY: constants.TRACE_NAMESPACE_PROPERTY,
-                constants.TRACE_BUS_DESTINATION_PROPERTY: topic_name,
-                constants.TRACE_PEER_ADDRESS_PROPERTY: servicebus_url,
-            }
-        )
-
-        with tracer.start_as_current_span(
-            constants.SPAN_NAME_MESSAGE, kind=trace.SpanKind.PRODUCER
-        ) as msg_span:
-            msg_span.set_attributes(
-                {constants.TRACE_NAMESPACE_PROPERTY: constants.TRACE_NAMESPACE}
-            )
-
-            yield msg_span