atlan-application-sdk 2.3.1__py3-none-any.whl → 2.3.2__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
@@ -0,0 +1,109 @@
+ """Data models for observability metrics.
+
+ This module contains Pydantic models and enums used across the observability system.
+ Separated from metrics_adaptor.py to avoid circular dependencies.
+ """
+
+ from enum import Enum
+ from time import time as get_current_time
+ from typing import Dict, Optional
+
+ from pydantic import BaseModel
+
+
+ class MetricType(str, Enum):
+     """Enum for metric types."""
+
+     COUNTER = "counter"
+     GAUGE = "gauge"
+     HISTOGRAM = "histogram"
+
+
+ class MetricRecord(BaseModel):
+     """A Pydantic model representing a metric record in the system.
+
+     This model defines the structure for metric data with fields for timestamp,
+     name, value, type, labels, and optional description and unit.
+
+     Attributes:
+         timestamp (float): Unix timestamp when the metric was recorded
+         name (str): Name of the metric
+         value (float): Numeric value of the metric
+         type (MetricType): Type of metric (counter, gauge, or histogram)
+         labels (Dict[str, str]): Key-value pairs for metric dimensions
+         description (Optional[str]): Optional description of the metric
+         unit (Optional[str]): Optional unit of measurement
+     """
+
+     timestamp: float
+     name: str
+     value: float
+     type: MetricType  # counter, gauge, histogram
+     labels: Dict[str, str]
+     description: Optional[str] = None
+     unit: Optional[str] = None
+
+     @classmethod
+     def parse_obj(cls, obj):
+         """Parse an object into a MetricRecord with consistent data types.
+
+         Overrides ``BaseModel.parse_obj`` to provide custom parsing logic,
+         including validation and type conversion for all fields. (Defining this
+         on the inner ``Config`` class would never be invoked by Pydantic.)
+         """
+         if isinstance(obj, dict):
+             # Ensure labels is a dictionary with consistent structure
+             if "labels" in obj:
+                 # Create a new labels dict with only the expected fields
+                 new_labels = {}
+                 expected_fields = [
+                     "database",
+                     "status",
+                     "type",
+                     "mode",
+                     "workflow_id",
+                     "workflow_type",
+                 ]
+
+                 # Copy only the expected fields if they exist
+                 for field in expected_fields:
+                     if field in obj["labels"]:
+                         new_labels[field] = str(obj["labels"][field])
+
+                 obj["labels"] = new_labels
+
+             # Ensure value is float
+             if "value" in obj:
+                 try:
+                     obj["value"] = float(obj["value"])
+                 except (ValueError, TypeError):
+                     obj["value"] = 0.0
+
+             # Ensure timestamp is float
+             if "timestamp" in obj:
+                 try:
+                     obj["timestamp"] = float(obj["timestamp"])
+                 except (ValueError, TypeError):
+                     obj["timestamp"] = get_current_time()
+
+             # Ensure type is MetricType
+             if "type" in obj:
+                 try:
+                     obj["type"] = MetricType(obj["type"])
+                 except ValueError:
+                     obj["type"] = MetricType.COUNTER
+
+             # Ensure name is string
+             if "name" in obj:
+                 obj["name"] = str(obj["name"])
+
+             # Ensure description is string or None
+             if "description" in obj and obj["description"] is not None:
+                 obj["description"] = str(obj["description"])
+
+             # Ensure unit is string or None
+             if "unit" in obj and obj["unit"] is not None:
+                 obj["unit"] = str(obj["unit"])
+
+         return super().parse_obj(obj)
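To illustrate the coercion behavior of `MetricRecord.parse_obj`, here is a minimal sketch; the payload values are hypothetical:

```python
from application_sdk.observability.models import MetricRecord, MetricType

# Hypothetical raw payload: numeric fields arrive as strings, the type is
# unrecognized, and labels contain a key outside the expected set.
raw = {
    "timestamp": "1704067200.0",
    "name": "rows_processed",
    "value": "42",
    "type": "bogus",
    "labels": {"database": "sales", "unexpected": "dropped"},
}

record = MetricRecord.parse_obj(raw)
assert record.value == 42.0                     # coerced to float
assert record.type is MetricType.COUNTER       # unknown type falls back to COUNTER
assert record.labels == {"database": "sales"}  # non-allow-listed label keys are dropped
```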
@@ -0,0 +1,420 @@
+ import asyncio
+ import base64
+ import logging
+ import threading
+ from datetime import datetime
+ from typing import TYPE_CHECKING, Any, Dict, Optional
+
+ import httpx
+ from pydantic import BaseModel, ConfigDict, Field
+
+ from application_sdk.constants import (
+     SEGMENT_API_URL,
+     SEGMENT_BATCH_SIZE,
+     SEGMENT_BATCH_TIMEOUT_SECONDS,
+     SEGMENT_DEFAULT_USER_ID,
+     SEGMENT_WRITE_KEY,
+ )
+ from application_sdk.observability.models import MetricRecord
+
+ if TYPE_CHECKING:
+     pass  # Reserved for future type-checking-only imports
+
+
+ class SegmentTrackEvent(BaseModel):
+     """Pydantic model for a single Segment track event.
+
+     Attributes:
+         userId: The user ID for the event
+         event: The event name
+         properties: Event properties/metadata
+         timestamp: ISO 8601 timestamp of the event
+         type: Event type (always "track" for track events)
+     """
+
+     userId: str = Field(..., description="User ID for the event")
+     event: str = Field(..., description="Event name")
+     properties: Dict[str, Any] = Field(
+         default_factory=dict, description="Event properties"
+     )
+     timestamp: str = Field(..., description="ISO 8601 timestamp")
+     type: str = Field(default="track", description="Event type")
+
+     model_config = ConfigDict(
+         json_schema_extra={
+             "example": {
+                 "userId": "atlan.automation",
+                 "event": "metric_recorded",
+                 "properties": {"value": 100, "metric_type": "counter"},
+                 "timestamp": "2024-01-01T00:00:00",
+                 "type": "track",
+             }
+         }
+     )
+
+
+ class SegmentBatchPayload(BaseModel):
+     """Pydantic model for Segment batch API payload.
+
+     Attributes:
+         batch: List of track events to send in batch
+     """
+
+     batch: list[SegmentTrackEvent] = Field(
+         ..., description="List of events to send in batch"
+     )
+
+     model_config = ConfigDict(
+         json_schema_extra={
+             "example": {
+                 "batch": [
+                     {
+                         "userId": "atlan.automation",
+                         "event": "metric_recorded",
+                         "properties": {"value": 100},
+                         "timestamp": "2024-01-01T00:00:00",
+                         "type": "track",
+                     }
+                 ]
+             }
+         }
+     )
+
+
+ class SegmentClient:
+     """Async Segment client with queue-based event processing.
+
+     This client uses an asyncio queue to batch and send metrics to the Segment
+     API asynchronously, avoiding blocking operations and thread creation overhead.
+
+     Attributes:
+         enabled (bool): Whether the Segment client is enabled (has a valid write key)
+         _queue (asyncio.Queue): Queue for pending metric events
+         _client (httpx.AsyncClient): Async HTTP client for API calls
+         _worker_task (Optional[asyncio.Task]): Background task processing the queue
+     """
+
+     def __init__(
+         self,
+         enabled: bool,
+         write_key: str = "",
+         api_url: str = "",
+         default_user_id: str = "",
+         batch_size: int = 0,
+         batch_timeout_seconds: float = 0.0,
+     ):
+         """Initialize the Segment client.
+
+         Args:
+             enabled: Whether Segment metrics are enabled
+             write_key: Segment write key for authentication (defaults to SEGMENT_WRITE_KEY)
+             api_url: Segment API URL (defaults to SEGMENT_API_URL)
+             default_user_id: Default user ID for events (defaults to SEGMENT_DEFAULT_USER_ID)
+             batch_size: Maximum number of events per batch (defaults to SEGMENT_BATCH_SIZE)
+             batch_timeout_seconds: Max seconds to wait before sending a batch (defaults to SEGMENT_BATCH_TIMEOUT_SECONDS)
+         """
+         self.enabled = enabled
+         self._write_key = write_key or SEGMENT_WRITE_KEY
+         self._api_url = api_url or SEGMENT_API_URL
+         self._default_user_id = default_user_id or SEGMENT_DEFAULT_USER_ID
+         self._batch_size = batch_size or SEGMENT_BATCH_SIZE
+         self._batch_timeout_seconds = (
+             batch_timeout_seconds or SEGMENT_BATCH_TIMEOUT_SECONDS
+         )
+         self._queue: Optional[asyncio.Queue] = None
+         self._client: Optional[httpx.AsyncClient] = None
+         self._worker_task: Optional[asyncio.Task] = None
+         self._loop: Optional[asyncio.AbstractEventLoop] = None
+         self._worker_thread: Optional[threading.Thread] = None
+         self._initialized_event = threading.Event()
+
+         if not self.enabled or not self._write_key:
+             logging.warning(
+                 "Segment write key not configured - Segment metrics will be disabled"
+             )
+             self.enabled = False
+             return
+
+         # Start background thread with event loop for async operations
+         self._start_worker_thread()
+
+         logging.info(
+             f"Segment metrics client initialized (batch_size={self._batch_size}, "
+             f"batch_timeout={self._batch_timeout_seconds}s)"
+         )
+
+     def _start_worker_thread(self) -> None:
+         """Start a background thread with event loop for async operations."""
+         if self._worker_thread and self._worker_thread.is_alive():
+             return
+
+         # Clear initialization event before starting new thread
+         # This ensures we wait for the new thread to actually initialize
+         self._initialized_event.clear()
+
+         # Reset references to prevent using stale objects from dead thread
+         self._loop = None
+         self._queue = None
+         self._client = None
+         self._worker_task = None
+
+         def run_worker():
+             """Run async worker in dedicated thread."""
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+             self._loop = loop
+
+             # Initialize queue and client in the event loop
+             self._queue = asyncio.Queue()
+             self._client = httpx.AsyncClient(timeout=30.0)
+
+             # Signal that initialization is complete
+             self._initialized_event.set()
+
+             # Create and run worker task
+             self._worker_task = loop.create_task(self._process_queue())
+
+             # Run event loop until task completes
+             try:
+                 loop.run_until_complete(self._worker_task)
+             except Exception:
+                 pass
+
+         self._worker_thread = threading.Thread(target=run_worker, daemon=True)
+         self._worker_thread.start()
+
+         # Wait for thread to complete initialization (with timeout)
+         if not self._initialized_event.wait(timeout=5.0):
+             logging.error(
+                 "Segment client worker thread failed to initialize within timeout"
+             )
+
+     def send_metric(self, metric_record: "MetricRecord") -> None:
+         """Send a metric to the Segment API via the queue (synchronous interface).
+
+         This method is non-blocking: it adds the metric to the queue and returns.
+         Only metrics that pass the allow-list check will be sent.
+
+         Args:
+             metric_record (MetricRecord): Metric record to send
+         """
+         if not self.enabled:
+             return
+
+         # Check if metric should be sent (allow list filtering)
+         if not self._should_send_metric(metric_record):
+             return
+
+         # Ensure worker thread is running
+         if not self._worker_thread or not self._worker_thread.is_alive():
+             self._start_worker_thread()
+
+         # Wait for initialization to complete before sending metrics
+         if not self._initialized_event.wait(timeout=1.0):
+             return
+
+         if not self._loop or not self._queue:
+             return
+
+         try:
+             # Schedule coroutine in the worker thread's event loop
+             asyncio.run_coroutine_threadsafe(self._queue.put(metric_record), self._loop)
+         except Exception as e:
+             logging.warning(f"Failed to queue metric for Segment: {e}")
+
+     async def _process_queue(self) -> None:
+         """Background worker that processes metrics from the queue.
+
+         Continuously processes metrics from the queue and sends them to the
+         Segment API in batches.
+         """
+         if not self._queue or not self._client:
+             return
+
+         batch: list["MetricRecord"] = []
+         last_send_time = asyncio.get_event_loop().time()
+
+         while True:
+             try:
+                 # Calculate remaining time until batch timeout
+                 current_time = asyncio.get_event_loop().time()
+                 time_since_last_send = current_time - last_send_time
+                 timeout = max(0.1, self._batch_timeout_seconds - time_since_last_send)
+
+                 # Get metric from queue (with timeout)
+                 try:
+                     metric_record = await asyncio.wait_for(
+                         self._queue.get(), timeout=timeout
+                     )
+                     batch.append(metric_record)
+                     self._queue.task_done()
+                 except asyncio.TimeoutError:
+                     # Timeout - send batch if we have any events
+                     pass
+
+                 current_time = asyncio.get_event_loop().time()
+
+                 # Send batch if we've reached batch size or timeout
+                 should_send = len(batch) >= self._batch_size or (
+                     len(batch) > 0
+                     and (current_time - last_send_time) >= self._batch_timeout_seconds
+                 )
+
+                 if should_send and batch:
+                     await self._send_batch_to_segment(batch)
+                     batch = []
+                     last_send_time = asyncio.get_event_loop().time()
+
+             except asyncio.CancelledError:
+                 # Worker task cancelled - send remaining batch before exit
+                 if batch:
+                     try:
+                         await self._send_batch_to_segment(batch)
+                     except Exception as e:
+                         logging.warning(f"Error sending final batch: {e}")
+                 break
+             except Exception as e:
+                 logging.warning(f"Error processing Segment metric queue: {e}")
+
+     def _build_track_event(self, metric_record: "MetricRecord") -> SegmentTrackEvent:
+         """Build a Segment track event from a metric record.
+
+         Args:
+             metric_record: Metric record to convert
+
+         Returns:
+             SegmentTrackEvent model
+         """
+         # Build event properties
+         event_properties = {
+             "value": metric_record.value,
+             "metric_type": metric_record.type.value,
+             **metric_record.labels,
+         }
+
+         # Add optional fields if present
+         if metric_record.description:
+             event_properties["description"] = metric_record.description
+         if metric_record.unit:
+             event_properties["unit"] = metric_record.unit
+
+         # Create and return Pydantic model
+         return SegmentTrackEvent(
+             userId=self._default_user_id,
+             event=metric_record.name,
+             properties=event_properties,
+             timestamp=datetime.fromtimestamp(metric_record.timestamp).isoformat(),
+         )
+
+     async def _send_batch_to_segment(
+         self, metric_records: list["MetricRecord"]
+     ) -> None:
+         """Send a batch of metrics to the Segment API.
+
+         Args:
+             metric_records: List of metric records to send
+         """
+         if not self._client or not metric_records:
+             return
+
+         try:
+             # Build batch of track events
+             events = [self._build_track_event(record) for record in metric_records]
+
+             # Create batch payload
+             batch_payload = SegmentBatchPayload(batch=events)
+
+             # Create Basic Auth header
+             segment_write_key_encoded = base64.b64encode(
+                 (self._write_key + ":").encode("ascii")
+             ).decode()
+
+             headers = {
+                 "content-type": "application/json",
+                 "Authorization": f"Basic {segment_write_key_encoded}",
+             }
+
+             # Send HTTP request with validated batch payload
+             response = await self._client.post(
+                 self._api_url,
+                 json=batch_payload.model_dump(),
+                 headers=headers,
+             )
+             response.raise_for_status()
+
+             logging.debug(
+                 f"Successfully sent batch of {len(metric_records)} metrics to Segment"
+             )
+         except httpx.HTTPError as e:
+             logging.warning(f"HTTP error sending metrics batch to Segment: {e}")
+         except Exception as e:
+             logging.warning(f"Unexpected error sending metrics batch to Segment: {e}")
+
+     def close(self) -> None:
+         """Close the Segment client and clean up resources.
+
+         Stops the worker task and closes the HTTP client.
+         This method ensures proper cleanup of all resources including:
+         - Worker thread and event loop
+         - httpx.AsyncClient connection pools
+         """
+         if not self.enabled:
+             return
+
+         # Cancel worker task if running
+         if self._loop and self._worker_task and not self._worker_task.done():
+             try:
+                 self._loop.call_soon_threadsafe(self._worker_task.cancel)
+             except Exception as e:
+                 logging.warning(f"Error cancelling worker task: {e}")
+
+         # Close httpx client
+         if self._loop and self._client:
+             try:
+                 # Schedule client close in the event loop
+                 async def close_client():
+                     if self._client:
+                         await self._client.aclose()
+
+                 if self._loop.is_running():
+                     # If loop is running, schedule the close
+                     future = asyncio.run_coroutine_threadsafe(
+                         close_client(), self._loop
+                     )
+                     # Wait for close to complete (with timeout)
+                     try:
+                         future.result(timeout=2.0)
+                     except Exception as e:
+                         logging.warning(f"Timeout or error closing httpx client: {e}")
+                 else:
+                     # If loop is not running, run it directly
+                     self._loop.run_until_complete(close_client())
+             except Exception as e:
+                 logging.warning(f"Error closing httpx client: {e}")
+
+         # Wait for worker thread to finish (with timeout)
+         if self._worker_thread and self._worker_thread.is_alive():
+             try:
+                 self._worker_thread.join(timeout=2.0)
+                 if self._worker_thread.is_alive():
+                     logging.warning(
+                         "SegmentClient worker thread did not terminate within timeout"
+                     )
+             except Exception as e:
+                 logging.warning(f"Error joining worker thread: {e}")
+
+     def _should_send_metric(self, metric_record: "MetricRecord") -> bool:
+         """Determine if a metric should be sent to Segment.
+
+         Only metrics with the label `send_to_segment: true` will be sent to Segment.
+         This provides explicit control over which metrics are forwarded to Segment
+         for business analytics.
+
+         Args:
+             metric_record (MetricRecord): The metric record to evaluate
+
+         Returns:
+             bool: True if metric should be sent (has send_to_segment=true label), False otherwise
+         """
+         # Check for explicit send_to_segment label
+         send_to_segment = metric_record.labels.get("send_to_segment", "").lower()
+         return send_to_segment == "true"
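As a usage sketch (the metric name, labels, and write key below are hypothetical), a caller enqueues records through the synchronous interface, and only records carrying the `send_to_segment: "true"` label pass the allow-list check:

```python
import time

from application_sdk.observability.models import MetricRecord, MetricType

# SegmentClient as defined above; its module path is not shown in this diff.
client = SegmentClient(enabled=True, write_key="hypothetical-write-key")

record = MetricRecord(
    timestamp=time.time(),
    name="rows_processed",  # hypothetical metric name
    value=128.0,
    type=MetricType.COUNTER,
    labels={"send_to_segment": "true", "workflow_id": "wf-123"},  # opt-in label required
)

client.send_metric(record)  # non-blocking: queued, batched, and sent by the worker thread
client.close()  # cancels the worker task, flushing any remaining batch
```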
@@ -5,23 +5,44 @@ to a pub/sub system with automatic fallback to HTTP binding.
  """
 
  import json
+ import time
  from datetime import datetime
 
  from dapr import clients
  from temporalio import activity, workflow
 
  from application_sdk.constants import (
+     APP_TENANT_ID,
      APPLICATION_NAME,
      DAPR_BINDING_OPERATION_CREATE,
+     DOMAIN_NAME,
      EVENT_STORE_NAME,
  )
- from application_sdk.interceptors.models import Event, EventMetadata, WorkflowStates
+ from application_sdk.interceptors.models import (
+     ApplicationEventNames,
+     Event,
+     EventMetadata,
+     WorkflowStates,
+ )
  from application_sdk.observability.logger_adaptor import get_logger
+ from application_sdk.observability.metrics_adaptor import (
+     MetricRecord,
+     MetricType,
+     get_metrics,
+ )
  from application_sdk.services._utils import is_component_registered
 
  logger = get_logger(__name__)
  activity.logger = logger
 
+ # Lifecycle event names that should be sent to Segment
+ LIFECYCLE_EVENTS = {
+     ApplicationEventNames.WORKFLOW_START.value,
+     ApplicationEventNames.WORKFLOW_END.value,
+     ApplicationEventNames.ACTIVITY_START.value,
+     ApplicationEventNames.ACTIVITY_END.value,
+ }
+
 
  class EventStore:
      """Unified event store service for publishing application events.
@@ -89,6 +110,97 @@ class EventStore:
 
          return event
 
+     @classmethod
+     def _send_lifecycle_event_to_segment(cls, event: Event) -> None:
+         """Send a lifecycle event to Segment if enabled.
+
+         Args:
+             event (Event): The lifecycle event to send to Segment
+         """
+         # Only send lifecycle events to Segment
+         if event.event_name not in LIFECYCLE_EVENTS:
+             return
+
+         try:
+             metrics = get_metrics()
+
+             # Map event names to Segment event names
+             segment_event_name_map = {
+                 ApplicationEventNames.WORKFLOW_START.value: "workflow_started",
+                 ApplicationEventNames.WORKFLOW_END.value: "workflow_completed",
+                 ApplicationEventNames.ACTIVITY_START.value: "activity_started",
+                 ApplicationEventNames.ACTIVITY_END.value: "activity_ended",
+             }
+
+             segment_event_name = segment_event_name_map.get(
+                 event.event_name, event.event_name
+             )
+
+             # Build labels from event metadata
+             labels = {
+                 "send_to_segment": "true",
+             }
+
+             # Add workflow context if available
+             if event.metadata.workflow_id:
+                 labels["workflow_id"] = event.metadata.workflow_id
+             if event.metadata.workflow_run_id:
+                 labels["workflow_run_id"] = event.metadata.workflow_run_id
+             if event.metadata.workflow_type:
+                 labels["workflow_type"] = event.metadata.workflow_type
+             if event.metadata.workflow_state:
+                 labels["workflow_state"] = event.metadata.workflow_state
+
+             # Add activity context if available
+             if event.metadata.activity_id:
+                 labels["activity_id"] = event.metadata.activity_id
+             if event.metadata.activity_type:
+                 labels["activity_type"] = event.metadata.activity_type
+             if event.metadata.attempt is not None:
+                 labels["attempt"] = str(event.metadata.attempt)
+
+             # Add application context
+             if event.metadata.application_name:
+                 labels["application_name"] = event.metadata.application_name
+
+             labels["tenant_id"] = APP_TENANT_ID
+             labels["domain_name"] = DOMAIN_NAME
+
+             # Add any additional data from event.data
+             if event.data:
+                 for key, value in event.data.items():
+                     if isinstance(value, (str, int, float, bool)):
+                         labels[str(key)] = str(value)
+
+             # Create metric record for Segment
+             # Convert timestamp from milliseconds to seconds if needed
+             # (timestamps > 1e10 are likely in milliseconds)
+             timestamp = (
+                 event.metadata.created_timestamp / 1000.0
+                 if event.metadata.created_timestamp
+                 and event.metadata.created_timestamp > 1e10
+                 else (
+                     event.metadata.created_timestamp
+                     if event.metadata.created_timestamp
+                     else time.time()
+                 )
+             )
+
+             metric_record = MetricRecord(
+                 timestamp=timestamp,
+                 name=segment_event_name,
+                 value=1.0,
+                 type=MetricType.COUNTER,
+                 labels=labels,
+                 description=f"Lifecycle event: {segment_event_name}",
+             )
+
+             # Send to Segment
+             metrics.segment_client.send_metric(metric_record)
+         except Exception as e:
+             # Don't fail event publishing if Segment sending fails
+             logger.debug(f"Failed to send lifecycle event to Segment: {e}")
+
      @classmethod
      async def publish_event(cls, event: Event):
          """Publish event with automatic metadata enrichment and authentication.
@@ -143,6 +255,9 @@ class EventStore:
          try:
              event = cls.enrich_event_metadata(event)
 
+             # Send lifecycle events to Segment (non-blocking)
+             cls._send_lifecycle_event_to_segment(event)
+
              payload = json.dumps(event.model_dump(mode="json"))
 
              # Prepare binding metadata with auth token for HTTP bindings
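The millisecond-versus-second timestamp normalization used in `_send_lifecycle_event_to_segment` can be shown in isolation; this helper is ours, not part of the SDK:

```python
import time


def normalize_timestamp(created_timestamp: float | None) -> float:
    """Return a Unix timestamp in seconds.

    Values greater than 1e10 are assumed to be milliseconds (1e10 seconds
    only occurs in the year 2286) and are scaled down; missing values fall
    back to the current time.
    """
    if not created_timestamp:
        return time.time()
    if created_timestamp > 1e10:
        return created_timestamp / 1000.0
    return created_timestamp


assert normalize_timestamp(1_704_067_200_000) == 1_704_067_200.0  # milliseconds in
assert normalize_timestamp(1_704_067_200.0) == 1_704_067_200.0  # already seconds
```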