toggletest 0.1.0__py3-none-any.whl

@@ -0,0 +1,244 @@
+ """
+ event_buffer.py - Batched event tracking with evaluation_reason support
+
+ Architecture overview:
+     The event buffer collects analytics events (flag evaluations, variant
+     assignments, conversions) and sends them to the server in batches.
+
+ Key changes from the old SDK:
+     - Events now include an ``evaluation_reason`` field that comes from the
+       WASM evaluation result. This lets the backend know *why* a flag was
+       enabled or which rule matched, enabling richer analytics.
+     - Auth uses an x-api-key header instead of a Bearer token.
+     - Optional x-environment header for multi-environment support.
+
+ Batching strategy:
+     - Events are flushed every N seconds (default: 10s).
+     - If the buffer reaches a max size (default: 100), it flushes immediately.
+     - On flush failure (network error or non-2xx), events are re-queued for
+       retry on the next flush cycle.
+     - On close(), a final flush is attempted to avoid data loss.
+
+ Thread-safety:
+     The event list is guarded by a ``threading.Lock``. The periodic flush
+     runs on a background daemon timer thread.
+ """
+
+ from __future__ import annotations
+
+ import logging
+ import threading
+ from dataclasses import asdict
+ from typing import Dict, List, Optional
+
+ import httpx
+
+ from .types import SdkEvent
+
+ logger = logging.getLogger("toggletest")
+
+ # Default interval between automatic flushes (seconds).
+ DEFAULT_FLUSH_INTERVAL = 10.0
+
+ # Default maximum number of events before an immediate flush is triggered.
+ DEFAULT_MAX_BATCH_SIZE = 100
+
+
+ class EventBuffer:
+     """Batched event buffer that periodically flushes to the server.
+
+     Events are accumulated in-memory and sent via POST /sdk/events.
+     The buffer handles retries by re-queuing failed batches.
+     """
+
+     MAX_BUFFER_SIZE = 10000
+
+     def __init__(
+         self,
+         *,
+         base_url: str,
+         api_key: str,
+         environment: Optional[str] = None,
+         flush_interval: float = DEFAULT_FLUSH_INTERVAL,
+         max_batch_size: int = DEFAULT_MAX_BATCH_SIZE,
+     ) -> None:
+         """Initialize the event buffer.
+
+         No network calls are made in the constructor. The flush timer is
+         started by ``start()``.
+
+         Args:
+             base_url: Base URL of the ToggleTest API.
+             api_key: SDK API key sent as the x-api-key header.
+             environment: Optional environment identifier.
+             flush_interval: Seconds between automatic flushes.
+             max_batch_size: Max events before triggering an immediate flush.
+         """
+         self._base_url = base_url
+         self._api_key = api_key
+         self._environment = environment
+         self._flush_interval = flush_interval
+         self._max_batch_size = max_batch_size
+
+         # -- Mutable state (guarded by _lock) --
+         self._lock = threading.Lock()
+         self._events: List[SdkEvent] = []
+         self._drop_warned = False
+
+         # -- Shared HTTP client (set via set_http_client) --
+         self._http_client: Optional[httpx.Client] = None
+
+         # -- Timer state --
+         self._timer: Optional[threading.Timer] = None
+         self._running = False
+
+     def set_http_client(self, client: httpx.Client) -> None:
+         """Assign a shared httpx.Client for connection reuse."""
+         self._http_client = client
+
+     # ------------------------------------------------------------------
+     # Header helpers
+     # ------------------------------------------------------------------
+
+     def _build_headers(self) -> Dict[str, str]:
+         """Build the common headers used for event submission.
+
+         Uses x-api-key for auth (not a Bearer token) to match the new SDK
+         protocol.
+         """
+         headers: Dict[str, str] = {
+             "Content-Type": "application/json",
+             "x-api-key": self._api_key,
+         }
+         if self._environment:
+             headers["x-environment"] = self._environment
+         return headers
+
+     # ------------------------------------------------------------------
+     # Lifecycle
+     # ------------------------------------------------------------------
+
+     def start(self) -> None:
+         """Start the periodic flush timer.
+
+         Safe to call multiple times (no-op if already running).
+         """
+         if self._running:
+             # Already running; avoid scheduling a duplicate timer chain.
+             return
+         self._running = True
+         self._schedule_flush()
+
+     def stop(self) -> None:
+         """Stop the periodic flush timer and perform a final flush.
+
+         Called during ``client.close()`` to avoid losing buffered events.
+         The final flush is best-effort: if it fails, events are lost
+         (acceptable on shutdown).
+         """
+         self._running = False
+         if self._timer is not None:
+             self._timer.cancel()
+             self._timer = None
+
+         # Final flush attempt.
+         self.flush()
+
+     # ------------------------------------------------------------------
+     # Event ingestion
+     # ------------------------------------------------------------------
+
+     def push(self, event: SdkEvent) -> None:
+         """Add an event to the buffer.
+
+         If the buffer reaches ``max_batch_size``, an immediate flush is
+         triggered. If the buffer exceeds ``MAX_BUFFER_SIZE``, the oldest
+         events are dropped.
+
+         Args:
+             event: The analytics event to buffer.
+         """
+         with self._lock:
+             while len(self._events) >= self.MAX_BUFFER_SIZE:
+                 self._events.pop(0)  # drop oldest
+                 if not self._drop_warned:
+                     logger.warning("[ToggleTest] Event buffer full. Dropping oldest events.")
+                     self._drop_warned = True
+
+             self._events.append(event)
+             if len(self._events) >= self._max_batch_size:
+                 self._do_flush()
+
+     # ------------------------------------------------------------------
+     # Flushing
+     # ------------------------------------------------------------------
+
+     def flush(self) -> None:
+         """Flush all buffered events to the server.
+
+         Thread-safe. Acquires the lock, drains the buffer, and sends
+         events in a single POST request. On failure, events are re-queued
+         at the front of the buffer for retry on the next flush cycle.
+         """
+         with self._lock:
+             self._do_flush()
+
+     def _do_flush(self) -> None:
+         """Internal flush implementation. Caller must hold ``_lock``.
+
+         Atomically drains the current buffer, sends the batch to the
+         server, and re-queues on failure.
+         """
+         if not self._events:
+             return
+
+         # Drain the buffer.
+         batch = list(self._events)
+         self._events.clear()
+
+         # Build the payload, stripping None values for cleaner JSON.
+         payload = {
+             "events": [
+                 {k: v for k, v in asdict(e).items() if v is not None}
+                 for e in batch
+             ]
+         }
+
+         try:
+             if self._http_client is not None:
+                 resp = self._http_client.post(
+                     f"{self._base_url}/sdk/events",
+                     json=payload,
+                     headers=self._build_headers(),
+                     timeout=10.0,
+                 )
+             else:
+                 # No shared client configured; use a one-shot request so we
+                 # don't leak an unclosed httpx.Client on every flush.
+                 resp = httpx.post(
+                     f"{self._base_url}/sdk/events",
+                     json=payload,
+                     headers=self._build_headers(),
+                     timeout=10.0,
+                 )
+             if resp.status_code == 429:
+                 # Evaluation limit reached -- drop events, don't retry.
+                 # The WASM engine still evaluates flags locally; only
+                 # tracking is affected.
+                 logger.warning("Evaluation limit reached. Events dropped.")
+             elif not (200 <= resp.status_code < 300):
+                 # Server error -- re-queue for retry. Any 2xx counts as
+                 # accepted, matching the module docstring.
+                 self._events = batch + self._events
+         except Exception:
+             # Network error (e.g. offline). Re-queue for retry.
+             logger.debug("Failed to flush events, re-queueing")
+             self._events = batch + self._events
+
+     # ------------------------------------------------------------------
+     # Timer management
+     # ------------------------------------------------------------------
+
+     def _schedule_flush(self) -> None:
+         """Schedule the next periodic flush.
+
+         Uses ``threading.Timer`` so the flush runs on a separate daemon
+         thread after the configured interval.
+         """
+         if not self._running:
+             return
+
+         self._timer = threading.Timer(self._flush_interval, self._timer_flush)
+         self._timer.daemon = True
+         self._timer.start()
+
+     def _timer_flush(self) -> None:
+         """Called by the timer thread. Flushes and reschedules."""
+         self.flush()
+         self._schedule_flush()
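
For orientation, here is a minimal usage sketch of the EventBuffer above (not part of the package diff). The lifecycle calls -- set_http_client(), start(), push(), stop() -- are taken directly from the code; the base URL, API key, and the SdkEvent construction are illustrative placeholders, since the diff shows only that SdkEvent is a dataclass whose fields include evaluation_reason.

    import httpx

    from toggletest.event_buffer import EventBuffer  # module path assumed
    from toggletest.types import SdkEvent

    buf = EventBuffer(
        base_url="https://api.toggletest.example",  # hypothetical endpoint
        api_key="my-sdk-key",                       # hypothetical key
        environment="production",
        flush_interval=5.0,
    )
    buf.set_http_client(httpx.Client(timeout=10.0))  # optional: reuse connections
    buf.start()  # begins the daemon-timer flush loop

    # Fields elided: the diff does not show SdkEvent's definition.
    event = SdkEvent(...)
    buf.push(event)  # triggers an immediate flush once max_batch_size is hit

    buf.stop()  # cancels the timer and attempts one final best-effort flush

Note that once the buffer hits max_batch_size, push() runs the flush synchronously on the calling thread while holding the buffer lock, so latency-sensitive callers may prefer tuning flush_interval rather than shrinking the batch size.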
@@ -0,0 +1,257 @@
+ """
+ rules_store.py - Thread-safe rules cache with ETag-based conditional fetching
+
+ Architecture overview:
+     The ToggleTest server compiles all flag/test configuration into a single
+     JSON document (the "rules blob") served at GET /sdk/rules. This blob is
+     the sole input to the WASM evaluator along with the user context.
+
+ The rules store:
+     1. Fetches the rules blob on startup.
+     2. Caches the raw JSON string and its ETag (version hash).
+     3. Re-fetches when notified by the SSE stream that a new version is
+        available, using If-None-Match to avoid downloading unchanged data.
+     4. Notifies registered listeners whenever the rules actually change.
+
+ This design means the SDK always has a local copy of the rules for
+ zero-latency evaluation, and only re-downloads when something changed.
+
+ Thread-safety:
+     All mutable state is guarded by a ``threading.RLock``. An RLock (rather
+     than a plain Lock) is used because listener callbacks may call back into
+     the store (e.g. ``get_rules_json``) from the same thread.
+ """
+
+ from __future__ import annotations
+
+ import logging
+ import threading
+ from typing import Callable, Dict, List, Optional
+
+ import httpx
+
+ from .types import RulesUpdateListener
+
+ logger = logging.getLogger("toggletest")
+
+
+ class RulesStore:
+     """Cached rules JSON with ETag-based conditional fetching.
+
+     Public API:
+         - ``fetch_rules()`` -- fetch (or re-fetch) rules from server
+         - ``on_version_notification(v)`` -- called by SSE when rules change
+         - ``get_rules_json()`` -- get the cached rules JSON string
+         - ``get_version()`` -- get the cached ETag / version hash
+         - ``has_rules`` -- whether rules have been fetched once
+         - ``on_update(listener)`` -- register a listener, returns unsub fn
+     """
+
+     def __init__(
+         self,
+         *,
+         base_url: str,
+         api_key: str,
+         environment: Optional[str] = None,
+     ) -> None:
+         """Initialize the rules store.
+
+         No network calls are made in the constructor. All I/O happens in
+         ``fetch_rules()``.
+
+         Args:
+             base_url: Base URL of the ToggleTest API.
+             api_key: SDK API key sent as the x-api-key header.
+             environment: Optional environment identifier sent as x-environment.
+         """
+         self._base_url = base_url
+         self._api_key = api_key
+         self._environment = environment
+
+         # -- Mutable state (guarded by _lock) --
+         self._lock = threading.RLock()
+         self._rules_json: Optional[str] = None
+         self._etag: Optional[str] = None
+         self._listeners: List[RulesUpdateListener] = []
+
+         # -- Shared HTTP client (set via set_http_client) --
+         self._http_client: Optional[httpx.Client] = None
+
+     def set_http_client(self, client: httpx.Client) -> None:
+         """Assign a shared httpx.Client for connection reuse."""
+         self._http_client = client
+
+     # ------------------------------------------------------------------
+     # Header helpers
+     # ------------------------------------------------------------------
+
+     def _build_headers(
+         self, extra: Optional[Dict[str, str]] = None
+     ) -> Dict[str, str]:
+         """Build the common headers used for all API requests.
+
+         Uses x-api-key for auth (not a Bearer token) to match the new SDK
+         protocol.
+
+         Args:
+             extra: Additional headers to merge in (e.g. If-None-Match).
+
+         Returns:
+             A dict of HTTP headers.
+         """
+         headers: Dict[str, str] = {
+             "x-api-key": self._api_key,
+         }
+         if self._environment:
+             headers["x-environment"] = self._environment
+         if extra:
+             headers.update(extra)
+         return headers
+
+     # ------------------------------------------------------------------
+     # Fetching
+     # ------------------------------------------------------------------
+
+     def fetch_rules(self) -> bool:
+         """Fetch the latest rules from the server.
+
+         On first call, performs an unconditional GET.
+         On subsequent calls, sends If-None-Match with the cached ETag.
+         If the server responds with 304 Not Modified, the cached rules
+         are kept and this method returns False.
+
+         Returns:
+             True if rules were updated, False if they were already current.
+
+         Raises:
+             httpx.HTTPStatusError: On non-2xx/304 responses.
+             httpx.HTTPError: On network errors.
+         """
+         with self._lock:
+             current_etag = self._etag
+
+         # Build headers, adding If-None-Match for conditional fetch.
+         extra_headers: Optional[Dict[str, str]] = None
+         if current_etag:
+             extra_headers = {"If-None-Match": current_etag}
+
+         headers = self._build_headers(extra_headers)
+
+         if self._http_client is not None:
+             resp = self._http_client.get(
+                 f"{self._base_url}/sdk/rules",
+                 headers=headers,
+                 timeout=10.0,
+             )
+         else:
+             # No shared client configured; use a one-shot request so we
+             # don't leak an unclosed httpx.Client on every fetch.
+             resp = httpx.get(
+                 f"{self._base_url}/sdk/rules",
+                 headers=headers,
+                 timeout=10.0,
+             )
+
+         # 304 Not Modified -- our cached copy is still current.
+         if resp.status_code == 304:
+             return False
+
+         # Raise on unexpected status codes (4xx, 5xx).
+         resp.raise_for_status()
+
+         # Store the new rules and ETag under the lock.
+         new_rules_json = resp.text
+         # httpx header lookup is case-insensitive, so one get() suffices.
+         new_etag = resp.headers.get("ETag")
+
+         with self._lock:
+             # Only notify listeners if the rules content actually changed.
+             changed = new_rules_json != self._rules_json
+             self._rules_json = new_rules_json
+             self._etag = new_etag
+
+             if changed and new_etag:
+                 self._notify_listeners(new_etag)
+
+             return changed
+
+     def on_version_notification(self, version: str) -> None:
+         """Called when the SSE stream receives a ``rules_updated`` event.
+
+         Compares the incoming version hash against the cached ETag.
+         If they differ, re-fetches the rules.
+
+         Args:
+             version: The version hash from the SSE event payload.
+         """
+         with self._lock:
+             current_etag = self._etag
+
+         # If the version matches what we have, skip the fetch entirely.
+         if current_etag == version:
+             return
+
+         # Re-fetch rules. Errors propagate to the caller (SSE handler).
+         self.fetch_rules()
+
+     # ------------------------------------------------------------------
+     # Accessors (thread-safe reads)
+     # ------------------------------------------------------------------
+
+     def get_rules_json(self) -> Optional[str]:
+         """Get the cached rules JSON string, or None if not yet fetched."""
+         with self._lock:
+             return self._rules_json
+
+     def get_version(self) -> Optional[str]:
+         """Get the current version hash (ETag) of the cached rules."""
+         with self._lock:
+             return self._etag
+
+     @property
+     def has_rules(self) -> bool:
+         """Whether rules have been fetched at least once."""
+         with self._lock:
+             return self._rules_json is not None
+
+     # ------------------------------------------------------------------
+     # Listener management
+     # ------------------------------------------------------------------
+
+     def on_update(self, listener: RulesUpdateListener) -> Callable[[], None]:
+         """Register a listener that is called whenever rules are updated.
+
+         The listener receives the new version hash (ETag string) as its
+         sole argument.
+
+         Args:
+             listener: Callback function accepting a version string.
+
+         Returns:
+             An unsubscribe function. Call it to remove the listener.
+         """
+         with self._lock:
+             self._listeners.append(listener)
+
+         def unsubscribe() -> None:
+             with self._lock:
+                 try:
+                     self._listeners.remove(listener)
+                 except ValueError:
+                     pass  # Already removed, nothing to do.
+
+         return unsubscribe
+
+     def _notify_listeners(self, version: str) -> None:
+         """Notify all registered listeners of a rules update.
+
+         Swallows individual listener errors to prevent one bad listener
+         from breaking the notification chain.
+
+         Args:
+             version: The new rules version hash (ETag).
+         """
+         with self._lock:
+             # Snapshot the list so listeners can unsubscribe during
+             # notification without mutating the list being iterated.
+             listeners_snapshot = list(self._listeners)
+
+         for listener in listeners_snapshot:
+             try:
+                 listener(version)
+             except Exception:
+                 # Swallow the error. Log for debugging but don't propagate.
+                 logger.debug(
+                     "Rules update listener raised an exception", exc_info=True
+                 )
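
A matching sketch for the RulesStore (again, not part of the package diff). Every call below appears in the class above; the endpoint, API key, and version hash are hypothetical stand-ins.

    import httpx

    from toggletest.rules_store import RulesStore  # module path assumed

    store = RulesStore(
        base_url="https://api.toggletest.example",  # hypothetical endpoint
        api_key="my-sdk-key",                       # hypothetical key
        environment="staging",
    )
    store.set_http_client(httpx.Client(timeout=10.0))

    # First call is an unconditional GET; later calls send If-None-Match
    # and return False on a 304 Not Modified.
    changed = store.fetch_rules()
    print(changed, store.has_rules, store.get_version())

    # Listeners fire only when the rules content actually changes.
    unsubscribe = store.on_update(lambda version: print("rules now at", version))

    # What the SSE handler does on a rules_updated event: a matching
    # version hash is a no-op; a different one triggers a re-fetch.
    store.on_version_notification("deadbeef")  # hypothetical version hash

    unsubscribe()

Listeners run on whichever thread called fetch_rules(), with the store lock still held; this is exactly why the module docstring calls for an RLock, so a listener can reentrantly call get_rules_json() and see the freshly stored rules.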