openfeature-provider-flagd 0.1.5__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
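For orientation before the file-by-file diff: the package is still consumed through its FlagdProvider class; the changes below reshape the internals (resolver classes, flag-state connectors, and the generated protobuf stubs, which move from openfeature.contrib.provider.flagd.proto to openfeature.schemas.protobuf). A minimal usage sketch of that entry point, assuming the documented FlagdProvider export and a flagd instance on its default local endpoint:

from openfeature import api
from openfeature.contrib.provider.flagd import FlagdProvider

# Register the flagd provider with the OpenFeature SDK and evaluate a flag.
api.set_provider(FlagdProvider())
client = api.get_client()
print(client.get_boolean_value("my-flag", False))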
Files changed (43)
  1. openfeature/.gitignore +2 -0
  2. openfeature/contrib/provider/flagd/config.py +193 -23
  3. openfeature/contrib/provider/flagd/provider.py +62 -12
  4. openfeature/contrib/provider/flagd/resolvers/__init__.py +1 -47
  5. openfeature/contrib/provider/flagd/resolvers/grpc.py +226 -17
  6. openfeature/contrib/provider/flagd/resolvers/in_process.py +40 -31
  7. openfeature/contrib/provider/flagd/resolvers/process/connector/__init__.py +11 -0
  8. openfeature/contrib/provider/flagd/resolvers/process/connector/file_watcher.py +106 -0
  9. openfeature/contrib/provider/flagd/resolvers/process/connector/grpc_watcher.py +192 -0
  10. openfeature/contrib/provider/flagd/resolvers/process/custom_ops.py +58 -19
  11. openfeature/contrib/provider/flagd/resolvers/process/flags.py +50 -6
  12. openfeature/contrib/provider/flagd/resolvers/process/targeting.py +35 -0
  13. openfeature/contrib/provider/flagd/resolvers/protocol.py +47 -0
  14. openfeature/schemas/protobuf/flagd/evaluation/v1/evaluation_pb2.py +72 -0
  15. openfeature/schemas/protobuf/flagd/evaluation/v1/evaluation_pb2.pyi +450 -0
  16. openfeature/schemas/protobuf/flagd/evaluation/v1/evaluation_pb2_grpc.py +358 -0
  17. openfeature/schemas/protobuf/flagd/evaluation/v1/evaluation_pb2_grpc.pyi +155 -0
  18. openfeature/schemas/protobuf/flagd/sync/v1/sync_pb2.py +50 -0
  19. openfeature/schemas/protobuf/flagd/sync/v1/sync_pb2.pyi +148 -0
  20. openfeature/schemas/protobuf/flagd/sync/v1/sync_pb2_grpc.py +186 -0
  21. openfeature/schemas/protobuf/flagd/sync/v1/sync_pb2_grpc.pyi +86 -0
  22. openfeature/schemas/protobuf/schema/v1/schema_pb2.py +72 -0
  23. openfeature/schemas/protobuf/schema/v1/schema_pb2.pyi +451 -0
  24. openfeature/schemas/protobuf/schema/v1/schema_pb2_grpc.py +358 -0
  25. openfeature/schemas/protobuf/schema/v1/schema_pb2_grpc.pyi +156 -0
  26. openfeature/schemas/protobuf/sync/v1/sync_service_pb2.py +47 -0
  27. openfeature/schemas/protobuf/sync/v1/sync_service_pb2.pyi +174 -0
  28. openfeature/schemas/protobuf/sync/v1/sync_service_pb2_grpc.py +143 -0
  29. openfeature/schemas/protobuf/sync/v1/sync_service_pb2_grpc.pyi +70 -0
  30. {openfeature_provider_flagd-0.1.5.dist-info → openfeature_provider_flagd-0.2.0.dist-info}/METADATA +116 -15
  31. openfeature_provider_flagd-0.2.0.dist-info/RECORD +35 -0
  32. {openfeature_provider_flagd-0.1.5.dist-info → openfeature_provider_flagd-0.2.0.dist-info}/WHEEL +1 -1
  33. {openfeature_provider_flagd-0.1.5.dist-info → openfeature_provider_flagd-0.2.0.dist-info}/licenses/LICENSE +1 -1
  34. openfeature/contrib/provider/flagd/proto/flagd/evaluation/v1/evaluation_pb2.py +0 -62
  35. openfeature/contrib/provider/flagd/proto/flagd/evaluation/v1/evaluation_pb2_grpc.py +0 -267
  36. openfeature/contrib/provider/flagd/proto/flagd/sync/v1/sync_pb2.py +0 -40
  37. openfeature/contrib/provider/flagd/proto/flagd/sync/v1/sync_pb2_grpc.py +0 -135
  38. openfeature/contrib/provider/flagd/proto/schema/v1/schema_pb2.py +0 -62
  39. openfeature/contrib/provider/flagd/proto/schema/v1/schema_pb2_grpc.py +0 -267
  40. openfeature/contrib/provider/flagd/proto/sync/v1/sync_service_pb2.py +0 -37
  41. openfeature/contrib/provider/flagd/proto/sync/v1/sync_service_pb2_grpc.py +0 -102
  42. openfeature/contrib/provider/flagd/resolvers/process/file_watcher.py +0 -89
  43. openfeature_provider_flagd-0.1.5.dist-info/RECORD +0 -22
openfeature/contrib/provider/flagd/resolvers/grpc.py
@@ -1,36 +1,227 @@
+ import logging
+ import threading
+ import time
  import typing

  import grpc
+ from cachebox import BaseCacheImpl, LRUCache
+ from google.protobuf.json_format import MessageToDict
  from google.protobuf.struct_pb2 import Struct
+ from grpc import ChannelConnectivity

  from openfeature.evaluation_context import EvaluationContext
+ from openfeature.event import ProviderEventDetails
  from openfeature.exception import (
+     ErrorCode,
      FlagNotFoundError,
      GeneralError,
      InvalidContextError,
      ParseError,
+     ProviderNotReadyError,
      TypeMismatchError,
  )
- from openfeature.flag_evaluation import FlagResolutionDetails
+ from openfeature.flag_evaluation import FlagResolutionDetails, Reason
+ from openfeature.schemas.protobuf.flagd.evaluation.v1 import (
+     evaluation_pb2,
+     evaluation_pb2_grpc,
+ )

- from ..config import Config
+ from ..config import CacheType, Config
  from ..flag_type import FlagType
- from ..proto.schema.v1 import schema_pb2, schema_pb2_grpc
+ 
+ if typing.TYPE_CHECKING:
+     from google.protobuf.message import Message

  T = typing.TypeVar("T")

+ logger = logging.getLogger("openfeature.contrib")
+ 

  class GrpcResolver:
-     def __init__(self, config: Config):
+     def __init__(
+         self,
+         config: Config,
+         emit_provider_ready: typing.Callable[[ProviderEventDetails], None],
+         emit_provider_error: typing.Callable[[ProviderEventDetails], None],
+         emit_provider_stale: typing.Callable[[ProviderEventDetails], None],
+         emit_provider_configuration_changed: typing.Callable[
+             [ProviderEventDetails], None
+         ],
+     ):
+         self.active = False
          self.config = config
-         channel_factory = (
-             grpc.secure_channel if self.config.tls else grpc.insecure_channel
+         self.emit_provider_ready = emit_provider_ready
+         self.emit_provider_error = emit_provider_error
+         self.emit_provider_stale = emit_provider_stale
+         self.emit_provider_configuration_changed = emit_provider_configuration_changed
+         self.cache: typing.Optional[BaseCacheImpl] = (
+             LRUCache(maxsize=self.config.max_cache_size)
+             if self.config.cache == CacheType.LRU
+             else None
          )
-         self.channel = channel_factory(f"{self.config.host}:{self.config.port}")
-         self.stub = schema_pb2_grpc.ServiceStub(self.channel)
+ 
+         self.retry_grace_period = config.retry_grace_period
+         self.streamline_deadline_seconds = config.stream_deadline_ms * 0.001
+         self.deadline = config.deadline_ms * 0.001
+         self.connected = False
+         self.channel = self._generate_channel(config)
+         self.stub = evaluation_pb2_grpc.ServiceStub(self.channel)
+ 
+         self.thread: typing.Optional[threading.Thread] = None
+         self.timer: typing.Optional[threading.Timer] = None
+ 
+         self.start_time = time.time()
+ 
+     def _generate_channel(self, config: Config) -> grpc.Channel:
+         target = f"{config.host}:{config.port}"
+         # Create the channel with the service config
+         options = [
+             ("grpc.keepalive_time_ms", config.keep_alive_time),
+             ("grpc.initial_reconnect_backoff_ms", config.retry_backoff_ms),
+             ("grpc.max_reconnect_backoff_ms", config.retry_backoff_max_ms),
+             ("grpc.min_reconnect_backoff_ms", config.deadline_ms),
+         ]
+         if config.tls:
+             channel_args = {
+                 "options": options,
+                 "credentials": grpc.ssl_channel_credentials(),
+             }
+             if config.cert_path:
+                 with open(config.cert_path, "rb") as f:
+                     channel_args["credentials"] = grpc.ssl_channel_credentials(f.read())
+ 
+             channel = grpc.secure_channel(target, **channel_args)
+ 
+         else:
+             channel = grpc.insecure_channel(
+                 target,
+                 options=options,
+             )
+ 
+         return channel
+ 
+     def initialize(self, evaluation_context: EvaluationContext) -> None:
+         self.connect()

      def shutdown(self) -> None:
+         self.active = False
+         self.channel.unsubscribe(self._state_change_callback)
          self.channel.close()
+         if self.timer and self.timer.is_alive():
+             logger.debug("gRPC error timer cancelled due to shutdown")
+             self.timer.cancel()
+         if self.cache:
+             self.cache.clear()
+ 
+     def connect(self) -> None:
+         self.active = True
+ 
+         # Run monitoring in a separate thread
+         self.monitor_thread = threading.Thread(
+             target=self.monitor, daemon=True, name="FlagdGrpcServiceMonitorThread"
+         )
+         self.monitor_thread.start()
+         ## block until ready or deadline reached
+         timeout = self.deadline + time.time()
+         while not self.connected and time.time() < timeout:
+             time.sleep(0.05)
+         logger.debug("Finished blocking gRPC state initialization")
+ 
+         if not self.connected:
+             raise ProviderNotReadyError(
+                 "Blocking init finished before data synced. Consider increasing startup deadline to avoid inconsistent evaluations."
+             )
+ 
+     def monitor(self) -> None:
+         self.channel.subscribe(self._state_change_callback, try_to_connect=True)
+ 
+     def _state_change_callback(self, new_state: ChannelConnectivity) -> None:
+         logger.debug(f"gRPC state change: {new_state}")
+         if new_state == ChannelConnectivity.READY:
+             if not self.thread or not self.thread.is_alive():
+                 self.thread = threading.Thread(
+                     target=self.listen,
+                     daemon=True,
+                     name="FlagdGrpcServiceWorkerThread",
+                 )
+                 self.thread.start()
+ 
+             if self.timer and self.timer.is_alive():
+                 logger.debug("gRPC error timer expired")
+                 self.timer.cancel()
+ 
+         elif new_state == ChannelConnectivity.TRANSIENT_FAILURE:
+             # this is the failed reconnect attempt so we are going into stale
+             self.emit_provider_stale(
+                 ProviderEventDetails(
+                     message="gRPC sync disconnected, reconnecting",
+                 )
+             )
+             self.start_time = time.time()
+             # adding a timer, so we can emit the error event after time
+             self.timer = threading.Timer(self.retry_grace_period, self.emit_error)
+ 
+             logger.debug("gRPC error timer started")
+             self.timer.start()
+             self.connected = False
+ 
+     def emit_error(self) -> None:
+         logger.debug("gRPC error emitted")
+         if self.cache:
+             self.cache.clear()
+         self.emit_provider_error(
+             ProviderEventDetails(
+                 message="gRPC sync disconnected, reconnecting",
+                 error_code=ErrorCode.GENERAL,
+             )
+         )
+ 
+     def listen(self) -> None:
+         logger.debug("gRPC starting listener thread")
+         call_args = (
+             {"timeout": self.streamline_deadline_seconds}
+             if self.streamline_deadline_seconds > 0
+             else {}
+         )
+         request = evaluation_pb2.EventStreamRequest()
+ 
+         # defining a never ending loop to recreate the stream
+         while self.active:
+             try:
+                 logger.debug("Setting up gRPC sync flags connection")
+                 for message in self.stub.EventStream(
+                     request, wait_for_ready=True, **call_args
+                 ):
+                     if message.type == "provider_ready":
+                         self.emit_provider_ready(
+                             ProviderEventDetails(
+                                 message="gRPC sync connection established"
+                             )
+                         )
+                         self.connected = True
+                     elif message.type == "configuration_change":
+                         data = MessageToDict(message)["data"]
+                         self.handle_changed_flags(data)
+ 
+                     if not self.active:
+                         logger.info("Terminating gRPC sync thread")
+                         return
+             except grpc.RpcError as e:  # noqa: PERF203
+                 # although it seems like this error log is not interesting, without it, the retry is not working as expected
+                 logger.debug(f"SyncFlags stream error, {e.code()=} {e.details()=}")
+             except ParseError:
+                 logger.exception(
+                     f"Could not parse flag data using flagd syntax: {message=}"
+                 )
+ 
+     def handle_changed_flags(self, data: typing.Any) -> None:
+         changed_flags = list(data["flags"].keys())
+ 
+         if self.cache:
+             for flag in changed_flags:
+                 self.cache.pop(flag)
+ 
+         self.emit_provider_configuration_changed(ProviderEventDetails(changed_flags))

      def resolve_boolean_details(
          self,
@@ -72,41 +263,54 @@ class GrpcResolver:
      ) -> FlagResolutionDetails[typing.Union[dict, list]]:
          return self._resolve(key, FlagType.OBJECT, default_value, evaluation_context)

-     def _resolve(
+     def _resolve(  # noqa: PLR0915 C901
          self,
          flag_key: str,
          flag_type: FlagType,
          default_value: T,
          evaluation_context: typing.Optional[EvaluationContext],
      ) -> FlagResolutionDetails[T]:
+         if self.cache is not None and flag_key in self.cache:
+             cached_flag: FlagResolutionDetails[T] = self.cache[flag_key]
+             cached_flag.reason = Reason.CACHED
+             return cached_flag
+ 
          context = self._convert_context(evaluation_context)
-         call_args = {"timeout": self.config.timeout}
+         call_args = {"timeout": self.deadline}
          try:
+             request: Message
              if flag_type == FlagType.BOOLEAN:
-                 request = schema_pb2.ResolveBooleanRequest(  # type:ignore[attr-defined]
+                 request = evaluation_pb2.ResolveBooleanRequest(
                      flag_key=flag_key, context=context
                  )
                  response = self.stub.ResolveBoolean(request, **call_args)
+                 value = response.value
              elif flag_type == FlagType.STRING:
-                 request = schema_pb2.ResolveStringRequest(  # type:ignore[attr-defined]
+                 request = evaluation_pb2.ResolveStringRequest(
                      flag_key=flag_key, context=context
                  )
                  response = self.stub.ResolveString(request, **call_args)
+                 value = response.value
              elif flag_type == FlagType.OBJECT:
-                 request = schema_pb2.ResolveObjectRequest(  # type:ignore[attr-defined]
+                 request = evaluation_pb2.ResolveObjectRequest(
                      flag_key=flag_key, context=context
                  )
                  response = self.stub.ResolveObject(request, **call_args)
+                 value = MessageToDict(response, preserving_proto_field_name=True)[
+                     "value"
+                 ]
              elif flag_type == FlagType.FLOAT:
-                 request = schema_pb2.ResolveFloatRequest(  # type:ignore[attr-defined]
+                 request = evaluation_pb2.ResolveFloatRequest(
                      flag_key=flag_key, context=context
                  )
                  response = self.stub.ResolveFloat(request, **call_args)
+                 value = response.value
              elif flag_type == FlagType.INTEGER:
-                 request = schema_pb2.ResolveIntRequest(  # type:ignore[attr-defined]
+                 request = evaluation_pb2.ResolveIntRequest(
                      flag_key=flag_key, context=context
                  )
                  response = self.stub.ResolveInt(request, **call_args)
+                 value = response.value
              else:
                  raise ValueError(f"Unknown flag type: {flag_type}")

@@ -123,12 +327,17 @@ class GrpcResolver:
              raise GeneralError(message) from e

          # Got a valid flag and valid type. Return it.
-         return FlagResolutionDetails(
-             value=response.value,
+         result = FlagResolutionDetails(
+             value=value,
              reason=response.reason,
              variant=response.variant,
          )

+         if response.reason == Reason.STATIC and self.cache is not None:
+             self.cache.insert(flag_key, result)
+ 
+         return result
+ 
      def _convert_context(
          self, evaluation_context: typing.Optional[EvaluationContext]
      ) -> Struct:
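The hunks above rework GrpcResolver into a self-managing component: it now receives the provider's event callbacks, opens and monitors its own channel, listens to flagd's event stream on a worker thread, and can serve STATIC evaluations from an LRU cache. A wiring sketch under stated assumptions (only the GrpcResolver signature shown in this diff is taken as given; the Config keyword arguments and the default flagd port are assumed):

from openfeature.contrib.provider.flagd.config import Config
from openfeature.contrib.provider.flagd.resolvers.grpc import GrpcResolver
from openfeature.evaluation_context import EvaluationContext
from openfeature.event import ProviderEventDetails


def log_event(details: ProviderEventDetails) -> None:
    # In the real provider these callbacks forward to the OpenFeature event API.
    print(details.message)


config = Config(host="localhost", port=8013)  # keyword names assumed, not shown in this diff
resolver = GrpcResolver(
    config,
    emit_provider_ready=log_event,
    emit_provider_error=log_event,
    emit_provider_stale=log_event,
    emit_provider_configuration_changed=log_event,
)
resolver.initialize(EvaluationContext())  # blocks until the channel is READY or the deadline expires
details = resolver.resolve_boolean_details("my-flag", False, EvaluationContext())
print(details.value, details.reason)  # repeat lookups return Reason.CACHED when the LRU cache is enabled
resolver.shutdown()

Note that initialize() blocks until the channel reports READY or deadline_ms elapses and raises ProviderNotReadyError otherwise, so the deadline is worth tuning for slow flagd startups.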
openfeature/contrib/provider/flagd/resolvers/in_process.py
@@ -1,44 +1,54 @@
- import time
  import typing

- from json_logic import builtins, jsonLogic  # type: ignore[import-untyped]
- 
+ from openfeature.contrib.provider.flagd.resolvers.process.connector.file_watcher import (
+     FileWatcher,
+ )
  from openfeature.evaluation_context import EvaluationContext
+ from openfeature.event import ProviderEventDetails
  from openfeature.exception import FlagNotFoundError, ParseError
  from openfeature.flag_evaluation import FlagResolutionDetails, Reason
- from openfeature.provider.provider import AbstractProvider

  from ..config import Config
- from .process.custom_ops import ends_with, fractional, sem_ver, starts_with
- from .process.file_watcher import FileWatcherFlagStore
+ from .process.connector import FlagStateConnector
+ from .process.connector.grpc_watcher import GrpcWatcher
+ from .process.flags import FlagStore
+ from .process.targeting import targeting

  T = typing.TypeVar("T")


  class InProcessResolver:
-     OPERATORS: typing.ClassVar[dict] = {
-         **builtins.BUILTINS,
-         "fractional": fractional,
-         "starts_with": starts_with,
-         "ends_with": ends_with,
-         "sem_ver": sem_ver,
-     }
- 
-     def __init__(self, config: Config, provider: AbstractProvider):
+     def __init__(
+         self,
+         config: Config,
+         emit_provider_ready: typing.Callable[[ProviderEventDetails], None],
+         emit_provider_error: typing.Callable[[ProviderEventDetails], None],
+         emit_provider_stale: typing.Callable[[ProviderEventDetails], None],
+         emit_provider_configuration_changed: typing.Callable[
+             [ProviderEventDetails], None
+         ],
+     ):
          self.config = config
-         self.provider = provider
-         if not self.config.offline_flag_source_path:
-             raise ValueError(
-                 "offline_flag_source_path must be provided when using in-process resolver"
+         self.flag_store = FlagStore(emit_provider_configuration_changed)
+         self.connector: FlagStateConnector = (
+             FileWatcher(
+                 self.config, self.flag_store, emit_provider_ready, emit_provider_error
+             )
+             if self.config.offline_flag_source_path
+             else GrpcWatcher(
+                 self.config,
+                 self.flag_store,
+                 emit_provider_ready,
+                 emit_provider_error,
+                 emit_provider_stale,
              )
-         self.flag_store = FileWatcherFlagStore(
-             self.config.offline_flag_source_path,
-             self.provider,
-             self.config.offline_poll_interval_seconds,
          )

+     def initialize(self, evaluation_context: EvaluationContext) -> None:
+         self.connector.initialize(evaluation_context)
+ 
      def shutdown(self) -> None:
-         self.flag_store.shutdown()
+         self.connector.shutdown()

      def resolve_boolean_details(
          self,
@@ -62,7 +72,10 @@ class InProcessResolver:
          default_value: float,
          evaluation_context: typing.Optional[EvaluationContext] = None,
      ) -> FlagResolutionDetails[float]:
-         return self._resolve(key, default_value, evaluation_context)
+         result = self._resolve(key, default_value, evaluation_context)
+         if isinstance(result.value, int):
+             result.value = float(result.value)
+         return result

      def resolve_integer_details(
          self,
@@ -97,12 +110,8 @@ class InProcessResolver:
              variant, value = flag.default
              return FlagResolutionDetails(value, variant=variant, reason=Reason.STATIC)

-         json_logic_context = evaluation_context.attributes if evaluation_context else {}
-         json_logic_context["$flagd"] = {"flagKey": key, "timestamp": int(time.time())}
-         json_logic_context["targetingKey"] = (
-             evaluation_context.targeting_key if evaluation_context else None
-         )
-         variant = jsonLogic(flag.targeting, json_logic_context, self.OPERATORS)
+         variant = targeting(flag.key, flag.targeting, evaluation_context)
+ 
          if variant is None:
              variant, value = flag.default
              return FlagResolutionDetails(value, variant=variant, reason=Reason.DEFAULT)
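With this change the in-process resolver no longer requires an offline flag file: it builds a FlagStore and selects a FlagStateConnector at construction time, a FileWatcher when offline_flag_source_path is set and a GrpcWatcher (flagd sync stream) otherwise, while json-logic evaluation moves behind the targeting() helper. A construction sketch, assuming the Config fields shown in this diff are also accepted as keyword arguments (an assumption; only attribute access is visible here):

from openfeature.contrib.provider.flagd.config import Config
from openfeature.contrib.provider.flagd.resolvers.in_process import InProcessResolver
from openfeature.evaluation_context import EvaluationContext
from openfeature.event import ProviderEventDetails


def noop(details: ProviderEventDetails) -> None:
    pass


# File-backed: the resolver wires up a FileWatcher that polls the file for changes.
file_config = Config(offline_flag_source_path="flags.json")  # keyword name assumed
file_resolver = InProcessResolver(file_config, noop, noop, noop, noop)
file_resolver.initialize(EvaluationContext())

# flagd-backed: with no offline path the resolver uses a GrpcWatcher against flagd's sync API.
sync_config = Config(host="localhost", port=8015)  # keyword names and sync port assumed
sync_resolver = InProcessResolver(sync_config, noop, noop, noop, noop)

Note also that resolve_float_details now coerces integer variant values to float before returning them.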
openfeature/contrib/provider/flagd/resolvers/process/connector/__init__.py
@@ -0,0 +1,11 @@
+ import typing
+ 
+ from openfeature.evaluation_context import EvaluationContext
+ 
+ 
+ class FlagStateConnector(typing.Protocol):
+     def initialize(
+         self, evaluation_context: EvaluationContext
+     ) -> None: ...  # pragma: no cover
+ 
+     def shutdown(self) -> None: ...  # pragma: no cover
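FlagStateConnector is a typing.Protocol, so any object exposing matching initialize and shutdown methods satisfies it; subclassing is optional. A hypothetical StaticConnector, not part of the package, to illustrate the contract against the FlagStore API used elsewhere in this diff:

import typing

from openfeature.contrib.provider.flagd.resolvers.process.flags import FlagStore
from openfeature.evaluation_context import EvaluationContext
from openfeature.event import ProviderEventDetails


class StaticConnector:
    """Hypothetical connector: loads a fixed flag payload once at startup."""

    def __init__(
        self,
        flag_store: FlagStore,
        payload: dict,
        emit_provider_ready: typing.Callable[[ProviderEventDetails], None],
    ):
        self.flag_store = flag_store
        self.payload = payload
        self.emit_provider_ready = emit_provider_ready

    def initialize(self, evaluation_context: EvaluationContext) -> None:
        self.flag_store.update(self.payload)  # same entry point the FileWatcher uses
        self.emit_provider_ready(ProviderEventDetails(message="static flags loaded"))

    def shutdown(self) -> None:
        pass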
openfeature/contrib/provider/flagd/resolvers/process/connector/file_watcher.py
@@ -0,0 +1,106 @@
+ import json
+ import logging
+ import os
+ import threading
+ import time
+ import typing
+ 
+ import yaml
+ 
+ from openfeature.contrib.provider.flagd.config import Config
+ from openfeature.contrib.provider.flagd.resolvers.process.connector import (
+     FlagStateConnector,
+ )
+ from openfeature.contrib.provider.flagd.resolvers.process.flags import FlagStore
+ from openfeature.evaluation_context import EvaluationContext
+ from openfeature.event import ProviderEventDetails
+ from openfeature.exception import ParseError, ProviderNotReadyError
+ 
+ logger = logging.getLogger("openfeature.contrib")
+ 
+ 
+ class FileWatcher(FlagStateConnector):
+     def __init__(
+         self,
+         config: Config,
+         flag_store: FlagStore,
+         emit_provider_ready: typing.Callable[[ProviderEventDetails], None],
+         emit_provider_error: typing.Callable[[ProviderEventDetails], None],
+     ):
+         if config.offline_flag_source_path is None:
+             raise ValueError(
+                 f"`config.offline_flag_source_path` parameter invalid: {config.offline_flag_source_path}"
+             )
+         else:
+             self.file_path = config.offline_flag_source_path
+ 
+         self.emit_provider_ready = emit_provider_ready
+         self.emit_provider_error = emit_provider_error
+         self.deadline_seconds = config.deadline_ms * 0.001
+ 
+         self.last_modified = 0.0
+         self.flag_store = flag_store
+         self.should_emit_ready_on_success = False
+ 
+     def initialize(self, evaluation_context: EvaluationContext) -> None:
+         self.active = True
+         self.thread = threading.Thread(
+             target=self.refresh_file, daemon=True, name="FlagdFileWatcherWorkerThread"
+         )
+         self.thread.start()
+ 
+         # Let this throw exceptions so that provider status is set correctly
+         try:
+             self.should_emit_ready_on_success = True
+             self._load_data()
+         except Exception as err:
+             raise ProviderNotReadyError from err
+ 
+     def shutdown(self) -> None:
+         self.active = False
+ 
+     def refresh_file(self) -> None:
+         while self.active:
+             time.sleep(self.deadline_seconds)
+             logger.debug("checking for new flag store contents from file")
+             self.safe_load_data()
+ 
+     def safe_load_data(self) -> None:
+         try:
+             last_modified = os.path.getmtime(self.file_path)
+             if last_modified > self.last_modified:
+                 self._load_data(last_modified)
+         except FileNotFoundError:
+             self.handle_error("Provided file path not valid")
+         except json.JSONDecodeError:
+             self.handle_error("Could not parse JSON flag data from file")
+         except yaml.error.YAMLError:
+             self.handle_error("Could not parse YAML flag data from file")
+         except ParseError:
+             self.handle_error("Could not parse flag data using flagd syntax")
+         except Exception:
+             self.handle_error("Could not read flags from file")
+ 
+     def _load_data(self, modified_time: typing.Optional[float] = None) -> None:
+         with open(self.file_path) as file:
+             if self.file_path.endswith(".yaml"):
+                 data = yaml.safe_load(file)
+             else:
+                 data = json.load(file)
+ 
+             self.flag_store.update(data)
+ 
+             if self.should_emit_ready_on_success:
+                 self.emit_provider_ready(
+                     ProviderEventDetails(
+                         message="Reloading file contents recovered from error state"
+                     )
+                 )
+                 self.should_emit_ready_on_success = False
+ 
+         self.last_modified = modified_time or os.path.getmtime(self.file_path)
+ 
+     def handle_error(self, error_message: str) -> None:
+         logger.exception(error_message)
+         self.should_emit_ready_on_success = True
+         self.emit_provider_error(ProviderEventDetails(message=error_message))
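A driving sketch for the new FileWatcher, using only the constructor and methods shown above; the flag payload follows flagd's flag-definition syntax, and the Config keyword arguments are assumed rather than taken from this diff:

import json
import tempfile

from openfeature.contrib.provider.flagd.config import Config
from openfeature.contrib.provider.flagd.resolvers.process.connector.file_watcher import (
    FileWatcher,
)
from openfeature.contrib.provider.flagd.resolvers.process.flags import FlagStore
from openfeature.evaluation_context import EvaluationContext
from openfeature.event import ProviderEventDetails

# Minimal flagd flag definition; FlagStore.update() receives this dict after parsing.
flags = {
    "flags": {
        "my-flag": {
            "state": "ENABLED",
            "variants": {"on": True, "off": False},
            "defaultVariant": "on",
        }
    }
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(flags, f)
    path = f.name


def log_event(details: ProviderEventDetails) -> None:
    print(details.message)


config = Config(offline_flag_source_path=path, deadline_ms=500)  # keyword names assumed
store = FlagStore(log_event)  # emit_provider_configuration_changed callback
watcher = FileWatcher(config, store, log_event, log_event)
watcher.initialize(EvaluationContext())  # loads the file and starts the polling thread
# ... later, edits to the file are picked up on the next poll tick ...
watcher.shutdown()

The polling interval reuses deadline_ms (converted to seconds), so a lower deadline also means more frequent file checks.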