dreadnode 1.0.0rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dreadnode/task.py ADDED
@@ -0,0 +1,447 @@
1
+ import asyncio
2
+ import inspect
3
+ import traceback
4
+ import typing as t
5
+ from dataclasses import dataclass
6
+
7
+ from logfire._internal.stack_info import warn_at_user_stacklevel
8
+ from opentelemetry.trace import Tracer
9
+
10
+ from dreadnode.metric import Scorer, ScorerCallable
11
+ from dreadnode.tracing.span import TaskSpan, current_run_span
12
+
13
P = t.ParamSpec("P")  # captures the wrapped task function's parameters
R = t.TypeVar("R")  # the wrapped task function's return type
15
+
16
+
17
class TaskFailedWarning(UserWarning):
    """Warning category emitted by the `try_*` helpers when a task raises instead of returning."""
19
+
20
+
21
class TaskGeneratorWarning(UserWarning):
    """Warning category for generator-related task misuse.

    NOTE(review): no emit site is visible in this module - presumably used when
    a task function is a generator; confirm against the wider package.
    """
23
+
24
+
25
class TaskSpanList(list[TaskSpan[R]]):
    """
    Lightweight wrapper around a list of TaskSpans to provide some convenience methods.
    """

    def sorted(self, *, reverse: bool = True) -> "TaskSpanList[R]":
        """
        Sorts the spans in this list by their average metric value.

        Args:
            reverse: If True, sorts in descending order. Defaults to True.

        Returns:
            A new TaskSpanList sorted by average metric value.
        """
        ordered = sorted(
            self,
            key=lambda item: item.get_average_metric_value(),
            reverse=reverse,
        )
        return TaskSpanList(ordered)

    @t.overload
    def top_n(
        self,
        n: int,
        *,
        as_outputs: t.Literal[False] = False,
        reverse: bool = True,
    ) -> "TaskSpanList[R]": ...

    @t.overload
    def top_n(
        self,
        n: int,
        *,
        as_outputs: t.Literal[True],
        reverse: bool = True,
    ) -> list[R]: ...

    def top_n(
        self,
        n: int,
        *,
        as_outputs: bool = False,
        reverse: bool = True,
    ) -> "TaskSpanList[R] | list[R]":
        """
        Take the top n spans from this list, sorted by their average metric value.

        Args:
            n: The number of spans to take.
            as_outputs: If True, returns a list of outputs instead of spans. Defaults to False.
            reverse: If True, sorts in descending order. Defaults to True.

        Returns:
            A new TaskSpanList or list of outputs sorted by average metric value.
        """
        best = self.sorted(reverse=reverse)[:n]
        if as_outputs:
            return t.cast("list[R]", [span.output for span in best])
        return TaskSpanList(best)
90
+
91
+
92
@dataclass
class Task(t.Generic[P, R]):
    """
    Structured task wrapper for a function that can be executed within a run.

    Tasks allow you to associate metadata, inputs, outputs, and metrics for a unit of work.
    """

    # Tracer used to create the underlying task/scorer spans.
    tracer: Tracer

    name: str
    "The name of the task. This is used for logging and tracing."
    label: str
    "The label of the task - used to group associated metrics and data together."
    attributes: dict[str, t.Any]
    "A dictionary of attributes to attach to the task span."
    func: t.Callable[P, R]
    "The function to execute as the task."
    scorers: list[Scorer[R]]
    "A list of scorers to evaluate the task's output."
    tags: list[str]
    "A list of tags to attach to the task span."

    log_params: t.Sequence[str] | bool = False
    "Whether to log all, or specific, incoming arguments to the function as parameters."
    log_inputs: t.Sequence[str] | bool = True
    "Whether to log all, or specific, incoming arguments to the function as inputs."
    log_output: bool = True
    "Whether to automatically log the result of the function as an output."

    def __post_init__(self) -> None:
        """Mirror the wrapped function's metadata so a Task introspects like `func` itself."""
        self.__signature__ = getattr(
            self.func,
            "__signature__",
            inspect.signature(self.func),
        )
        self.__name__ = getattr(self.func, "__name__", self.name)
        self.__doc__ = getattr(self.func, "__doc__", None)

    def _bind_args(self, *args: P.args, **kwargs: P.kwargs) -> dict[str, t.Any]:
        """Bind call arguments against `func`'s signature (defaults applied) and return them by name."""
        signature = inspect.signature(self.func)
        bound_args = signature.bind(*args, **kwargs)
        bound_args.apply_defaults()
        return dict(bound_args.arguments)

    def clone(self) -> "Task[P, R]":
        """
        Clone a task.

        Returns:
            A new Task instance with the same attributes as this one.
        """
        # Copy mutable members (attributes, scorers, tags) so the clone is
        # independent of this instance; `func` and `tracer` are shared.
        return Task(
            tracer=self.tracer,
            name=self.name,
            label=self.label,
            attributes=self.attributes.copy(),
            func=self.func,
            scorers=[scorer.clone() for scorer in self.scorers],
            tags=self.tags.copy(),
            log_params=self.log_params,
            log_inputs=self.log_inputs,
            log_output=self.log_output,
        )

    def with_(
        self,
        *,
        scorers: t.Sequence[Scorer[R] | ScorerCallable[R]] | None = None,
        name: str | None = None,
        tags: t.Sequence[str] | None = None,
        label: str | None = None,
        log_params: t.Sequence[str] | bool | None = None,
        log_inputs: t.Sequence[str] | bool | None = None,
        log_output: bool | None = None,
        append: bool = False,
        **attributes: t.Any,
    ) -> "Task[P, R]":
        """
        Clone a task and modify its attributes.

        Args:
            scorers: A list of new scorers to set or append to the task.
            name: The new name for the task.
            tags: A list of new tags to set or append to the task.
            label: The new label for the task.
            log_params: Whether to log all, or specific, incoming arguments to the function as parameters.
            log_inputs: Whether to log all, or specific, incoming arguments to the function as inputs.
            log_output: Whether to automatically log the result of the function as an output.
            append: If True, appends the new scorers and tags to the existing ones. If False, replaces them.
            **attributes: Additional attributes to set or update in the task.

        Returns:
            A new Task instance with the modified attributes.
        """
        task = self.clone()
        task.name = name or task.name
        task.label = label or task.label
        # `is not None` (rather than truthiness) so explicit False/[] still override.
        task.log_params = log_params if log_params is not None else task.log_params
        task.log_inputs = log_inputs if log_inputs is not None else task.log_inputs
        task.log_output = log_output if log_output is not None else task.log_output

        new_scorers = [Scorer.from_callable(self.tracer, scorer) for scorer in (scorers or [])]
        new_tags = list(tags or [])

        if append:
            task.scorers.extend(new_scorers)
            task.tags.extend(new_tags)
            task.attributes.update(attributes)
        else:
            # Replace mode: any previously-set scorers/tags/attributes are dropped.
            task.scorers = new_scorers
            task.tags = new_tags
            task.attributes = attributes

        return task

    async def run(self, *args: P.args, **kwargs: P.kwargs) -> TaskSpan[R]:
        """
        Execute the task and return the result as a TaskSpan.

        Args:
            args: The arguments to pass to the task.
            kwargs: The keyword arguments to pass to the task.

        Returns:
            The span associated with task execution.
        """
        run = current_run_span.get()
        if run is None or not run.is_recording:
            raise RuntimeError("Tasks must be executed within a run")

        bound_args = self._bind_args(*args, **kwargs)

        # Chained conditionals: True logs every bound argument, False logs none,
        # and a sequence of names logs only the named arguments.
        params_to_log = (
            bound_args
            if self.log_params is True
            else {k: v for k, v in bound_args.items() if k in self.log_params}
            if self.log_params is not False
            else {}
        )
        inputs_to_log = (
            bound_args
            if self.log_inputs is True
            else {k: v for k, v in bound_args.items() if k in self.log_inputs}
            if self.log_inputs is not False
            else {}
        )

        with TaskSpan[R](
            name=self.name,
            label=self.label,
            attributes=self.attributes,
            params=params_to_log,
            tags=self.tags,
            run_id=run.run_id,
            tracer=self.tracer,
        ) as span:
            # Count every execution attempt under this task's label on the run.
            span.run.log_metric(f"{self.label}.exec.count", 1, mode="count")

            for name, value in params_to_log.items():
                span.log_param(name, value)

            # Hashes are kept so the eventual output object can be linked back
            # to each logged input object below.
            input_object_hashes: list[str] = [
                span.log_input(name, value, label=f"{self.label}.input.{name}")
                for name, value in inputs_to_log.items()
            ]

            try:
                # `func` may be sync or async - await the result when needed.
                output = t.cast("R | t.Awaitable[R]", self.func(*args, **kwargs))
                if inspect.isawaitable(output):
                    output = await output
            except Exception:
                # Record the failure sample in the running average, then propagate.
                span.run.log_metric(f"{self.label}.exec.success_rate", 0, mode="avg")
                raise

            span.run.log_metric(f"{self.label}.exec.success_rate", 1, mode="avg")
            span.output = output

            if self.log_output:
                output_object_hash = span.log_output(
                    "output",
                    output,
                    label=f"{self.label}.output",
                )

                # Link the output to the inputs
                for input_object_hash in input_object_hashes:
                    span.run.link_objects(output_object_hash, input_object_hash)

            # Scorers run sequentially; each metric is attributed to the output.
            for scorer in self.scorers:
                metric = await scorer(output)
                span.log_metric(scorer.name, metric, origin=output)

        return span

    async def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
        """Execute the task and return only its output (use `run` for the full span)."""
        span = await self.run(*args, **kwargs)
        return span.output

    # NOTE(nick): Not sure I'm in love with these being instance methods here.
    # We could move them to the top level class maybe.

    async def map_run(
        self,
        count: int,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> TaskSpanList[R]:
        """
        Run the task multiple times and return a list of spans.

        Args:
            count: The number of times to run the task.
            args: The arguments to pass to the task.
            kwargs: The keyword arguments to pass to the task.

        Returns:
            A TaskSpanList associated with each task execution.
        """
        # All executions run concurrently; a single failure cancels the batch.
        spans = await asyncio.gather(*[self.run(*args, **kwargs) for _ in range(count)])
        return TaskSpanList(spans)

    async def map(self, count: int, *args: P.args, **kwargs: P.kwargs) -> list[R]:
        """
        Run the task multiple times and return a list of outputs.

        Args:
            count: The number of times to run the task.
            args: The arguments to pass to the task.
            kwargs: The keyword arguments to pass to the task.

        Returns:
            A list of outputs from each task execution.
        """
        spans = await self.map_run(count, *args, **kwargs)
        return [span.output for span in spans]

    async def top_n(
        self,
        count: int,
        n: int,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> list[R]:
        """
        Run the task multiple times and return the top n outputs.

        Args:
            count: The number of times to run the task.
            n: The number of top outputs to return.
            args: The arguments to pass to the task.
            kwargs: The keyword arguments to pass to the task.

        Returns:
            A list of the top n outputs from the task executions.
        """
        spans = await self.map_run(count, *args, **kwargs)
        return spans.top_n(n, as_outputs=True)

    async def try_run(self, *args: P.args, **kwargs: P.kwargs) -> TaskSpan[R] | None:
        """
        Attempt to run the task and return the result as a TaskSpan.
        If the task fails, a warning is logged and None is returned.

        Args:
            args: The arguments to pass to the task.
            kwargs: The keyword arguments to pass to the task.

        Returns:
            The span associated with task execution, or None if the task failed.
        """
        try:
            return await self.run(*args, **kwargs)
        except Exception:  # noqa: BLE001
            # Deliberate best-effort: surface the traceback as a warning
            # attributed to the caller's stack frame, not this module.
            warn_at_user_stacklevel(
                f"Task '{self.name}' ({self.label}) failed:\n{traceback.format_exc()}",
                TaskFailedWarning,
            )
            return None

    async def try_(self, *args: P.args, **kwargs: P.kwargs) -> R | None:
        """
        Attempt to run the task and return the result.
        If the task fails, a warning is logged and None is returned.

        Args:
            args: The arguments to pass to the task.
            kwargs: The keyword arguments to pass to the task.

        Returns:
            The output of the task, or None if the task failed.
        """
        span = await self.try_run(*args, **kwargs)
        return span.output if span else None

    async def try_map_run(
        self,
        count: int,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> TaskSpanList[R]:
        """
        Attempt to run the task multiple times and return a list of spans.
        If any task fails, a warning is logged and None is returned for that task.

        Args:
            count: The number of times to run the task.
            args: The arguments to pass to the task.
            kwargs: The keyword arguments to pass to the task.

        Returns:
            A TaskSpanList associated with each task execution.
        """
        spans = await asyncio.gather(
            *[self.try_run(*args, **kwargs) for _ in range(count)],
        )
        # Failed executions yield None from try_run and are dropped here.
        return TaskSpanList([span for span in spans if span])

    async def try_top_n(
        self,
        count: int,
        n: int,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> list[R]:
        """
        Attempt to run the task multiple times and return the top n outputs.
        If any task fails, a warning is logged and None is returned for that task.

        Args:
            count: The number of times to run the task.
            n: The number of top outputs to return.
            args: The arguments to pass to the task.
            kwargs: The keyword arguments to pass to the task.

        Returns:
            A list of the top n outputs from the task executions.
        """
        spans = await self.try_map_run(count, *args, **kwargs)
        return spans.top_n(n, as_outputs=True)

    async def try_map(self, count: int, *args: P.args, **kwargs: P.kwargs) -> list[R]:
        """
        Attempt to run the task multiple times and return a list of outputs.
        If any task fails, a warning is logged and None is returned for that task.

        Args:
            count: The number of times to run the task.
            args: The arguments to pass to the task.
            kwargs: The keyword arguments to pass to the task.

        Returns:
            A list of outputs from each task execution.
        """
        spans = await self.try_map_run(count, *args, **kwargs)
        return [span.output for span in spans if span]
File without changes
@@ -0,0 +1,35 @@
1
+ import typing as t
2
+
3
# Root namespace prepended to every attribute, event, and metric key below.
SPAN_NAMESPACE = "dreadnode"

# Discriminator for the kind of span a payload represents.
SpanType = t.Literal["run", "task", "span", "run_update"]

# Span-level attribute keys.
SPAN_ATTRIBUTE_VERSION = f"{SPAN_NAMESPACE}.version"
SPAN_ATTRIBUTE_TYPE = f"{SPAN_NAMESPACE}.type"
SPAN_ATTRIBUTE_SCHEMA = f"{SPAN_NAMESPACE}.schema"
SPAN_ATTRIBUTE_LABEL = f"{SPAN_NAMESPACE}.label"
# NOTE(review): the trailing underscore looks unintentional, but it is part of
# the public name - confirm all call sites before renaming.
SPAN_ATTRIBUTE_TAGS_ = f"{SPAN_NAMESPACE}.tags"
SPAN_ATTRIBUTE_PROJECT = f"{SPAN_NAMESPACE}.project"
SPAN_ATTRIBUTE_PARAMS = f"{SPAN_NAMESPACE}.params"
SPAN_ATTRIBUTE_INPUTS = f"{SPAN_NAMESPACE}.inputs"
SPAN_ATTRIBUTE_METRICS = f"{SPAN_NAMESPACE}.metrics"
SPAN_ATTRIBUTE_OUTPUTS = f"{SPAN_NAMESPACE}.outputs"
SPAN_ATTRIBUTE_OBJECTS = f"{SPAN_NAMESPACE}.objects"
SPAN_ATTRIBUTE_OBJECT_SCHEMAS = f"{SPAN_NAMESPACE}.object_schemas"
SPAN_ATTRIBUTE_ARTIFACTS = f"{SPAN_NAMESPACE}.artifacts"
SPAN_ATTRIBUTE_RUN_ID = f"{SPAN_NAMESPACE}.run.id"
SPAN_ATTRIBUTE_PARENT_TASK_ID = f"{SPAN_NAMESPACE}.task.parent_id"
SPAN_ATTRIBUTE_LARGE_ATTRIBUTES = f"{SPAN_NAMESPACE}.large_attributes"

# Event names for object records attached to spans.
EVENT_NAME_OBJECT = f"{SPAN_NAMESPACE}.object"
EVENT_NAME_OBJECT_INPUT = f"{SPAN_NAMESPACE}.object.input"
EVENT_NAME_OBJECT_OUTPUT = f"{SPAN_NAMESPACE}.object.output"
EVENT_NAME_OBJECT_METRIC = f"{SPAN_NAMESPACE}.object.metric"
EVENT_NAME_OBJECT_LINK = f"{SPAN_NAMESPACE}.object.link"

# Attribute keys carried on the object events above.
EVENT_ATTRIBUTE_OBJECT_LABEL = f"{SPAN_NAMESPACE}.object.label"
EVENT_ATTRIBUTE_OBJECT_HASH = f"{SPAN_NAMESPACE}.object.hash"
EVENT_ATTRIBUTE_LINK_HASH = f"{SPAN_NAMESPACE}.link.hash"
EVENT_ATTRIBUTE_ORIGIN_SPAN_ID = f"{SPAN_NAMESPACE}.origin.span_id"

# Attribute key identifying the object a metric originated from.
METRIC_ATTRIBUTE_SOURCE_HASH = f"{SPAN_NAMESPACE}.origin.hash"
@@ -0,0 +1,157 @@
1
import threading
import typing as t
from collections.abc import Sequence
from dataclasses import dataclass, field
from pathlib import Path
from typing import IO

from google.protobuf import json_format
from opentelemetry.exporter.otlp.proto.common._log_encoder import encode_logs
from opentelemetry.exporter.otlp.proto.common.metrics_encoder import encode_metrics
from opentelemetry.exporter.otlp.proto.common.trace_encoder import encode_spans
from opentelemetry.sdk._logs import LogData
from opentelemetry.sdk._logs.export import LogExporter, LogExportResult
from opentelemetry.sdk.metrics.export import (
    MetricReader,
    MetricsData,
)
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult

from dreadnode.util import logger
22
+
23
+
24
+ @dataclass
25
+ class FileExportConfig:
26
+ """Configuration for signal exports to JSONL files."""
27
+
28
+ base_path: str | Path = Path.cwd() / ".dreadnode"
29
+ prefix: str = ""
30
+
31
+ def get_path(self, signal: str) -> Path:
32
+ """Get the file path for a specific signal type."""
33
+ base = Path(self.base_path)
34
+ base.mkdir(parents=True, exist_ok=True)
35
+ return base / f"{self.prefix}{signal}.jsonl"
36
+
37
+
38
class FileMetricReader(MetricReader):
    """MetricReader that writes metrics to a file in OTLP format."""

    def __init__(self, config: FileExportConfig):
        super().__init__()
        self.config = config
        self._lock = threading.Lock()
        self._file: IO[str] | None = None

    @property
    def file(self) -> IO[str]:
        # Opened lazily so constructing the reader touches no files.
        if self._file is None:
            self._file = self.config.get_path("metrics").open("a")
        return self._file

    def _receive_metrics(
        self,
        metrics_data: MetricsData,
        timeout_millis: float = 10_000,  # noqa: ARG002
        **kwargs: t.Any,  # noqa: ARG002
    ) -> None:
        if metrics_data is None:
            return

        try:
            line = json_format.MessageToJson(encode_metrics(metrics_data), indent=None)
            with self._lock:
                sink = self.file
                sink.write(line + "\n")
                sink.flush()
        except Exception as e:  # noqa: BLE001
            # Best-effort export: log and carry on rather than break the pipeline.
            logger.error(f"Failed to export metrics: {e}")

    def shutdown(
        self,
        timeout_millis: float = 30_000,  # noqa: ARG002
        **kwargs: t.Any,  # noqa: ARG002
    ) -> None:
        with self._lock:
            handle, self._file = self._file, None
            if handle:
                handle.close()
80
+
81
+
82
class FileSpanExporter(SpanExporter):
    """SpanExporter that writes spans to a file in OTLP format."""

    def __init__(self, config: FileExportConfig):
        self.config = config
        self._lock = threading.Lock()
        self._file: IO[str] | None = None

    @property
    def file(self) -> IO[str]:
        # Opened lazily so constructing the exporter touches no files.
        if self._file is None:
            self._file = self.config.get_path("traces").open("a")
        return self._file

    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
        try:
            line = json_format.MessageToJson(encode_spans(spans), indent=None)
            with self._lock:
                sink = self.file
                sink.write(line + "\n")
                sink.flush()
        except Exception as e:  # noqa: BLE001
            logger.error(f"Failed to export spans: {e}")
            return SpanExportResult.FAILURE
        return SpanExportResult.SUCCESS

    def force_flush(
        self,
        timeout_millis: float = 30_000,  # noqa: ARG002
    ) -> bool:
        # Writes are flushed eagerly in export(), so nothing is buffered here.
        return True

    def shutdown(self) -> None:
        with self._lock:
            handle, self._file = self._file, None
            if handle:
                handle.close()
119
+
120
+
121
class FileLogExporter(LogExporter):
    """LogExporter that writes logs to a file in OTLP format."""

    def __init__(self, config: FileExportConfig):
        self.config = config
        self._lock = threading.Lock()
        self._file: IO[str] | None = None

    @property
    def file(self) -> IO[str]:
        # Opened lazily so constructing the exporter touches no files.
        if self._file is None:
            self._file = self.config.get_path("logs").open("a")
        return self._file

    def export(self, batch: Sequence[LogData]) -> LogExportResult:
        try:
            line = json_format.MessageToJson(encode_logs(batch), indent=None)
            with self._lock:
                sink = self.file
                sink.write(line + "\n")
                sink.flush()
        except Exception as e:  # noqa: BLE001
            logger.error(f"Failed to export logs: {e}")
            return LogExportResult.FAILURE
        return LogExportResult.SUCCESS

    def force_flush(
        self,
        timeout_millis: float = 30_000,  # noqa: ARG002
    ) -> bool:
        # Writes are flushed eagerly in export(), so nothing is buffered here.
        return True

    def shutdown(self) -> None:
        with self._lock:
            handle, self._file = self._file, None
            if handle:
                handle.close()