fluxfem 0.1.3a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (47)
  1. fluxfem/__init__.py +343 -0
  2. fluxfem/core/__init__.py +318 -0
  3. fluxfem/core/assembly.py +788 -0
  4. fluxfem/core/basis.py +996 -0
  5. fluxfem/core/data.py +64 -0
  6. fluxfem/core/dtypes.py +4 -0
  7. fluxfem/core/forms.py +234 -0
  8. fluxfem/core/interp.py +55 -0
  9. fluxfem/core/solver.py +113 -0
  10. fluxfem/core/space.py +419 -0
  11. fluxfem/core/weakform.py +828 -0
  12. fluxfem/helpers_ts.py +11 -0
  13. fluxfem/helpers_wf.py +44 -0
  14. fluxfem/mesh/__init__.py +29 -0
  15. fluxfem/mesh/base.py +244 -0
  16. fluxfem/mesh/hex.py +327 -0
  17. fluxfem/mesh/io.py +87 -0
  18. fluxfem/mesh/predicate.py +45 -0
  19. fluxfem/mesh/surface.py +257 -0
  20. fluxfem/mesh/tet.py +246 -0
  21. fluxfem/physics/__init__.py +53 -0
  22. fluxfem/physics/diffusion.py +18 -0
  23. fluxfem/physics/elasticity/__init__.py +39 -0
  24. fluxfem/physics/elasticity/hyperelastic.py +99 -0
  25. fluxfem/physics/elasticity/linear.py +58 -0
  26. fluxfem/physics/elasticity/materials.py +32 -0
  27. fluxfem/physics/elasticity/stress.py +46 -0
  28. fluxfem/physics/operators.py +109 -0
  29. fluxfem/physics/postprocess.py +113 -0
  30. fluxfem/solver/__init__.py +47 -0
  31. fluxfem/solver/bc.py +439 -0
  32. fluxfem/solver/cg.py +326 -0
  33. fluxfem/solver/dirichlet.py +126 -0
  34. fluxfem/solver/history.py +31 -0
  35. fluxfem/solver/newton.py +400 -0
  36. fluxfem/solver/result.py +62 -0
  37. fluxfem/solver/solve_runner.py +534 -0
  38. fluxfem/solver/solver.py +148 -0
  39. fluxfem/solver/sparse.py +188 -0
  40. fluxfem/tools/__init__.py +7 -0
  41. fluxfem/tools/jit.py +51 -0
  42. fluxfem/tools/timer.py +659 -0
  43. fluxfem/tools/visualizer.py +101 -0
  44. fluxfem-0.1.3a0.dist-info/LICENSE +201 -0
  45. fluxfem-0.1.3a0.dist-info/METADATA +125 -0
  46. fluxfem-0.1.3a0.dist-info/RECORD +47 -0
  47. fluxfem-0.1.3a0.dist-info/WHEEL +4 -0
fluxfem/tools/timer.py ADDED
@@ -0,0 +1,659 @@
+ import time
+ from abc import ABC, abstractmethod
+ from contextlib import AbstractContextManager
+
+ from collections import defaultdict
+ from contextlib import contextmanager
+ from dataclasses import dataclass
+ from typing import Callable, DefaultDict, Dict, Iterator, List, Optional
+
+ import logging
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class SectionStats:
+     name: str
+     count: int
+     total: float
+     avg: float
+     max: float
+
+
+ class BaseTimer(ABC):
+     @abstractmethod
+     def section(self, name: str) -> AbstractContextManager[None]:
+         raise NotImplementedError
+
+
+ class NullTimer(BaseTimer):
+     @contextmanager
+     def section(self, name: str):
+         yield
+
+
+ class SectionTimer(BaseTimer):
+     """
+     Lightweight helper to measure how long named sections take.
+
+     Examples
+     --------
+     >>> timer = SectionTimer()
+     >>> with timer.section("assemble"):
+     ...     ...
+     >>> with timer.section("solve"):
+     ...     ...
+     >>> timer.report()
+
+     To track nested calls, enable ``hierarchical`` so section names
+     are recorded with their call stack (e.g., ``outer>inner``)::
+
+         timer = SectionTimer(hierarchical=True)
+         with timer.section("outer"):
+             with timer.section("inner"):
+                 ...
+     """
+
+     def __init__(
+         self,
+         clock: Optional[Callable[[], float]] = None,
+         hierarchical: bool = False,
+         sep: str = ">",
+     ):
+         self._clock: Callable[[], float] = clock or time.perf_counter
+         self._records: DefaultDict[str, List[float]] = defaultdict(list)
+         self._hierarchical = hierarchical
+         self._sep = sep
+         self._stack: List[str] = []
+         self._running_means: Dict[str, float] = {}
+         self._running_counts: Dict[str, int] = {}
+
+     @contextmanager
+     def section(self, name: str) -> Iterator[None]:
+         if self._hierarchical and self._stack:
+             full_name = self._sep.join([*self._stack, name])
+         else:
+             full_name = name
+         self._stack.append(name)
+         start = self._clock()
+         try:
+             yield
+         finally:
+             duration = self._clock() - start
+             self._records[full_name].append(duration)
+             self._stack.pop()
+
+     def wrap(self, name: str):
+         """
+         Decorator form of :meth:`section`.
+
+         This is convenient for quickly instrumenting functions without
+         rewriting call sites::
+
+             @timer.wrap("my_step")
+             def my_step(...):
+                 ...
+         """
+
+         def _decorator(func):
+             def _wrapper(*args, **kwargs):
+                 with self.section(name):
+                     return func(*args, **kwargs)
+             return _wrapper
+
+         return _decorator
+
+     def add(self, name: str, duration: float) -> None:
+         self._records[name].append(duration)
+
+     def last(self, name: str, default: Optional[float] = None) -> float:
+         """
+         Return the most recent duration recorded for a section.
+         """
+         if name not in self._records or not self._records[name]:
+             if default is None:
+                 raise KeyError(f"No recorded timings for section '{name}'.")
+             return float(default)
+         return float(self._records[name][-1])
+
+     def reset(self, name: Optional[str] = None) -> None:
+         if name is None:
+             self._records.clear()
+             self._running_means.clear()
+             self._running_counts.clear()
+         else:
+             self._records.pop(name, None)
+             self._running_means.pop(name, None)
+             self._running_counts.pop(name, None)
+
+     def stats(self) -> List[SectionStats]:
+         stats: List[SectionStats] = []
+         for name, values in self._records.items():
+             if not values:
+                 continue
+             total = sum(values)
+             stats.append(
+                 SectionStats(
+                     name=name,
+                     count=len(values),
+                     total=total,
+                     avg=total / len(values),
+                     max=max(values),
+                 )
+             )
+         return stats
+
+     def _self_time_stats(self, stats: List[SectionStats]) -> List[SectionStats]:
+         sep = self._sep
+         totals = {s.name: s.total for s in stats}
+         children: DefaultDict[str, List[str]] = defaultdict(list)
+         for name in totals:
+             if sep in name:
+                 parent = sep.join(name.split(sep)[:-1])
+                 children[parent].append(name)
+         self_stats: List[SectionStats] = []
+         for s in stats:
+             child_total = sum(totals[ch] for ch in children.get(s.name, []))
+             self_time = max(s.total - child_total, 0.0)
+             avg = self_time / s.count if s.count else 0.0
+             self_stats.append(
+                 SectionStats(
+                     name=s.name,
+                     count=s.count,
+                     total=self_time,
+                     avg=avg,
+                     max=self_time,  # self-time max is self-time total here
+                 )
+             )
+         return self_stats
+
+     def summary(
+         self, sort_by: str = "total", descending: bool = True
+     ) -> List[SectionStats]:
+         key_map = {
+             "total": lambda s: s.total,
+             "avg": lambda s: s.avg,
+             "max": lambda s: s.max,
+             "count": lambda s: s.count,
+             "name": lambda s: s.name,
+         }
+         try:
+             key_func = key_map[sort_by]
+         except KeyError as exc:
+             raise ValueError(
+                 'sort_by must be one of {"total", "avg", "max", "count", "name"}'
+             ) from exc
+
+         return sorted(self.stats(), key=key_func, reverse=descending)
+
+     def summary_self_time(
+         self, sort_by: str = "total", descending: bool = True
+     ) -> List[SectionStats]:
+         stats = self._self_time_stats(self.stats())
+         key_map = {
+             "total": lambda s: s.total,
+             "avg": lambda s: s.avg,
+             "max": lambda s: s.max,
+             "count": lambda s: s.count,
+             "name": lambda s: s.name,
+         }
+         try:
+             key_func = key_map[sort_by]
+         except KeyError as exc:
+             raise ValueError(
+                 'sort_by must be one of {"total", "avg", "max", "count", "name"}'
+             ) from exc
+         return sorted(stats, key=key_func, reverse=descending)
+
+     def report(
+         self,
+         sort_by: str = "total",
+         descending: bool = True,
+         logger_instance=None,
+     ) -> str:
+         stats = self.summary(sort_by=sort_by, descending=descending)
+         if not stats:
+             message = "No timing data collected."
+             (logger_instance or logger).info(message)
+             return message
+
+         lines = [
+             f"{s.name}: total={s.total:.6f}s avg={s.avg:.6f}s max={s.max:.6f}s count={s.count}"
+             for s in stats
+         ]
+
+         for line in lines:
+             (logger_instance or logger).info(line)
+
+         return "\n".join(lines)
+
+     def plot_bar(
+         self,
+         ax=None,
+         sort_by: str = "total",
+         value: str = "total",
+         descending: bool = True,
+         color: str = "C0",
+         format_nested: Optional[bool] = None,
+         stacked_nested: bool = False,
+         moving_average: bool = False,
+         use_self_time: bool = False,
+     ):
+         """
+         Plot timing results as a horizontal bar chart without relying on pyplot state.
+
+         Parameters
+         ----------
+         ax : matplotlib.axes.Axes, optional
+             Target axes. If omitted, a new Figure/Axes is created.
+         sort_by : {"total", "avg", "max", "count", "name"}
+             Sorting key used before plotting.
+         value : {"total", "avg", "max", "count"}
+             Metric plotted on the x-axis.
+         descending : bool
+             Sort order for ``sort_by``.
+         color : str
+             Bar color passed to Matplotlib.
+         format_nested : bool, optional
+             If ``True`` and the timer is hierarchical, indent nested section
+             labels using ``sep`` for readability. Defaults to the
+             ``hierarchical`` flag used at construction time (ignored when
+             ``stacked_nested`` is ``True``).
+         stacked_nested : bool
+             If ``True`` and hierarchical data are present, render a stacked bar
+             for every section that has children: self-time (parent minus
+             sum(children)) plus one segment per direct child. Sections without
+             children are drawn as regular bars alongside the stacked groups.
+         moving_average : bool
+             If ``True``, plot cumulative running averages computed with an
+             incremental mean update (no full history is stored).
+
+         Returns
+         -------
+         (matplotlib.figure.Figure, matplotlib.axes.Axes)
+             Figure/Axes containing the plot. If ``ax`` was provided,
+             its parent figure is returned.
+         """
+
+         stats = (
+             self.summary_self_time(sort_by=sort_by, descending=descending)
+             if use_self_time
+             else self.summary(sort_by=sort_by, descending=descending)
+         )
+         if not stats:
+             raise ValueError("No timing data to plot.")
+
+         metric_map = {
+             "total": lambda s: s.total,
+             "avg": lambda s: s.avg,
+             "max": lambda s: s.max,
+             "count": lambda s: s.count,
+         }
+         if value not in metric_map:
+             raise ValueError(
+                 'value must be one of {"total", "avg", "max", "count"}'
+             )
+
+         if stacked_nested:
+             if not any(self._sep in s.name for s in stats):
+                 raise ValueError("stacked_nested=True requires hierarchical section names.")
+
+             name_to_stat = {s.name: s for s in stats}
+             children_map: DefaultDict[str, List[str]] = defaultdict(list)
+             for s in stats:
+                 if self._sep in s.name:
+                     parent = s.name.rsplit(self._sep, 1)[0]
+                     children_map[parent].append(s.name)
+
+             top_levels = [s.name for s in stats if self._sep not in s.name]
+             fig = None
+             if ax is None:
+                 from matplotlib.figure import Figure
+
+                 # Extra-wide figure to clearly show stacked child segments.
+                 fig = Figure(figsize=(24, 0.9 * max(1, len(top_levels)) + 2))
+                 ax = fig.add_subplot(111)
+                 fig.subplots_adjust(left=0.2, right=0.95)
+
+             import itertools
+             from matplotlib import colors as mcolors
+
+             # Fix parent/child colors to clearly separate segments.
+             parent_color = "#444444"  # dark gray for parent self time
+             child_palette = [
+                 "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd",
+                 "#8c564b", "#e377c2", "#bcbd22", "#17becf", "#7f7f7f",
+                 "#aec7e8", "#ffbb78", "#98df8a", "#ff9896", "#c5b0d5",
+                 "#c49c94", "#f7b6d2", "#dbdb8d", "#9edae5",
+             ]
+             child_colors_map = {}
+             seen_labels = set()
+
+             for y_idx, name in enumerate(top_levels):
+                 p_stat = name_to_stat[name]
+                 p_val = metric_map[value](p_stat)
+                 child_names = children_map.get(name, [])
+                 child_vals = [
+                     metric_map[value](name_to_stat[ch])
+                     for ch in child_names
+                 ]
+                 child_sum = sum(child_vals)
+                 self_val = max(p_val - child_sum, 0.0)
+
+                 left = 0.0
+                 lbl_self = f"{name} (self)"
+                 ax.barh(
+                     y_idx,
+                     self_val,
+                     left=left,
+                     color=parent_color,
+                     edgecolor="black",
+                     linewidth=0.5,
+                     label=None if lbl_self in seen_labels else lbl_self,
+                 )
+                 seen_labels.add(lbl_self)
+                 left += self_val
+
+                 for ch_name, ch_val in zip(child_names, child_vals):
+                     ch_label = ch_name.split(self._sep)[-1]
+                     lbl = f"{name}>{ch_label}"
+                     if ch_label not in child_colors_map:
+                         child_colors_map[ch_label] = child_palette[len(child_colors_map) % len(child_palette)]
+                     c = child_colors_map[ch_label]
+                     ax.barh(
+                         y_idx,
+                         ch_val,
+                         left=left,
+                         color=c,
+                         edgecolor="black",
+                         linewidth=0.5,
+                         label=None if lbl in seen_labels else lbl,
+                     )
+                     seen_labels.add(lbl)
+                     left += ch_val
+
+             ax.set_yticks(range(len(top_levels)))
+             ax.set_yticklabels(top_levels)
+             ax.set_xlabel(f"{value} [s]" if value in ("total", "avg", "max") else value)
+             ax.set_title("Section timing (stacked by parent)")
+             ax.invert_yaxis()
+             handles, labels = ax.get_legend_handles_labels()
+             uniq = dict(zip(labels, handles))
+             ax.legend(
+                 uniq.values(),
+                 uniq.keys(),
+                 bbox_to_anchor=(1.04, 1),
+                 loc="upper left",
+             )
+             return (fig or ax.figure), ax
+
+         names = [s.name for s in stats]
+         fmt_nested = self._hierarchical if format_nested is None else format_nested
+         if fmt_nested:
+             indent = " "
+             names = [
+                 f"{indent * name.count(self._sep)}{name.split(self._sep)[-1]}"
+                 for name in names
+             ]
+         data = [metric_map[value](s) for s in stats]
+         if moving_average:
+             data = self._update_running_average(names, data)
+
+         fig = None
+         if ax is None:
+             # Use Figure/Axes directly to avoid pyplot-global state.
+             from matplotlib.figure import Figure
+
+             fig = Figure(figsize=(6, 0.4 * len(stats) + 1))
+             ax = fig.add_subplot(111)
+
+         ax.barh(names, data, color=color)
+         ax.set_xlabel(f"{value} [s]" if value in ("total", "avg", "max") else value)
+         ax.set_title("Section timing")
+         ax.invert_yaxis()
+         return (fig or ax.figure), ax
+
+     def plot_pie(
+         self,
+         ax=None,
+         sort_by: str = "total",
+         value: str = "total",
+         descending: bool = True,
+         colors: Optional[List[str]] = None,
+         autopct: str = "%.1f%%",
+         label_threshold: float = 0.05,
+         min_pct_to_label: float = 1.0,
+         show_legend: bool = True,
+         legend_kwargs: Optional[dict] = None,
+         show_total: bool = True,
+         moving_average: bool = False,
+         use_self_time: bool = False,
+     ):
+         """
+         Plot timing results as a pie chart to show relative time share.
+
+         Parameters
+         ----------
+         ax : matplotlib.axes.Axes, optional
+             Target axes. If omitted, a new Figure/Axes is created.
+         sort_by : {"total", "avg", "max", "count", "name"}
+             Sorting key used before plotting.
+         value : {"total", "avg", "max", "count"}
+             Metric used to size the wedges.
+         descending : bool
+             Sort order for ``sort_by``.
+         colors : list of str, optional
+             Colors passed to Matplotlib ``pie``.
+         autopct : str
+             ``autopct`` string passed to Matplotlib ``pie``.
+         label_threshold : float
+             Minimum fraction (0-1) required to draw a text label on the wedge.
+             Smaller slices omit the label to reduce clutter.
+         min_pct_to_label : float
+             Minimum percent value to render ``autopct`` text. Use ``None`` to
+             always show.
+         show_legend : bool
+             If ``True``, draw a legend with all section names.
+         legend_kwargs : dict, optional
+             Extra kwargs forwarded to ``Axes.legend`` when ``show_legend`` is
+             ``True``.
+         show_total : bool
+             If ``True``, append total runtime text to the title.
+         moving_average : bool
+             If ``True``, plot cumulative running averages computed with an
+             incremental mean update (no full history is stored).
+
+         Returns
+         -------
+         (matplotlib.figure.Figure, matplotlib.axes.Axes)
+             Figure/Axes containing the plot. If ``ax`` was provided,
+             its parent figure is returned.
+         """
+         stats = (
+             self.summary_self_time(sort_by=sort_by, descending=descending)
+             if use_self_time
+             else self.summary(sort_by=sort_by, descending=descending)
+         )
+         if not stats:
+             raise ValueError("No timing data to plot.")
+
+         metric_map = {
+             "total": lambda s: s.total,
+             "avg": lambda s: s.avg,
+             "max": lambda s: s.max,
+             "count": lambda s: s.count,
+         }
+         if value not in metric_map:
+             raise ValueError(
+                 'value must be one of {"total", "avg", "max", "count"}'
+             )
+
+         names = [s.name for s in stats]
+         data = [metric_map[value](s) for s in stats]
+         if moving_average:
+             data = self._update_running_average(names, data)
+         if all(v == 0 for v in data):
+             raise ValueError("All timing values are zero; nothing to plot.")
+         total = sum(data)
+         unit = "s" if value in ("total", "avg", "max") else ""
+         legend_labels = [
+             f"{n} ({val:.3f}{unit})" if unit else f"{n} ({val})"
+             for n, val in zip(names, data)
+         ]
+         value_label = {"total": "total", "avg": "avg", "max": "max", "count": "count"}.get(value, value)
+
+         fig = None
+         if ax is None:
+             from matplotlib.figure import Figure
+
+             fig = Figure(figsize=(12, 6))
+             ax = fig.add_subplot(111)
+
+         labels: List[str] = []
+         for name, val in zip(names, data):
+             frac = val / total if total else 0.0
+             labels.append(name if label_threshold is None or frac >= label_threshold else "")
+
+         def _autopct(pct: float) -> str:
+             if min_pct_to_label is None or pct >= min_pct_to_label:
+                 return autopct % pct
+             return ""
+
+         wedges, texts, autotexts = ax.pie(
+             data,
+             labels=labels,
+             colors=colors,
+             autopct=_autopct,
+             startangle=90,
+             labeldistance=1.1,
+             pctdistance=0.7,
+         )
+         ax.axis("equal")
+         if show_total:
+             suffix = f"(total {total:.3f}{unit})" if unit else f"(total {total:.0f})"
+         else:
+             suffix = ""
+         ax.set_title(
+             f"Section timing share {suffix}\n"
+             f"(values = {value_label}{' self-time' if use_self_time else ''} in {unit or 'count'})"
+         )
+         if show_legend:
+             legend_opts = {
+                 "bbox_to_anchor": (1.05, 0.5),
+                 "loc": "center left",
+                 "title": "Sections",
+             }
+             if legend_kwargs:
+                 legend_opts.update(legend_kwargs)
+             ax.legend(wedges, legend_labels, **legend_opts)
+         return (fig or ax.figure), ax
+
+     def plot(
+         self,
+         ax=None,
+         sort_by: str = "total",
+         value: str = "total",
+         descending: bool = True,
+         color: str = "C0",
+         format_nested: Optional[bool] = None,
+         stacked_nested: bool = False,
+         kind: str = "pie",
+         moving_average: bool = False,
+         use_self_time: bool = False,
+         **kwargs,
+     ):
+         """
+         Plot timing results as either a pie chart (default) or a bar chart.
+
+         Parameters
+         ----------
+         kind : {"pie", "bar"}
+             Chart type. ``"pie"`` uses :meth:`plot_pie` and is the default,
+             ``"bar"`` uses :meth:`plot_bar`.
+         Other parameters
+             Passed through to the selected plotting function.
+         """
+         if kind == "pie":
+             return self.plot_pie(
+                 ax=ax,
+                 sort_by=sort_by,
+                 value=value,
+                 descending=descending,
+                 moving_average=moving_average,
+                 use_self_time=use_self_time,
+                 **kwargs,
+             )
+         if kind == "bar":
+             return self.plot_bar(
+                 ax=ax,
+                 sort_by=sort_by,
+                 value=value,
+                 descending=descending,
+                 color=color,
+                 format_nested=format_nested,
+                 stacked_nested=stacked_nested,
+                 moving_average=moving_average,
+                 use_self_time=use_self_time,
+                 **kwargs,
+             )
+         raise ValueError('kind must be one of {"pie", "bar"}')
+
+     def save_plot(
+         self,
+         filepath: str,
+         sort_by: str = "total",
+         value: str = "total",
+         descending: bool = True,
+         color: str = "C0",
+         dpi: int = 150,
+         format_nested: Optional[bool] = None,
+         stacked_nested: bool = False,
+         kind: str = "pie",
+         use_self_time: bool = False,
+         moving_average: bool = False,
+         **kwargs,
+     ) -> None:
+         """
+         Render and save the timing plot to ``filepath``.
+
+         This helper builds its own Figure/Axes (no pyplot state), so it can be
+         used safely inside loops.
+         """
+         stats = (
+             self.summary_self_time(sort_by=sort_by, descending=descending)
+             if use_self_time
+             else self.summary(sort_by=sort_by, descending=descending)
+         )
+         if not stats:
+             raise ValueError("No timing data to plot.")
+
+         fig, _ = self.plot(
+             ax=None,
+             sort_by=sort_by,
+             value=value,
+             descending=descending,
+             color=color,
+             format_nested=format_nested,
+             stacked_nested=stacked_nested,
+             kind=kind,
+             use_self_time=use_self_time,
+             moving_average=moving_average,
+             **kwargs,
+         )
+         fig.tight_layout()
+         fig.savefig(filepath, dpi=dpi)
+         fig.clf()
+
+     def _update_running_average(self, names: List[str], data: List[float]) -> List[float]:
+         """Incremental mean update: new_avg = old_avg + (x - old_avg) / n."""
+         smoothed: List[float] = []
+         for name, val in zip(names, data):
+             new_count = self._running_counts.get(name, 0) + 1
+             prev_mean = self._running_means.get(name, val)
+             new_mean = prev_mean + (val - prev_mean) / new_count
+             self._running_counts[name] = new_count
+             self._running_means[name] = new_mean
+             smoothed.append(new_mean)
+         return smoothed
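
For orientation, a minimal usage sketch of the SectionTimer API added above. It assumes the installed wheel exposes the module as fluxfem.tools.timer; the section names and the solve() function are illustrative, not part of the package.

    from fluxfem.tools.timer import SectionTimer  # assumed import path

    timer = SectionTimer()

    with timer.section("assemble"):
        ...  # time any block of work under a named section

    @timer.wrap("solve")  # decorator form of section()
    def solve():
        ...

    solve()
    text = timer.report(sort_by="total")  # logs one line per section and returns the text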
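
With hierarchical=True, nested sections are recorded under stacked names joined by sep (default ">"), and summary_self_time() reports each section's own time: its total minus the totals of its direct children, clamped at zero. A sketch with hypothetical section names:

    timer = SectionTimer(hierarchical=True)

    with timer.section("newton"):
        with timer.section("assemble"):
            ...
        with timer.section("linear_solve"):
            ...

    # summary() lists inclusive totals for "newton", "newton>assemble", and
    # "newton>linear_solve"; summary_self_time() subtracts the direct children:
    #   self(newton) = total(newton) - total(newton>assemble) - total(newton>linear_solve)
    for s in timer.summary_self_time():
        print(f"{s.name}: {s.total:.6f}s over {s.count} call(s)")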
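
The moving_average option keeps one running mean per section via the incremental update new_mean = old_mean + (x - old_mean) / n, so no per-call history is stored. A standalone sketch of that update (not the package's code), with made-up sample durations:

    means = {}   # running mean per section name
    counts = {}  # number of samples folded into the mean

    def running_mean(name, x):
        n = counts.get(name, 0) + 1
        m = means.get(name, x)
        m = m + (x - m) / n
        counts[name], means[name] = n, m
        return m

    for sample in (0.10, 0.30, 0.20):
        print(running_mean("assemble", sample))  # 0.10, 0.20, 0.20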
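
The plotting helpers build a matplotlib.figure.Figure directly rather than going through pyplot, so they avoid global state in loops or headless jobs. A sketch, assuming Matplotlib is installed and the timer already holds data; the output file names are arbitrary:

    fig, ax = timer.plot(kind="bar", value="total", use_self_time=True)
    fig.savefig("timings_bar.png", dpi=150)

    # Or let save_plot create, render, and write the figure in one call.
    timer.save_plot("timings_pie.png", kind="pie", value="total")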