reflex 0.7.4a0__py3-none-any.whl → 0.7.4a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of reflex might be problematic. See the registry's advisory page for more details.

reflex/utils/exec.py CHANGED
@@ -3,6 +3,7 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  import hashlib
6
+ import importlib.util
6
7
  import json
7
8
  import os
8
9
  import platform
@@ -19,6 +20,7 @@ from reflex import constants
19
20
  from reflex.config import environment, get_config
20
21
  from reflex.constants.base import LogLevel
21
22
  from reflex.utils import console, path_ops
23
+ from reflex.utils.decorator import once
22
24
  from reflex.utils.prerequisites import get_web_dir
23
25
 
24
26
  # For uvicorn windows bug fix (#2335)
@@ -185,13 +187,30 @@ def run_frontend_prod(root: Path, port: str, backend_present: bool = True):
185
187
  )
186
188
 
187
189
 
190
+ @once
191
+ def _warn_user_about_uvicorn():
192
+ # When we eventually switch to Granian by default, we should enable this warning.
193
+ if False:
194
+ console.warn(
195
+ "Using Uvicorn for backend as it is installed. This behavior will change in 0.8.0 to use Granian by default."
196
+ )
197
+
198
+
188
199
  def should_use_granian():
189
200
  """Whether to use Granian for backend.
190
201
 
191
202
  Returns:
192
203
  True if Granian should be used.
193
204
  """
194
- return environment.REFLEX_USE_GRANIAN.get()
205
+ if environment.REFLEX_USE_GRANIAN.get():
206
+ return True
207
+ if (
208
+ importlib.util.find_spec("uvicorn") is None
209
+ or importlib.util.find_spec("gunicorn") is None
210
+ ):
211
+ return True
212
+ _warn_user_about_uvicorn()
213
+ return False
195
214
 
196
215
 
197
216
  def get_app_module():
@@ -200,22 +219,9 @@ def get_app_module():
200
219
  Returns:
201
220
  The app module for the backend.
202
221
  """
203
- return f"reflex.app_module_for_backend:{constants.CompileVars.APP}"
204
-
205
-
206
- def get_granian_target():
207
- """Get the Granian target for the backend.
208
-
209
- Returns:
210
- The Granian target for the backend.
211
- """
212
- import reflex
213
-
214
- app_module_path = Path(reflex.__file__).parent / "app_module_for_backend.py"
222
+ config = get_config()
215
223
 
216
- return (
217
- f"{app_module_path!s}:{constants.CompileVars.APP}.{constants.CompileVars.API}"
218
- )
224
+ return f"{config.module}:{constants.CompileVars.APP}"
219
225
 
220
226
 
221
227
  def run_backend(
@@ -317,7 +323,8 @@ def run_uvicorn_backend(host: str, port: int, loglevel: LogLevel):
317
323
  import uvicorn
318
324
 
319
325
  uvicorn.run(
320
- app=f"{get_app_module()}.{constants.CompileVars.API}",
326
+ app=f"{get_app_module()}",
327
+ factory=True,
321
328
  host=host,
322
329
  port=port,
323
330
  log_level=loglevel.value,
@@ -335,36 +342,91 @@ def run_granian_backend(host: str, port: int, loglevel: LogLevel):
335
342
  loglevel: The log level.
336
343
  """
337
344
  console.debug("Using Granian for backend")
338
- try:
339
- from granian.constants import Interfaces
340
- from granian.log import LogLevels
341
- from granian.server import Server as Granian
342
-
343
- Granian(
344
- target=get_granian_target(),
345
- address=host,
346
- port=port,
347
- interface=Interfaces.ASGI,
348
- log_level=LogLevels(loglevel.value),
349
- reload=True,
350
- reload_paths=get_reload_paths(),
351
- ).serve()
352
- except ImportError:
353
- console.error(
354
- 'InstallError: REFLEX_USE_GRANIAN is set but `granian` is not installed. (run `pip install "granian[reload]>=1.6.0"`)'
345
+
346
+ from granian.constants import Interfaces
347
+ from granian.log import LogLevels
348
+ from granian.server import MPServer as Granian
349
+
350
+ Granian(
351
+ target=get_app_module(),
352
+ factory=True,
353
+ address=host,
354
+ port=port,
355
+ interface=Interfaces.ASGI,
356
+ log_level=LogLevels(loglevel.value),
357
+ reload=True,
358
+ reload_paths=get_reload_paths(),
359
+ ).serve()
360
+
361
+
362
+ def _deprecate_asgi_config(
363
+ config_name: str,
364
+ reason: str = "",
365
+ ):
366
+ # When we eventually switch to Granian by default, we should enable this deprecation.
367
+ if False:
368
+ console.deprecate(
369
+ f"config.{config_name}",
370
+ reason=reason,
371
+ deprecation_version="0.7.5",
372
+ removal_version="0.8.0",
355
373
  )
356
- os._exit(1)
357
374
 
358
375
 
376
+ @once
359
377
  def _get_backend_workers():
360
378
  from reflex.utils import processes
361
379
 
362
380
  config = get_config()
363
- return (
364
- processes.get_num_workers()
365
- if not config.gunicorn_workers
366
- else config.gunicorn_workers
367
- )
381
+
382
+ gunicorn_workers = config.gunicorn_workers or 0
383
+
384
+ if config.gunicorn_workers is not None:
385
+ _deprecate_asgi_config(
386
+ "gunicorn_workers",
387
+ "If you're using Granian, use GRANIAN_WORKERS instead.",
388
+ )
389
+
390
+ return gunicorn_workers if gunicorn_workers else processes.get_num_workers()
391
+
392
+
393
+ @once
394
+ def _get_backend_timeout():
395
+ config = get_config()
396
+
397
+ timeout = config.timeout or 120
398
+
399
+ if config.timeout is not None:
400
+ _deprecate_asgi_config(
401
+ "timeout",
402
+ "If you're using Granian, use GRANIAN_WORKERS_LIFETIME instead.",
403
+ )
404
+
405
+ return timeout
406
+
407
+
408
+ @once
409
+ def _get_backend_max_requests():
410
+ config = get_config()
411
+
412
+ gunicorn_max_requests = config.gunicorn_max_requests or 120
413
+
414
+ if config.gunicorn_max_requests is not None:
415
+ _deprecate_asgi_config("gunicorn_max_requests")
416
+
417
+ return gunicorn_max_requests
418
+
419
+
420
+ @once
421
+ def _get_backend_max_requests_jitter():
422
+ config = get_config()
423
+
424
+ gunicorn_max_requests_jitter = config.gunicorn_max_requests_jitter or 25
425
+
426
+ if config.gunicorn_max_requests_jitter is not None:
427
+ _deprecate_asgi_config("gunicorn_max_requests_jitter")
428
+
429
+ return gunicorn_max_requests_jitter
368
430
 
369
431
 
370
432
  def run_backend_prod(
@@ -408,17 +470,25 @@ def run_uvicorn_backend_prod(host: str, port: int, loglevel: LogLevel):
408
470
  [
409
471
  "uvicorn",
410
472
  *(
411
- [
473
+ (
412
474
  "--limit-max-requests",
413
- str(config.gunicorn_max_requests),
414
- ]
415
- if config.gunicorn_max_requests > 0
416
- else []
475
+ str(max_requessts),
476
+ )
477
+ if (
478
+ (max_requessts := _get_backend_max_requests()) is not None
479
+ and max_requessts > 0
480
+ )
481
+ else ()
482
+ ),
483
+ *(
484
+ ("--timeout-keep-alive", str(timeout))
485
+ if (timeout := _get_backend_timeout()) is not None
486
+ else ()
417
487
  ),
418
- *("--timeout-keep-alive", str(config.timeout)),
419
488
  *("--host", host),
420
489
  *("--port", str(port)),
421
490
  *("--workers", str(_get_backend_workers())),
491
+ "--factory",
422
492
  app_module,
423
493
  ]
424
494
  if constants.IS_WINDOWS
@@ -426,17 +496,34 @@ def run_uvicorn_backend_prod(host: str, port: int, loglevel: LogLevel):
426
496
  "gunicorn",
427
497
  *("--worker-class", config.gunicorn_worker_class),
428
498
  *(
429
- [
499
+ (
430
500
  "--max-requests",
431
- str(config.gunicorn_max_requests),
501
+ str(max_requessts),
502
+ )
503
+ if (
504
+ (max_requessts := _get_backend_max_requests()) is not None
505
+ and max_requessts > 0
506
+ )
507
+ else ()
508
+ ),
509
+ *(
510
+ (
432
511
  "--max-requests-jitter",
433
- str(config.gunicorn_max_requests_jitter),
434
- ]
435
- if config.gunicorn_max_requests > 0
436
- else []
512
+ str(max_requessts_jitter),
513
+ )
514
+ if (
515
+ (max_requessts_jitter := _get_backend_max_requests_jitter())
516
+ is not None
517
+ and max_requessts_jitter > 0
518
+ )
519
+ else ()
437
520
  ),
438
521
  "--preload",
439
- *("--timeout", str(config.timeout)),
522
+ *(
523
+ ("--timeout", str(timeout))
524
+ if (timeout := _get_backend_timeout()) is not None
525
+ else ()
526
+ ),
440
527
  *("--bind", f"{host}:{port}"),
441
528
  *("--threads", str(_get_backend_workers())),
442
529
  f"{app_module}()",
@@ -472,17 +559,12 @@ def run_granian_backend_prod(host: str, port: int, loglevel: LogLevel):
472
559
 
473
560
  command = [
474
561
  "granian",
475
- "--workers",
476
- str(_get_backend_workers()),
477
- "--log-level",
478
- "critical",
479
- "--host",
480
- host,
481
- "--port",
482
- str(port),
483
- "--interface",
484
- str(Interfaces.ASGI),
485
- get_granian_target(),
562
+ *("--workers", str(_get_backend_workers())),
563
+ *("--log-level", "critical"),
564
+ *("--host", host),
565
+ *("--port", str(port)),
566
+ *("--interface", str(Interfaces.ASGI)),
567
+ *("--factory", get_app_module()),
486
568
  ]
487
569
  processes.new_process(
488
570
  command,
@@ -8,6 +8,7 @@ import functools
8
8
  import importlib
9
9
  import importlib.metadata
10
10
  import importlib.util
11
+ import io
11
12
  import json
12
13
  import os
13
14
  import platform
@@ -183,7 +184,7 @@ def get_node_version() -> version.Version | None:
183
184
  try:
184
185
  result = processes.new_process([node_path, "-v"], run=True)
185
186
  # The output will be in the form "vX.Y.Z", but version.parse() can handle it
186
- return version.parse(result.stdout) # pyright: ignore [reportArgumentType]
187
+ return version.parse(result.stdout)
187
188
  except (FileNotFoundError, TypeError):
188
189
  return None
189
190
 
@@ -200,7 +201,7 @@ def get_bun_version() -> version.Version | None:
200
201
  try:
201
202
  # Run the bun -v command and capture the output
202
203
  result = processes.new_process([str(bun_path), "-v"], run=True)
203
- return version.parse(str(result.stdout)) # pyright: ignore [reportArgumentType]
204
+ return version.parse(str(result.stdout))
204
205
  except FileNotFoundError:
205
206
  return None
206
207
  except version.InvalidVersion as e:
@@ -412,6 +413,15 @@ def get_and_validate_app(reload: bool = False) -> AppInfo:
412
413
  return AppInfo(app=app, module=app_module)
413
414
 
414
415
 
416
+ def validate_app(reload: bool = False) -> None:
417
+ """Validate the app instance based on the default config.
418
+
419
+ Args:
420
+ reload: Re-import the app module from disk
421
+ """
422
+ get_and_validate_app(reload=reload)
423
+
424
+
415
425
  def get_compiled_app(reload: bool = False, export: bool = False) -> ModuleType:
416
426
  """Get the app module based on the default config after first compiling it.
417
427
 
@@ -430,6 +440,86 @@ def get_compiled_app(reload: bool = False, export: bool = False) -> ModuleType:
430
440
  return app_module
431
441
 
432
442
 
443
+ def compile_app(reload: bool = False, export: bool = False) -> None:
444
+ """Compile the app module based on the default config.
445
+
446
+ Args:
447
+ reload: Re-import the app module from disk
448
+ export: Compile the app for export
449
+ """
450
+ get_compiled_app(reload=reload, export=export)
451
+
452
+
453
+ def _can_colorize() -> bool:
454
+ """Check if the output can be colorized.
455
+
456
+ Copied from _colorize.can_colorize.
457
+
458
+ https://raw.githubusercontent.com/python/cpython/refs/heads/main/Lib/_colorize.py
459
+
460
+ Returns:
461
+ If the output can be colorized
462
+ """
463
+ file = sys.stdout
464
+
465
+ if not sys.flags.ignore_environment:
466
+ if os.environ.get("PYTHON_COLORS") == "0":
467
+ return False
468
+ if os.environ.get("PYTHON_COLORS") == "1":
469
+ return True
470
+ if os.environ.get("NO_COLOR"):
471
+ return False
472
+ if os.environ.get("FORCE_COLOR"):
473
+ return True
474
+ if os.environ.get("TERM") == "dumb":
475
+ return False
476
+
477
+ if not hasattr(file, "fileno"):
478
+ return False
479
+
480
+ if sys.platform == "win32":
481
+ try:
482
+ import nt
483
+
484
+ if not nt._supports_virtual_terminal():
485
+ return False
486
+ except (ImportError, AttributeError):
487
+ return False
488
+
489
+ try:
490
+ return os.isatty(file.fileno())
491
+ except io.UnsupportedOperation:
492
+ return file.isatty()
493
+
494
+
495
+ def compile_or_validate_app(compile: bool = False) -> bool:
496
+ """Compile or validate the app module based on the default config.
497
+
498
+ Args:
499
+ compile: Whether to compile the app.
500
+
501
+ Returns:
502
+ If the app is compiled successfully.
503
+ """
504
+ try:
505
+ if compile:
506
+ compile_app()
507
+ else:
508
+ validate_app()
509
+ except Exception as e:
510
+ import traceback
511
+
512
+ sys_exception = sys.exception()
513
+
514
+ try:
515
+ colorize = _can_colorize()
516
+ traceback.print_exception(e, colorize=colorize) # pyright: ignore[reportCallIssue]
517
+ except Exception:
518
+ traceback.print_exception(sys_exception)
519
+ return False
520
+ return True
521
+
522
+
433
523
  def get_redis() -> Redis | None:
434
524
  """Get the asynchronous redis client.
435
525
 
@@ -1542,7 +1632,7 @@ def create_config_init_app_from_remote_template(app_name: str, template_url: str
1542
1632
  console.error(f"Failed to unzip the template: {uze}")
1543
1633
  raise typer.Exit(1) from uze
1544
1634
 
1545
- if len(subdirs := os.listdir(unzip_dir)) != 1:
1635
+ if len(subdirs := list(unzip_dir.iterdir())) != 1:
1546
1636
  console.error(f"Expected one directory in the zip, found {subdirs}")
1547
1637
  raise typer.Exit(1)
1548
1638
 
reflex/utils/processes.py CHANGED
@@ -10,7 +10,7 @@ import signal
10
10
  import subprocess
11
11
  from concurrent import futures
12
12
  from pathlib import Path
13
- from typing import Callable, Generator, Sequence, Tuple
13
+ from typing import Any, Callable, Generator, Literal, Sequence, Tuple, overload
14
14
 
15
15
  import psutil
16
16
  import typer
@@ -142,12 +142,30 @@ def handle_port(service_name: str, port: int, auto_increment: bool) -> int:
142
142
  raise typer.Exit()
143
143
 
144
144
 
145
+ @overload
146
+ def new_process(
147
+ args: str | list[str] | list[str | None] | list[str | Path | None],
148
+ run: Literal[False] = False,
149
+ show_logs: bool = False,
150
+ **kwargs,
151
+ ) -> subprocess.Popen[str]: ...
152
+
153
+
154
+ @overload
155
+ def new_process(
156
+ args: str | list[str] | list[str | None] | list[str | Path | None],
157
+ run: Literal[True],
158
+ show_logs: bool = False,
159
+ **kwargs,
160
+ ) -> subprocess.CompletedProcess[str]: ...
161
+
162
+
145
163
  def new_process(
146
164
  args: str | list[str] | list[str | None] | list[str | Path | None],
147
165
  run: bool = False,
148
166
  show_logs: bool = False,
149
167
  **kwargs,
150
- ):
168
+ ) -> subprocess.CompletedProcess[str] | subprocess.Popen[str]:
151
169
  """Wrapper over subprocess.Popen to unify the launch of child processes.
152
170
 
153
171
  Args:
@@ -163,7 +181,8 @@ def new_process(
163
181
  Exit: When attempting to run a command with a None value.
164
182
  """
165
183
  # Check for invalid command first.
166
- if isinstance(args, list) and None in args:
184
+ non_empty_args = list(filter(None, args)) if isinstance(args, list) else [args]
185
+ if isinstance(args, list) and len(non_empty_args) != len(args):
167
186
  console.error(f"Invalid command: {args}")
168
187
  raise typer.Exit(1)
169
188
 
@@ -190,14 +209,20 @@ def new_process(
190
209
  "errors": "replace", # Avoid UnicodeDecodeError in unknown command output
191
210
  **kwargs,
192
211
  }
193
- console.debug(f"Running command: {args}")
194
- fn = subprocess.run if run else subprocess.Popen
195
- return fn(args, **kwargs) # pyright: ignore [reportCallIssue, reportArgumentType]
212
+ console.debug(f"Running command: {non_empty_args}")
213
+
214
+ def subprocess_p_open(args: subprocess._CMD, **kwargs):
215
+ return subprocess.Popen(args, **kwargs)
216
+
217
+ fn: Callable[..., subprocess.CompletedProcess[str] | subprocess.Popen[str]] = (
218
+ subprocess.run if run else subprocess_p_open
219
+ )
220
+ return fn(non_empty_args, **kwargs)
196
221
 
197
222
 
198
223
  @contextlib.contextmanager
199
224
  def run_concurrently_context(
200
- *fns: Callable | Tuple,
225
+ *fns: Callable[..., Any] | tuple[Callable[..., Any], ...],
201
226
  ) -> Generator[list[futures.Future], None, None]:
202
227
  """Run functions concurrently in a thread pool.
203
228
 
@@ -213,14 +238,14 @@ def run_concurrently_context(
213
238
  return
214
239
 
215
240
  # Convert the functions to tuples.
216
- fns = [fn if isinstance(fn, tuple) else (fn,) for fn in fns] # pyright: ignore [reportAssignmentType]
241
+ fns = tuple(fn if isinstance(fn, tuple) else (fn,) for fn in fns)
217
242
 
218
243
  # Run the functions concurrently.
219
244
  executor = None
220
245
  try:
221
246
  executor = futures.ThreadPoolExecutor(max_workers=len(fns))
222
247
  # Submit the tasks.
223
- tasks = [executor.submit(*fn) for fn in fns] # pyright: ignore [reportArgumentType]
248
+ tasks = [executor.submit(*fn) for fn in fns]
224
249
 
225
250
  # Yield control back to the main thread while tasks are running.
226
251
  yield tasks
@@ -311,6 +336,7 @@ def show_status(
311
336
  process: subprocess.Popen,
312
337
  suppress_errors: bool = False,
313
338
  analytics_enabled: bool = False,
339
+ prior_processes: Tuple[subprocess.Popen, ...] = (),
314
340
  ):
315
341
  """Show the status of a process.
316
342
 
@@ -319,15 +345,17 @@ def show_status(
319
345
  process: The process.
320
346
  suppress_errors: If True, do not exit if errors are encountered (for fallback).
321
347
  analytics_enabled: Whether analytics are enabled for this command.
348
+ prior_processes: The prior processes that have been run.
322
349
  """
323
- with console.status(message) as status:
324
- for line in stream_logs(
325
- message,
326
- process,
327
- suppress_errors=suppress_errors,
328
- analytics_enabled=analytics_enabled,
329
- ):
330
- status.update(f"{message} {line}")
350
+ for one_process in (*prior_processes, process):
351
+ with console.status(message) as status:
352
+ for line in stream_logs(
353
+ message,
354
+ one_process,
355
+ suppress_errors=suppress_errors,
356
+ analytics_enabled=analytics_enabled,
357
+ ):
358
+ status.update(f"{message} {line}")
331
359
 
332
360
 
333
361
  def show_progress(message: str, process: subprocess.Popen, checkpoints: list[str]):
@@ -381,6 +409,7 @@ def run_process_with_fallbacks(
381
409
  show_status_message: str,
382
410
  fallbacks: str | Sequence[str] | Sequence[Sequence[str]] | None = None,
383
411
  analytics_enabled: bool = False,
412
+ prior_processes: Tuple[subprocess.Popen, ...] = (),
384
413
  **kwargs,
385
414
  ):
386
415
  """Run subprocess and retry using fallback command if initial command fails.
@@ -390,7 +419,8 @@ def run_process_with_fallbacks(
390
419
  show_status_message: The status message to be displayed in the console.
391
420
  fallbacks: The fallback command to run if the initial command fails.
392
421
  analytics_enabled: Whether analytics are enabled for this command.
393
- kwargs: Kwargs to pass to new_process function.
422
+ prior_processes: The prior processes that have been run.
423
+ **kwargs: Kwargs to pass to new_process function.
394
424
  """
395
425
  process = new_process(get_command_with_loglevel(args), **kwargs)
396
426
  if not fallbacks:
@@ -399,6 +429,7 @@ def run_process_with_fallbacks(
399
429
  show_status_message,
400
430
  process,
401
431
  analytics_enabled=analytics_enabled,
432
+ prior_processes=prior_processes,
402
433
  )
403
434
  else:
404
435
  # Suppress errors for initial command, because we will try to fallback
@@ -411,7 +442,7 @@ def run_process_with_fallbacks(
411
442
  # retry with fallback command.
412
443
  fallback_with_args = (
413
444
  [current_fallback, *args[1:]]
414
- if isinstance(fallbacks, str)
445
+ if isinstance(current_fallback, str)
415
446
  else [*current_fallback, *args[1:]]
416
447
  )
417
448
  console.warn(
@@ -422,6 +453,7 @@ def run_process_with_fallbacks(
422
453
  show_status_message=show_status_message,
423
454
  fallbacks=next_fallbacks,
424
455
  analytics_enabled=analytics_enabled,
456
+ prior_processes=(*prior_processes, process),
425
457
  **kwargs,
426
458
  )
427
459
 
reflex/vars/base.py CHANGED
@@ -160,7 +160,7 @@ class VarData:
160
160
  if isinstance(hooks, str):
161
161
  hooks = [hooks]
162
162
  if not isinstance(hooks, dict):
163
- hooks = {hook: None for hook in (hooks or [])}
163
+ hooks = dict.fromkeys(hooks or [])
164
164
  immutable_imports: ImmutableParsedImportDict = tuple(
165
165
  (k, tuple(v)) for k, v in parse_imports(imports or {}).items()
166
166
  )
@@ -1791,8 +1791,7 @@ class cached_property: # noqa: N801
1791
1791
  if original_del is not None:
1792
1792
  original_del(this)
1793
1793
  return
1794
- if unique_id in GLOBAL_CACHE:
1795
- del GLOBAL_CACHE[unique_id]
1794
+ GLOBAL_CACHE.pop(unique_id, None)
1796
1795
 
1797
1796
  if original_del is not None:
1798
1797
  original_del(this)
@@ -3017,41 +3016,6 @@ _decode_var_pattern = re.compile(_decode_var_pattern_re, flags=re.DOTALL)
3017
3016
  _global_vars: dict[int, Var] = {}
3018
3017
 
3019
3018
 
3020
- def _extract_var_data(value: Iterable) -> list[VarData | None]:
3021
- """Extract the var imports and hooks from an iterable containing a Var.
3022
-
3023
- Args:
3024
- value: The iterable to extract the VarData from
3025
-
3026
- Returns:
3027
- The extracted VarDatas.
3028
- """
3029
- from reflex.style import Style
3030
- from reflex.vars import Var
3031
-
3032
- var_datas = []
3033
- with contextlib.suppress(TypeError):
3034
- for sub in value:
3035
- if isinstance(sub, Var):
3036
- var_datas.append(sub._var_data)
3037
- elif not isinstance(sub, str):
3038
- # Recurse into dict values.
3039
- if hasattr(sub, "values") and callable(sub.values):
3040
- var_datas.extend(_extract_var_data(sub.values())) # pyright: ignore [reportArgumentType]
3041
- # Recurse into iterable values (or dict keys).
3042
- var_datas.extend(_extract_var_data(sub))
3043
-
3044
- # Style objects should already have _var_data.
3045
- if isinstance(value, Style):
3046
- var_datas.append(value._var_data)
3047
- else:
3048
- # Recurse when value is a dict itself.
3049
- values = getattr(value, "values", None)
3050
- if callable(values):
3051
- var_datas.extend(_extract_var_data(values())) # pyright: ignore [reportArgumentType]
3052
- return var_datas
3053
-
3054
-
3055
3019
  dispatchers: dict[GenericType, Callable[[Var], Var]] = {}
3056
3020
 
3057
3021