pytest-neon 2.2.2__py3-none-any.whl → 2.3.0__py3-none-any.whl
- pytest_neon/__init__.py +1 -1
- pytest_neon/plugin.py +911 -292
- {pytest_neon-2.2.2.dist-info → pytest_neon-2.3.0.dist-info}/METADATA +150 -76
- pytest_neon-2.3.0.dist-info/RECORD +8 -0
- pytest_neon-2.2.2.dist-info/RECORD +0 -8
- {pytest_neon-2.2.2.dist-info → pytest_neon-2.3.0.dist-info}/WHEEL +0 -0
- {pytest_neon-2.2.2.dist-info → pytest_neon-2.3.0.dist-info}/entry_points.txt +0 -0
- {pytest_neon-2.2.2.dist-info → pytest_neon-2.3.0.dist-info}/licenses/LICENSE +0 -0
pytest_neon/plugin.py
CHANGED
@@ -1,21 +1,38 @@
 """Pytest plugin providing Neon database branch fixtures.

-This plugin provides fixtures for
-
-branch reset after each test.
+This plugin provides fixtures for database testing using Neon's instant
+branching feature. Multiple isolation levels are available:

 Main fixtures:
-
-
-
+    neon_branch_readonly: True read-only access via read_only endpoint (enforced)
+    neon_branch_dirty: Session-scoped read-write, shared state across all tests
+    neon_branch_isolated: Per-worker branch with reset after each test (recommended)
+    neon_branch_readwrite: Deprecated, use neon_branch_isolated instead
+    neon_branch: Deprecated alias for neon_branch_isolated
     neon_branch_shared: Shared branch without reset (module-scoped)
+
+Connection fixtures (require extras):
     neon_connection: psycopg2 connection (requires psycopg2 extra)
     neon_connection_psycopg: psycopg v3 connection (requires psycopg extra)
     neon_engine: SQLAlchemy engine (requires sqlalchemy extra)

+Architecture:
+    Parent Branch (configured or project default)
+    └── Migration Branch (session-scoped, read_write endpoint)
+          │   ↑ migrations run here ONCE
+          │
+          ├── Read-only Endpoint (read_only endpoint ON migration branch)
+          │   ↑ neon_branch_readonly uses this
+          │
+          ├── Dirty Branch (session-scoped child, shared across ALL workers)
+          │   ↑ neon_branch_dirty uses this
+          │
+          └── Isolated Branch (one per xdist worker, lazily created)
+                ↑ neon_branch_isolated uses this, reset after each test
+
 SQLAlchemy Users:
     If you create your own SQLAlchemy engine (not using neon_engine fixture),
-    you MUST use pool_pre_ping=True when using
+    you MUST use pool_pre_ping=True when using neon_branch_isolated:

         engine = create_engine(DATABASE_URL, pool_pre_ping=True)

@@ -23,9 +40,6 @@ SQLAlchemy Users:
     Without pool_pre_ping, SQLAlchemy may try to reuse dead pooled connections,
     causing "SSL connection has been closed unexpectedly" errors.

-    Note: pool_pre_ping is not required for neon_branch_readonly since no
-    resets occur.
-
 Configuration:
     Set NEON_API_KEY and NEON_PROJECT_ID environment variables, or use
     --neon-api-key and --neon-project-id CLI options.
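The reworked docstring above lays out three isolation levels. As a rough orientation only, a test module that mixes them might look like the sketch below; it assumes the fixtures behave as documented in this diff, that the psycopg extra is installed, and it uses a hypothetical users table.

    import os
    import psycopg  # assumes pytest-neon[psycopg] is installed

    def test_read_only_report(neon_branch_readonly):
        # Enforced read-only endpoint; suitable for pure SELECT tests.
        with psycopg.connect(neon_branch_readonly.connection_string) as conn:
            count = conn.execute("SELECT count(*) FROM users").fetchone()
            assert count is not None

    def test_shared_seed_data(neon_branch_dirty):
        # Shared branch: this INSERT stays visible to later tests and other workers.
        with psycopg.connect(neon_branch_dirty.connection_string) as conn:
            conn.execute("INSERT INTO users (name) VALUES ('seed')")
            conn.commit()

    def test_isolated_mutation(neon_branch_isolated):
        # Per-worker branch, reset after this test; DATABASE_URL points at it.
        with psycopg.connect(os.environ["DATABASE_URL"]) as conn:
            conn.execute("DELETE FROM users")
            conn.commit()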
@@ -322,6 +336,417 @@ class NeonBranch:
     connection_string: str
     host: str
     parent_id: str | None = None
+    endpoint_id: str | None = None
+
+
+@dataclass
+class NeonConfig:
+    """Configuration for Neon operations. Extracted from pytest config."""
+
+    api_key: str
+    project_id: str
+    parent_branch_id: str | None
+    database_name: str
+    role_name: str
+    keep_branches: bool
+    branch_expiry: int
+    env_var_name: str
+
+    @classmethod
+    def from_pytest_config(cls, config: pytest.Config) -> NeonConfig | None:
+        """
+        Extract NeonConfig from pytest configuration.
+
+        Returns None if required values (api_key, project_id) are missing,
+        allowing callers to skip tests gracefully.
+        """
+        api_key = _get_config_value(
+            config, "neon_api_key", "NEON_API_KEY", "neon_api_key"
+        )
+        project_id = _get_config_value(
+            config, "neon_project_id", "NEON_PROJECT_ID", "neon_project_id"
+        )
+
+        if not api_key or not project_id:
+            return None
+
+        parent_branch_id = _get_config_value(
+            config, "neon_parent_branch", "NEON_PARENT_BRANCH_ID", "neon_parent_branch"
+        )
+        database_name = _get_config_value(
+            config, "neon_database", "NEON_DATABASE", "neon_database", "neondb"
+        )
+        role_name = _get_config_value(
+            config, "neon_role", "NEON_ROLE", "neon_role", "neondb_owner"
+        )
+
+        keep_branches = config.getoption("neon_keep_branches", default=None)
+        if keep_branches is None:
+            keep_branches = config.getini("neon_keep_branches")
+
+        branch_expiry = config.getoption("neon_branch_expiry", default=None)
+        if branch_expiry is None:
+            branch_expiry = int(config.getini("neon_branch_expiry"))
+
+        env_var_name = _get_config_value(
+            config, "neon_env_var", "", "neon_env_var", "DATABASE_URL"
+        )
+
+        return cls(
+            api_key=api_key,
+            project_id=project_id,
+            parent_branch_id=parent_branch_id,
+            database_name=database_name or "neondb",
+            role_name=role_name or "neondb_owner",
+            keep_branches=bool(keep_branches),
+            branch_expiry=branch_expiry or DEFAULT_BRANCH_EXPIRY_SECONDS,
+            env_var_name=env_var_name or "DATABASE_URL",
+        )
+
+
+class NeonBranchManager:
+    """
+    Manages Neon branch lifecycle operations.
+
+    This class encapsulates all Neon API interactions for branch management,
+    making it easier to test and reason about branch operations.
+    """
+
+    def __init__(self, config: NeonConfig):
+        self.config = config
+        self._neon = NeonAPI(api_key=config.api_key)
+        self._default_branch_id: str | None = None
+        self._default_branch_id_fetched = False
+
+    def get_default_branch_id(self) -> str | None:
+        """Get the default/primary branch ID (cached)."""
+        if not self._default_branch_id_fetched:
+            self._default_branch_id = _get_default_branch_id(
+                self._neon, self.config.project_id
+            )
+            self._default_branch_id_fetched = True
+        return self._default_branch_id
+
+    def create_branch(
+        self,
+        name_suffix: str = "",
+        parent_branch_id: str | None = None,
+        expiry_seconds: int | None = None,
+    ) -> NeonBranch:
+        """
+        Create a new Neon branch with a read_write endpoint.
+
+        Args:
+            name_suffix: Suffix to add to branch name (e.g., "-migration", "-dirty")
+            parent_branch_id: Parent branch ID (defaults to config's parent)
+            expiry_seconds: Branch expiry in seconds (0 or None for no expiry)
+
+        Returns:
+            NeonBranch with connection details
+        """
+        parent_id = parent_branch_id or self.config.parent_branch_id
+
+        # Generate unique branch name
+        random_suffix = os.urandom(2).hex()
+        git_branch = _get_git_branch_name()
+        if git_branch:
+            git_prefix = git_branch[:15]
+            branch_name = f"pytest-{git_prefix}-{random_suffix}{name_suffix}"
+        else:
+            branch_name = f"pytest-{random_suffix}{name_suffix}"
+
+        # Build branch config
+        branch_config: dict[str, Any] = {"name": branch_name}
+        if parent_id:
+            branch_config["parent_id"] = parent_id
+
+        # Set expiry if specified
+        if expiry_seconds and expiry_seconds > 0:
+            expires_at = datetime.now(timezone.utc) + timedelta(seconds=expiry_seconds)
+            branch_config["expires_at"] = expires_at.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+        # Create branch with read_write endpoint
+        result = _retry_on_rate_limit(
+            lambda: self._neon.branch_create(
+                project_id=self.config.project_id,
+                branch=branch_config,
+                endpoints=[{"type": "read_write"}],
+            ),
+            operation_name="branch_create",
+        )
+
+        branch = result.branch
+        endpoint_id = None
+        for op in result.operations:
+            if op.endpoint_id:
+                endpoint_id = op.endpoint_id
+                break
+
+        if not endpoint_id:
+            raise RuntimeError(f"No endpoint created for branch {branch.id}")
+
+        # Wait for endpoint to be active
+        host = self._wait_for_endpoint(endpoint_id)
+
+        # Safety check: never operate on default branch
+        default_branch_id = self.get_default_branch_id()
+        if default_branch_id and branch.id == default_branch_id:
+            raise RuntimeError(
+                f"SAFETY CHECK FAILED: Attempted to operate on default branch "
+                f"{branch.id}. Please report this bug."
+            )
+
+        # Get password
+        connection_string = self._reset_password_and_build_connection_string(
+            branch.id, host
+        )
+
+        return NeonBranch(
+            branch_id=branch.id,
+            project_id=self.config.project_id,
+            connection_string=connection_string,
+            host=host,
+            parent_id=branch.parent_id,
+            endpoint_id=endpoint_id,
+        )
+
+    def create_readonly_endpoint(self, branch: NeonBranch) -> NeonBranch:
+        """
+        Create a read_only endpoint on an existing branch.
+
+        This creates a true read-only endpoint that enforces no writes at the
+        database level.
+
+        Args:
+            branch: The branch to create the endpoint on
+
+        Returns:
+            NeonBranch with the read_only endpoint's connection details
+        """
+        result = _retry_on_rate_limit(
+            lambda: self._neon.endpoint_create(
+                project_id=self.config.project_id,
+                endpoint={
+                    "branch_id": branch.branch_id,
+                    "type": "read_only",
+                },
+            ),
+            operation_name="endpoint_create_readonly",
+        )
+
+        endpoint_id = result.endpoint.id
+        host = self._wait_for_endpoint(endpoint_id)
+
+        # Get password for the read_only endpoint
+        connection_string = self._reset_password_and_build_connection_string(
+            branch.branch_id, host
+        )
+
+        return NeonBranch(
+            branch_id=branch.branch_id,
+            project_id=self.config.project_id,
+            connection_string=connection_string,
+            host=host,
+            parent_id=branch.parent_id,
+            endpoint_id=endpoint_id,
+        )
+
+    def delete_branch(self, branch_id: str) -> None:
+        """Delete a branch (silently ignores errors)."""
+        if self.config.keep_branches:
+            return
+        try:
+            _retry_on_rate_limit(
+                lambda: self._neon.branch_delete(
+                    project_id=self.config.project_id, branch_id=branch_id
+                ),
+                operation_name="branch_delete",
+            )
+        except Exception as e:
+            msg = f"Failed to delete Neon branch {branch_id}: {e}"
+            warnings.warn(msg, stacklevel=2)
+
+    def delete_endpoint(self, endpoint_id: str) -> None:
+        """Delete an endpoint (silently ignores errors)."""
+        try:
+            _retry_on_rate_limit(
+                lambda: self._neon.endpoint_delete(
+                    project_id=self.config.project_id, endpoint_id=endpoint_id
+                ),
+                operation_name="endpoint_delete",
+            )
+        except Exception as e:
+            warnings.warn(
+                f"Failed to delete Neon endpoint {endpoint_id}: {e}", stacklevel=2
+            )
+
+    def reset_branch(self, branch: NeonBranch) -> None:
+        """Reset a branch to its parent's state."""
+        if not branch.parent_id:
+            msg = f"Branch {branch.branch_id} has no parent - cannot reset"
+            raise RuntimeError(msg)
+
+        _reset_branch_to_parent(branch, self.config.api_key)
+
+    def _wait_for_endpoint(self, endpoint_id: str, max_wait_seconds: float = 60) -> str:
+        """Wait for endpoint to become active and return its host."""
+        poll_interval = 0.5
+        waited = 0.0
+
+        while True:
+            endpoint_response = _retry_on_rate_limit(
+                lambda: self._neon.endpoint(
+                    project_id=self.config.project_id, endpoint_id=endpoint_id
+                ),
+                operation_name="endpoint_status",
+            )
+            endpoint = endpoint_response.endpoint
+            state = endpoint.current_state
+
+            if state == EndpointState.active:
+                return endpoint.host
+
+            if waited >= max_wait_seconds:
+                raise RuntimeError(
+                    f"Timeout waiting for endpoint {endpoint_id} to become active "
+                    f"(current state: {state})"
+                )
+
+            time.sleep(poll_interval)
+            waited += poll_interval
+
+    def _reset_password_and_build_connection_string(
+        self, branch_id: str, host: str
+    ) -> str:
+        """Reset role password and build connection string."""
+        password_response = _retry_on_rate_limit(
+            lambda: self._neon.role_password_reset(
+                project_id=self.config.project_id,
+                branch_id=branch_id,
+                role_name=self.config.role_name,
+            ),
+            operation_name="role_password_reset",
+        )
+        password = password_response.role.password
+
+        return (
+            f"postgresql://{self.config.role_name}:{password}@{host}/"
+            f"{self.config.database_name}?sslmode=require"
+        )
+
+
+class XdistCoordinator:
+    """
+    Coordinates branch sharing across pytest-xdist workers.
+
+    Uses file locks and JSON cache files to ensure only one worker creates
+    shared resources (like the migration branch), while others reuse them.
+    """
+
+    def __init__(self, tmp_path_factory: pytest.TempPathFactory):
+        self.worker_id = _get_xdist_worker_id()
+        self.is_xdist = self.worker_id != "main"
+
+        if self.is_xdist:
+            root_tmp_dir = tmp_path_factory.getbasetemp().parent
+            self._lock_dir = root_tmp_dir
+        else:
+            self._lock_dir = None
+
+    def coordinate_resource(
+        self,
+        resource_name: str,
+        create_fn: Callable[[], dict[str, Any]],
+    ) -> tuple[dict[str, Any], bool]:
+        """
+        Coordinate creation of a shared resource across workers.
+
+        Args:
+            resource_name: Name of the resource (used for cache/lock files)
+            create_fn: Function to create the resource, returns dict to cache
+
+        Returns:
+            Tuple of (cached_data, is_creator)
+        """
+        if not self.is_xdist:
+            return create_fn(), True
+
+        assert self._lock_dir is not None
+        cache_file = self._lock_dir / f"neon_{resource_name}.json"
+        lock_file = self._lock_dir / f"neon_{resource_name}.lock"
+
+        with FileLock(str(lock_file)):
+            if cache_file.exists():
+                data = json.loads(cache_file.read_text())
+                return data, False
+            else:
+                data = create_fn()
+                cache_file.write_text(json.dumps(data))
+                return data, True
+
+    def wait_for_signal(self, signal_name: str, timeout: float = 60) -> None:
+        """Wait for a signal file to be created by another worker."""
+        if not self.is_xdist or self._lock_dir is None:
+            return
+
+        signal_file = self._lock_dir / f"neon_{signal_name}"
+        waited = 0.0
+        poll_interval = 0.5
+
+        while not signal_file.exists():
+            if waited >= timeout:
+                raise RuntimeError(
+                    f"Worker {self.worker_id} timed out waiting for signal "
+                    f"'{signal_name}' after {timeout}s. This usually means the "
+                    f"creator worker failed or is still processing."
+                )
+            time.sleep(poll_interval)
+            waited += poll_interval
+
+    def send_signal(self, signal_name: str) -> None:
+        """Create a signal file for other workers."""
+        if not self.is_xdist or self._lock_dir is None:
+            return
+
+        signal_file = self._lock_dir / f"neon_{signal_name}"
+        signal_file.write_text("done")
+
+
+class EnvironmentManager:
+    """Manages DATABASE_URL environment variable lifecycle."""
+
+    def __init__(self, env_var_name: str = "DATABASE_URL"):
+        self.env_var_name = env_var_name
+        self._original_value: str | None = None
+        self._is_set = False
+
+    def set(self, connection_string: str) -> None:
+        """Set the environment variable, saving original value."""
+        if not self._is_set:
+            self._original_value = os.environ.get(self.env_var_name)
+            self._is_set = True
+        os.environ[self.env_var_name] = connection_string
+
+    def restore(self) -> None:
+        """Restore the original environment variable value."""
+        if not self._is_set:
+            return
+
+        if self._original_value is None:
+            os.environ.pop(self.env_var_name, None)
+        else:
+            os.environ[self.env_var_name] = self._original_value
+
+        self._is_set = False
+
+    @contextlib.contextmanager
+    def temporary(self, connection_string: str) -> Generator[None, None, None]:
+        """Context manager for temporary environment variable."""
+        self.set(connection_string)
+        try:
+            yield
+        finally:
+            self.restore()


 def _get_default_branch_id(neon: NeonAPI, project_id: str) -> str | None:
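The coordinate_resource method added above is a create-once / reuse-everywhere pattern: a FileLock guards a JSON cache file, the first worker to take the lock runs the create function and caches the result, and every other worker reads the cached copy. A standalone sketch of the same idea outside pytest, assuming only that the filelock package is available, looks like this:

    import json
    from pathlib import Path
    from typing import Any, Callable
    from filelock import FileLock

    def create_once(
        lock_dir: Path, name: str, create_fn: Callable[[], dict[str, Any]]
    ) -> tuple[dict[str, Any], bool]:
        # First caller to take the lock creates the resource and caches it as JSON;
        # later callers (including other processes) read the cached copy instead.
        cache_file = lock_dir / f"{name}.json"
        lock_file = lock_dir / f"{name}.lock"
        with FileLock(str(lock_file)):
            if cache_file.exists():
                return json.loads(cache_file.read_text()), False
            data = create_fn()
            cache_file.write_text(json.dumps(data))
            return data, True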
@@ -642,6 +1067,7 @@ def _create_neon_branch(
         connection_string=connection_string,
         host=host,
         parent_id=branch.parent_id,
+        endpoint_id=endpoint_id,
     )

     # Set DATABASE_URL (or configured env var) for the duration of the fixture scope
@@ -676,6 +1102,117 @@ def _create_neon_branch(
     )


+def _create_readonly_endpoint(
+    branch: NeonBranch,
+    api_key: str,
+    database_name: str,
+    role_name: str,
+) -> NeonBranch:
+    """
+    Create a read_only endpoint on an existing branch.
+
+    Returns a new NeonBranch object with the read_only endpoint's connection string.
+    The read_only endpoint enforces that no writes can be made through this connection.
+
+    Args:
+        branch: The branch to create a read_only endpoint on
+        api_key: Neon API key
+        database_name: Database name for connection string
+        role_name: Role name for connection string
+
+    Returns:
+        NeonBranch with read_only endpoint connection details
+    """
+    neon = NeonAPI(api_key=api_key)
+
+    # Create read_only endpoint on the branch
+    # See: https://api-docs.neon.tech/reference/createprojectendpoint
+    result = _retry_on_rate_limit(
+        lambda: neon.endpoint_create(
+            project_id=branch.project_id,
+            endpoint={
+                "branch_id": branch.branch_id,
+                "type": "read_only",
+            },
+        ),
+        operation_name="endpoint_create_readonly",
+    )
+
+    endpoint = result.endpoint
+    endpoint_id = endpoint.id
+
+    # Wait for endpoint to be ready
+    max_wait_seconds = 60
+    poll_interval = 0.5
+    waited = 0.0
+
+    while True:
+        endpoint_response = _retry_on_rate_limit(
+            lambda: neon.endpoint(
+                project_id=branch.project_id, endpoint_id=endpoint_id
+            ),
+            operation_name="endpoint_status_readonly",
+        )
+        endpoint = endpoint_response.endpoint
+        state = endpoint.current_state
+
+        if state == EndpointState.active:
+            break
+
+        if waited >= max_wait_seconds:
+            raise RuntimeError(
+                f"Timeout waiting for read_only endpoint {endpoint_id} "
+                f"to become active (current state: {state})"
+            )
+
+        time.sleep(poll_interval)
+        waited += poll_interval
+
+    host = endpoint.host
+
+    # Reset password to get the password value for this endpoint
+    password_response = _retry_on_rate_limit(
+        lambda: neon.role_password_reset(
+            project_id=branch.project_id,
+            branch_id=branch.branch_id,
+            role_name=role_name,
+        ),
+        operation_name="role_password_reset_readonly",
+    )
+    password = password_response.role.password
+
+    # Build connection string for the read_only endpoint
+    connection_string = (
+        f"postgresql://{role_name}:{password}@{host}/{database_name}?sslmode=require"
+    )
+
+    return NeonBranch(
+        branch_id=branch.branch_id,
+        project_id=branch.project_id,
+        connection_string=connection_string,
+        host=host,
+        parent_id=branch.parent_id,
+        endpoint_id=endpoint_id,
+    )
+
+
+def _delete_endpoint(project_id: str, endpoint_id: str, api_key: str) -> None:
+    """Delete a Neon endpoint."""
+    neon = NeonAPI(api_key=api_key)
+    try:
+        _retry_on_rate_limit(
+            lambda: neon.endpoint_delete(
+                project_id=project_id, endpoint_id=endpoint_id
+            ),
+            operation_name="endpoint_delete",
+        )
+    except Exception as e:
+        warnings.warn(
+            f"Failed to delete Neon endpoint {endpoint_id}: {e}",
+            stacklevel=2,
+        )
+
+
 def _reset_branch_to_parent(branch: NeonBranch, api_key: str) -> None:
     """Reset a branch to its parent's state using the Neon API.

@@ -822,17 +1359,49 @@ def _dict_to_branch(data: dict[str, Any]) -> NeonBranch:
 _MIGRATION_WAIT_TIMEOUT = 300  # 5 minutes


+@pytest.fixture(scope="session")
+def _neon_config(request: pytest.FixtureRequest) -> NeonConfig:
+    """
+    Session-scoped Neon configuration extracted from pytest config.
+
+    Skips tests if required configuration (api_key, project_id) is missing.
+    """
+    config = NeonConfig.from_pytest_config(request.config)
+    if config is None:
+        pytest.skip(
+            "Neon configuration missing. Set NEON_API_KEY and NEON_PROJECT_ID "
+            "environment variables or use --neon-api-key and --neon-project-id."
+        )
+    return config
+
+
+@pytest.fixture(scope="session")
+def _neon_branch_manager(_neon_config: NeonConfig) -> NeonBranchManager:
+    """Session-scoped branch manager for Neon operations."""
+    return NeonBranchManager(_neon_config)
+
+
+@pytest.fixture(scope="session")
+def _neon_xdist_coordinator(
+    tmp_path_factory: pytest.TempPathFactory,
+) -> XdistCoordinator:
+    """Session-scoped coordinator for xdist worker synchronization."""
+    return XdistCoordinator(tmp_path_factory)
+
+
 @pytest.fixture(scope="session")
 def _neon_migration_branch(
     request: pytest.FixtureRequest,
-
+    _neon_config: NeonConfig,
+    _neon_branch_manager: NeonBranchManager,
+    _neon_xdist_coordinator: XdistCoordinator,
 ) -> Generator[NeonBranch, None, None]:
     """
     Session-scoped branch where migrations are applied.

-    This branch is created from the configured parent and serves as
-    the parent for all test branches
-    on this branch.
+    This branch is ALWAYS created from the configured parent and serves as
+    the parent for all test branches (dirty, isolated, readonly endpoint).
+    Migrations run once per session on this branch.

     pytest-xdist Support:
         When running with pytest-xdist, the first worker to acquire the lock
@@ -844,129 +1413,43 @@ def _neon_migration_branch(
     Note: The migration branch cannot have an expiry because Neon doesn't
     allow creating child branches from branches with expiration dates.
     Cleanup relies on the fixture teardown at session end.
-
-    Smart Migration Detection:
-        Before yielding, this fixture captures a schema fingerprint and stores
-        it on request.config. After migrations run, _neon_branch_for_reset
-        compares the fingerprint to detect if the schema actually changed.
     """
-
-    worker_id = _get_xdist_worker_id()
-    is_xdist = worker_id != "main"
-
-    # Get env var name for DATABASE_URL
-    env_var_name = _get_config_value(
-        config, "neon_env_var", "", "neon_env_var", "DATABASE_URL"
-    )
-
-    # For xdist, use shared temp directory and filelock
-    # tmp_path_factory.getbasetemp().parent is shared across all workers
-    if is_xdist:
-        root_tmp_dir = tmp_path_factory.getbasetemp().parent
-        cache_file = root_tmp_dir / "neon_migration_branch.json"
-        lock_file = root_tmp_dir / "neon_migration_branch.lock"
-        migrations_done_file = root_tmp_dir / "neon_migrations_done"
-    else:
-        cache_file = None
-        lock_file = None
-        migrations_done_file = None
-
-    is_creator = False
+    env_manager = EnvironmentManager(_neon_config.env_var_name)
     branch: NeonBranch
-
-    original_env_value: str | None = None
+    is_creator: bool

-
-
-
-
-
-
-            data = json.loads(cache_file.read_text())
-            branch = _dict_to_branch(data["branch"])
-            pre_migration_fingerprint = tuple(
-                tuple(row) for row in data["pre_migration_fingerprint"]
-            )
-            config._neon_pre_migration_fingerprint = pre_migration_fingerprint  # type: ignore[attr-defined]
+    def create_migration_branch() -> dict[str, Any]:
+        b = _neon_branch_manager.create_branch(
+            name_suffix="-migration",
+            expiry_seconds=0,  # No expiry - child branches need this
+        )
+        return {"branch": _branch_to_dict(b)}

-
-
-
-
-
-            is_creator = True
-            branch_gen = _create_neon_branch(
-                request,
-                branch_expiry_override=0,
-                branch_name_suffix="-migrated",
-            )
-            branch = next(branch_gen)
+    # Coordinate branch creation across xdist workers
+    data, is_creator = _neon_xdist_coordinator.coordinate_resource(
+        "migration_branch", create_migration_branch
+    )
+    branch = _dict_to_branch(data["branch"])

-
-
-                branch.connection_string
-            )
-            config._neon_pre_migration_fingerprint = pre_migration_fingerprint  # type: ignore[attr-defined]
-
-            # Cache for other workers (they'll read this after lock released)
-            # Note: We cache now with pre-migration fingerprint. The branch
-            # content will have migrations applied by neon_apply_migrations.
-            cache_file.write_text(
-                json.dumps(
-                    {
-                        "branch": _branch_to_dict(branch),
-                        "pre_migration_fingerprint": pre_migration_fingerprint,
-                    }
-                )
-            )
+    # Store creator status for other fixtures
+    request.config._neon_is_migration_creator = is_creator  # type: ignore[attr-defined]

-
-
-        # concurrently on the same branch, causing race conditions.
-        if not is_creator:
-            waited = 0.0
-            poll_interval = 0.5
-            while not migrations_done_file.exists():
-                if waited >= _MIGRATION_WAIT_TIMEOUT:
-                    raise RuntimeError(
-                        f"Timeout waiting for migrations to complete after "
-                        f"{_MIGRATION_WAIT_TIMEOUT}s. The creator worker may have "
-                        f"failed or is still running migrations."
-                    )
-                time.sleep(poll_interval)
-                waited += poll_interval
-    else:
-        # Not using xdist - create branch normally
-        is_creator = True
-        branch_gen = _create_neon_branch(
-            request,
-            branch_expiry_override=0,
-            branch_name_suffix="-migrated",
-        )
-        branch = next(branch_gen)
-
-        # Capture schema fingerprint BEFORE migrations run
-        pre_migration_fingerprint = _get_schema_fingerprint(branch.connection_string)
-        config._neon_pre_migration_fingerprint = pre_migration_fingerprint  # type: ignore[attr-defined]
+    # Set DATABASE_URL
+    env_manager.set(branch.connection_string)

-    #
-
-
-
+    # Non-creators wait for migrations to complete
+    if not is_creator:
+        _neon_xdist_coordinator.wait_for_signal(
+            "migrations_done", timeout=_MIGRATION_WAIT_TIMEOUT
+        )

     try:
         yield branch
     finally:
-
-
-
-
-        os.environ.pop(env_var_name, None)
-
-        # Only the creator cleans up the branch
-        if is_creator and branch_gen is not None:
-            with contextlib.suppress(StopIteration):
-                next(branch_gen)
+        env_manager.restore()
+        # Only creator cleans up
+        if is_creator:
+            _neon_branch_manager.delete_branch(branch.branch_id)


 @pytest.fixture(scope="session")
@@ -1029,6 +1512,7 @@ def neon_apply_migrations(_neon_migration_branch: NeonBranch) -> Any:
 def _neon_migrations_synchronized(
     request: pytest.FixtureRequest,
     _neon_migration_branch: NeonBranch,
+    _neon_xdist_coordinator: XdistCoordinator,
     neon_apply_migrations: Any,
 ) -> Any:
     """
@@ -1042,194 +1526,288 @@ def _neon_migrations_synchronized(

     Without xdist, this is a simple passthrough.
     """
-
-    is_creator = getattr(config, "_neon_is_migration_creator", True)
-    migrations_done_file = getattr(config, "_neon_migrations_done_file", None)
+    is_creator = getattr(request.config, "_neon_is_migration_creator", True)

-    if is_creator
+    if is_creator:
         # Creator: migrations just ran via neon_apply_migrations dependency
-        # Signal completion to other workers
-
-        migrations_done_file.write_text("done")
+        # Signal completion to other workers
+        _neon_xdist_coordinator.send_signal("migrations_done")

     return neon_apply_migrations


 @pytest.fixture(scope="session")
-def
-
+def _neon_dirty_branch(
+    _neon_config: NeonConfig,
+    _neon_branch_manager: NeonBranchManager,
+    _neon_xdist_coordinator: XdistCoordinator,
     _neon_migration_branch: NeonBranch,
-    _neon_migrations_synchronized: Any,  # Ensures migrations complete
+    _neon_migrations_synchronized: Any,  # Ensures migrations complete first
 ) -> Generator[NeonBranch, None, None]:
     """
-
+    Session-scoped dirty branch shared across ALL xdist workers.
+
+    This branch is a child of the migration branch. All tests using
+    neon_branch_dirty share this single branch - writes persist and
+    are visible to all tests (even across workers).
+
+    This is the "dirty" branch because:
+    - No reset between tests
+    - Shared across all workers (concurrent writes possible)
+    - Fast because no per-test overhead
+    """
+    env_manager = EnvironmentManager(_neon_config.env_var_name)
+    branch: NeonBranch
+    is_creator: bool

-
-
-
+    def create_dirty_branch() -> dict[str, Any]:
+        b = _neon_branch_manager.create_branch(
+            name_suffix="-dirty",
+            parent_branch_id=_neon_migration_branch.branch_id,
+            expiry_seconds=_neon_config.branch_expiry,
+        )
+        return {"branch": _branch_to_dict(b)}

-
-
-
-
-
+    # Coordinate dirty branch creation - shared across ALL workers
+    data, is_creator = _neon_xdist_coordinator.coordinate_resource(
+        "dirty_branch", create_dirty_branch
+    )
+    branch = _dict_to_branch(data["branch"])

-
-
+    # Set DATABASE_URL
+    env_manager.set(branch.connection_string)

-
-
+    try:
+        yield branch
+    finally:
+        env_manager.restore()
+        # Only creator cleans up
+        if is_creator:
+            _neon_branch_manager.delete_branch(branch.branch_id)

-    2. If neon_apply_migrations was overridden, compare schema fingerprints
-       before/after migrations. Only create a child branch if the schema
-       actually changed.

-
-
-
+@pytest.fixture(scope="session")
+def _neon_readonly_endpoint(
+    _neon_config: NeonConfig,
+    _neon_branch_manager: NeonBranchManager,
+    _neon_xdist_coordinator: XdistCoordinator,
+    _neon_migration_branch: NeonBranch,
+    _neon_migrations_synchronized: Any,  # Ensures migrations complete first
+) -> Generator[NeonBranch, None, None]:
     """
-
-    # _neon_migrations_synchronized passes through the neon_apply_migrations value
-    migrations_defined = _neon_migrations_synchronized is not _MIGRATIONS_NOT_DEFINED
-
-    # Check if schema actually changed (if we have a pre-migration fingerprint)
-    pre_fingerprint = getattr(request.config, "_neon_pre_migration_fingerprint", ())
-    schema_changed = False
-
-    if migrations_defined and pre_fingerprint:
-        # Compare with current schema
-        conn_str = _neon_migration_branch.connection_string
-        post_fingerprint = _get_schema_fingerprint(conn_str)
-        schema_changed = pre_fingerprint != post_fingerprint
-    elif migrations_defined and not pre_fingerprint:
-        # No fingerprint available (no psycopg/psycopg2 installed)
-        # Assume migrations changed something to be safe
-        schema_changed = True
-
-    # Get worker ID for parallel test support
-    # Each xdist worker gets its own branch to avoid state pollution
-    worker_id = _get_xdist_worker_id()
-    branch_suffix = f"-test-{worker_id}"
-
-    # Only create a child branch if migrations actually modified the schema
-    # OR if we're running under xdist (each worker needs its own branch)
-    if schema_changed or worker_id != "main":
-        yield from _create_neon_branch(
-            request,
-            parent_branch_id_override=_neon_migration_branch.branch_id,
-            branch_name_suffix=branch_suffix,
-        )
-    else:
-        # No schema changes and not parallel - reuse the migration branch directly
-        # This saves creating an unnecessary branch
-        yield _neon_migration_branch
+    Session-scoped read_only endpoint on the migration branch.

+    This is a true read-only endpoint - writes are blocked at the database
+    level. All workers share this endpoint since it's read-only anyway.
+    """
+    env_manager = EnvironmentManager(_neon_config.env_var_name)
+    branch: NeonBranch
+    is_creator: bool

-
-
+    def create_readonly_endpoint() -> dict[str, Any]:
+        b = _neon_branch_manager.create_readonly_endpoint(_neon_migration_branch)
+        return {"branch": _branch_to_dict(b)}
+
+    # Coordinate endpoint creation - shared across ALL workers
+    data, is_creator = _neon_xdist_coordinator.coordinate_resource(
+        "readonly_endpoint", create_readonly_endpoint
+    )
+    branch = _dict_to_branch(data["branch"])
+
+    # Set DATABASE_URL
+    env_manager.set(branch.connection_string)
+
+    try:
+        yield branch
+    finally:
+        env_manager.restore()
+        # Only creator cleans up the endpoint
+        if is_creator and branch.endpoint_id:
+            _neon_branch_manager.delete_endpoint(branch.endpoint_id)
+
+
+@pytest.fixture(scope="session")
+def _neon_isolated_branch(
     request: pytest.FixtureRequest,
-
+    _neon_config: NeonConfig,
+    _neon_branch_manager: NeonBranchManager,
+    _neon_xdist_coordinator: XdistCoordinator,
+    _neon_migration_branch: NeonBranch,
+    _neon_migrations_synchronized: Any,  # Ensures migrations complete first
 ) -> Generator[NeonBranch, None, None]:
     """
-
+    Session-scoped isolated branch, one per xdist worker.

-
-
-    branch's state after each test. This provides test isolation with
-    ~0.5s overhead per test.
+    Each worker gets its own branch. Unlike the dirty branch, this is
+    per-worker to allow reset operations without affecting other workers.

-
-
-
+    The branch is reset after each test that uses neon_branch_isolated.
+    """
+    env_manager = EnvironmentManager(_neon_config.env_var_name)
+    worker_id = _neon_xdist_coordinator.worker_id
+
+    # Each worker creates its own isolated branch - no coordination needed
+    # because each worker has a unique ID
+    branch = _neon_branch_manager.create_branch(
+        name_suffix=f"-isolated-{worker_id}",
+        parent_branch_id=_neon_migration_branch.branch_id,
+        expiry_seconds=_neon_config.branch_expiry,
+    )

-
-
-    10 minutes by default (configurable via --neon-branch-expiry) as a safety net
-    for interrupted test runs.
+    # Store branch manager on config for reset operations
+    request.config._neon_isolated_branch_manager = _neon_branch_manager  # type: ignore[attr-defined]

-
-
+    # Set DATABASE_URL
+    env_manager.set(branch.connection_string)

-
-
-
+    try:
+        yield branch
+    finally:
+        env_manager.restore()
+        _neon_branch_manager.delete_branch(branch.branch_id)

-        engine = create_engine(DATABASE_URL, pool_pre_ping=True)

-
-
+@pytest.fixture(scope="session")
+def neon_branch_readonly(
+    _neon_config: NeonConfig,
+    _neon_readonly_endpoint: NeonBranch,
+) -> NeonBranch:
+    """
+    Provide a true read-only Neon database connection.
+
+    This fixture uses a read_only endpoint on the migration branch, which
+    enforces read-only access at the database level. Any attempt to write
+    will result in a database error.
+
+    This is the recommended fixture for tests that only read data (SELECT queries).
+    It's session-scoped and shared across all tests and workers since it's read-only.
+
+    Use this fixture when your tests only perform SELECT queries.
+    For tests that INSERT, UPDATE, or DELETE data, use ``neon_branch_dirty``
+    (for shared state) or ``neon_branch_isolated`` (for test isolation).
+
+    The connection string is automatically set in the DATABASE_URL environment
+    variable (configurable via --neon-env-var).

     Requires either:
     - NEON_API_KEY and NEON_PROJECT_ID environment variables, or
     - --neon-api-key and --neon-project-id command line options

-
+    Returns:
         NeonBranch: Object with branch_id, project_id, connection_string, and host.

     Example::

-        def
+        def test_query_users(neon_branch_readonly):
             # DATABASE_URL is automatically set
             conn_string = os.environ["DATABASE_URL"]
-            # or use directly
-            conn_string = neon_branch_readwrite.connection_string

-            #
+            # Read-only query
             with psycopg.connect(conn_string) as conn:
-                conn.execute("
-
+                result = conn.execute("SELECT * FROM users").fetchall()
+                assert len(result) > 0
+
+            # This would fail with a database error:
+            # conn.execute("INSERT INTO users (name) VALUES ('test')")
     """
-
-
+    # DATABASE_URL is already set by _neon_readonly_endpoint
+    return _neon_readonly_endpoint

-    # Validate that branch has a parent for reset functionality
-    if not _neon_branch_for_reset.parent_id:
-        pytest.fail(
-            f"\n\nBranch {_neon_branch_for_reset.branch_id} has no parent. "
-            f"The neon_branch_readwrite fixture requires a parent branch for "
-            f"reset.\n\n"
-            f"Use neon_branch_readonly if you don't need reset, or specify "
-            f"a parent branch with --neon-parent-branch or NEON_PARENT_BRANCH_ID."
-        )

-
+@pytest.fixture(scope="session")
+def neon_branch_dirty(
+    _neon_config: NeonConfig,
+    _neon_dirty_branch: NeonBranch,
+) -> NeonBranch:
+    """
+    Provide a session-scoped Neon database branch for read-write access.

-
-
-
-        _reset_branch_to_parent(branch=_neon_branch_for_reset, api_key=api_key)
-    except Exception as e:
-        pytest.fail(
-            f"\n\nFailed to reset branch {_neon_branch_for_reset.branch_id} "
-            f"after test. Subsequent tests in this module may see dirty "
-            f"database state.\n\nError: {e}\n\n"
-            f"To keep the branch for debugging, use --neon-keep-branches"
-        )
+    All tests share the same branch and writes persist across tests (no cleanup
+    between tests). This is faster than neon_branch_isolated because there's no
+    reset overhead.

+    Use this fixture when:
+    - Most tests can share database state without interference
+    - You want maximum performance with minimal API calls
+    - You manually manage test data cleanup if needed
+    - You're using it alongside ``neon_branch_isolated`` for specific tests
+      that need guaranteed clean state

-
-
-
-
+    The connection string is automatically set in the DATABASE_URL environment
+    variable (configurable via --neon-env-var).
+
+    Warning:
+        Data written by one test WILL be visible to subsequent tests AND to
+        other xdist workers. This is truly shared - use ``neon_branch_isolated``
+        for tests that require guaranteed clean state.
+
+    pytest-xdist:
+        ALL workers share the same dirty branch. Concurrent writes from different
+        workers may conflict. This is "dirty" by design - for isolation, use
+        ``neon_branch_isolated``.
+
+    Requires either:
+    - NEON_API_KEY and NEON_PROJECT_ID environment variables, or
+    - --neon-api-key and --neon-project-id command line options
+
+    Returns:
+        NeonBranch: Object with branch_id, project_id, connection_string, and host.
+
+    Example::
+
+        def test_insert_user(neon_branch_dirty):
+            # DATABASE_URL is automatically set
+            import psycopg
+            with psycopg.connect(neon_branch_dirty.connection_string) as conn:
+                conn.execute("INSERT INTO users (name) VALUES ('test')")
+                conn.commit()
+            # Data persists - next test will see this user
+
+        def test_count_users(neon_branch_dirty):
+            # This test sees data from previous tests
+            import psycopg
+            with psycopg.connect(neon_branch_dirty.connection_string) as conn:
+                result = conn.execute("SELECT COUNT(*) FROM users").fetchone()
+                # Count includes users from previous tests
     """
-
+    # DATABASE_URL is already set by _neon_dirty_branch
+    return _neon_dirty_branch

-    This is the recommended fixture for tests that only read data (SELECT queries).
-    No branch reset occurs after each test, making it faster than
-    ``neon_branch_readwrite`` (~0.5s saved per test).

-
-
-
+@pytest.fixture(scope="function")
+def neon_branch_isolated(
+    request: pytest.FixtureRequest,
+    _neon_config: NeonConfig,
+    _neon_isolated_branch: NeonBranch,
+) -> Generator[NeonBranch, None, None]:
+    """
+    Provide an isolated Neon database branch with reset after each test.

-
-
-
-
+    This is the recommended fixture for tests that modify database state and
+    need isolation. Each xdist worker has its own branch, and the branch is
+    reset to the migration state after each test.
+
+    Use this fixture when:
+    - Tests modify database state (INSERT, UPDATE, DELETE)
+    - You need test isolation (each test starts with clean state)
+    - You're using it alongside ``neon_branch_dirty`` for specific tests

     The connection string is automatically set in the DATABASE_URL environment
     variable (configurable via --neon-env-var).

+    SQLAlchemy Users:
+        If you create your own engine (not using the neon_engine fixture),
+        you MUST use pool_pre_ping=True::
+
+            engine = create_engine(DATABASE_URL, pool_pre_ping=True)
+
+        Branch resets terminate server-side connections. Without pool_pre_ping,
+        SQLAlchemy may reuse dead pooled connections, causing SSL errors.
+
+    pytest-xdist:
+        Each worker has its own isolated branch. Resets only affect that worker's
+        branch, so workers don't interfere with each other.
+
     Requires either:
     - NEON_API_KEY and NEON_PROJECT_ID environment variables, or
     - --neon-api-key and --neon-project-id command line options
@@ -1239,43 +1817,79 @@ def neon_branch_readonly(

     Example::

-        def
+        def test_insert_user(neon_branch_isolated):
             # DATABASE_URL is automatically set
             conn_string = os.environ["DATABASE_URL"]

-            #
+            # Insert data - branch will reset after this test
             with psycopg.connect(conn_string) as conn:
-
-
+                conn.execute("INSERT INTO users (name) VALUES ('test')")
+                conn.commit()
+            # Next test starts with clean state
     """
-
+    # DATABASE_URL is already set by _neon_isolated_branch
+    yield _neon_isolated_branch
+
+    # Reset branch to migration state after each test
+    branch_manager = getattr(request.config, "_neon_isolated_branch_manager", None)
+    if branch_manager is not None:
+        try:
+            branch_manager.reset_branch(_neon_isolated_branch)
+        except Exception as e:
+            pytest.fail(
+                f"\n\nFailed to reset branch {_neon_isolated_branch.branch_id} "
+                f"after test. Subsequent tests may see dirty state.\n\n"
+                f"Error: {e}\n\n"
+                f"To keep the branch for debugging, use --neon-keep-branches"
+            )


 @pytest.fixture(scope="function")
-def
-
-    neon_branch_readwrite: NeonBranch,
+def neon_branch_readwrite(
+    neon_branch_isolated: NeonBranch,
 ) -> Generator[NeonBranch, None, None]:
     """
-    Deprecated: Use ``
+    Deprecated: Use ``neon_branch_isolated`` instead.
+
+    This fixture is now an alias for ``neon_branch_isolated``.
+
+    .. deprecated:: 2.3.0
+        Use ``neon_branch_isolated`` for tests that modify data with reset,
+        ``neon_branch_dirty`` for shared state, or ``neon_branch_readonly``
+        for read-only access.
+    """
+    warnings.warn(
+        "neon_branch_readwrite is deprecated. Use neon_branch_isolated (for tests "
+        "that modify data with isolation) or neon_branch_dirty (for shared state).",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    yield neon_branch_isolated

-    This fixture is an alias for ``neon_branch_readwrite`` and will be removed
-    in a future version. Please migrate to the explicit fixture names:

-
-
+@pytest.fixture(scope="function")
+def neon_branch(
+    neon_branch_isolated: NeonBranch,
+) -> Generator[NeonBranch, None, None]:
+    """
+    Deprecated: Use ``neon_branch_isolated``, ``neon_branch_dirty``, or
+    ``neon_branch_readonly`` instead.
+
+    This fixture is now an alias for ``neon_branch_isolated``.

     .. deprecated:: 1.1.0
-        Use ``
-
+        Use ``neon_branch_isolated`` for tests that modify data with reset,
+        ``neon_branch_dirty`` for shared state, or ``neon_branch_readonly``
+        for read-only access.
     """
     warnings.warn(
-        "neon_branch is deprecated. Use
-        "modify data)
+        "neon_branch is deprecated. Use neon_branch_isolated (for tests that "
+        "modify data), neon_branch_dirty (for shared state), or "
+        "neon_branch_readonly (for read-only tests).",
         DeprecationWarning,
         stacklevel=2,
     )
-    yield
+    yield neon_branch_isolated


 @pytest.fixture(scope="module")
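Since both deprecated fixtures now just forward to neon_branch_isolated and emit a DeprecationWarning, migrating an existing suite is essentially a rename. A minimal before/after sketch, assuming a test that previously took the old fixture:

    # Before (2.2.x) - emits DeprecationWarning under 2.3.0
    def test_update_user(neon_branch):
        ...

    # After (2.3.0) - name the isolation level you actually want
    def test_update_user(neon_branch_isolated):
        ...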
@@ -1319,7 +1933,7 @@ def neon_branch_shared(


 @pytest.fixture
-def neon_connection(
+def neon_connection(neon_branch_isolated: NeonBranch):
     """
     Provide a psycopg2 connection to the test branch.

@@ -1327,6 +1941,7 @@ def neon_connection(neon_branch: NeonBranch):
         pip install pytest-neon[psycopg2]

     The connection is rolled back and closed after each test.
+    Uses neon_branch_isolated for test isolation.

     Yields:
         psycopg2 connection object
@@ -1348,21 +1963,22 @@ def neon_connection(neon_branch: NeonBranch):
             " The 'neon_connection' fixture requires psycopg2.\n\n"
             " To fix this, install the psycopg2 extra:\n\n"
             " pip install pytest-neon[psycopg2]\n\n"
-            " Or use the '
-            " def test_example(
+            " Or use the 'neon_branch_isolated' fixture with your own driver:\n\n"
+            " def test_example(neon_branch_isolated):\n"
             " import your_driver\n"
-            " conn = your_driver.connect(
+            " conn = your_driver.connect(\n"
+            " neon_branch_isolated.connection_string)\n\n"
             "═══════════════════════════════════════════════════════════════════\n"
         )

-    conn = psycopg2.connect(
+    conn = psycopg2.connect(neon_branch_isolated.connection_string)
     yield conn
     conn.rollback()
     conn.close()


 @pytest.fixture
-def neon_connection_psycopg(
+def neon_connection_psycopg(neon_branch_isolated: NeonBranch):
     """
     Provide a psycopg (v3) connection to the test branch.

@@ -1370,6 +1986,7 @@ def neon_connection_psycopg(neon_branch: NeonBranch):
         pip install pytest-neon[psycopg]

     The connection is rolled back and closed after each test.
+    Uses neon_branch_isolated for test isolation.

     Yields:
         psycopg connection object
@@ -1391,21 +2008,22 @@ def neon_connection_psycopg(neon_branch: NeonBranch):
             " The 'neon_connection_psycopg' fixture requires psycopg v3.\n\n"
             " To fix this, install the psycopg extra:\n\n"
             " pip install pytest-neon[psycopg]\n\n"
-            " Or use the '
-            " def test_example(
+            " Or use the 'neon_branch_isolated' fixture with your own driver:\n\n"
+            " def test_example(neon_branch_isolated):\n"
             " import your_driver\n"
-            " conn = your_driver.connect(
+            " conn = your_driver.connect(\n"
+            " neon_branch_isolated.connection_string)\n\n"
            "═══════════════════════════════════════════════════════════════════\n"
         )

-    conn = psycopg.connect(
+    conn = psycopg.connect(neon_branch_isolated.connection_string)
     yield conn
     conn.rollback()
     conn.close()


 @pytest.fixture
-def neon_engine(
+def neon_engine(neon_branch_isolated: NeonBranch):
     """
     Provide a SQLAlchemy engine connected to the test branch.

@@ -1413,7 +2031,7 @@ def neon_engine(neon_branch: NeonBranch):
         pip install pytest-neon[sqlalchemy]

     The engine is disposed after each test, which handles stale connections
-    after branch resets automatically.
+    after branch resets automatically. Uses neon_branch_isolated for test isolation.

     Note:
         If you create your own module-level engine instead of using this
@@ -1445,13 +2063,14 @@ def neon_engine(neon_branch: NeonBranch):
             " The 'neon_engine' fixture requires SQLAlchemy.\n\n"
             " To fix this, install the sqlalchemy extra:\n\n"
             " pip install pytest-neon[sqlalchemy]\n\n"
-            " Or use the '
-            " def test_example(
+            " Or use the 'neon_branch_isolated' fixture with your own driver:\n\n"
+            " def test_example(neon_branch_isolated):\n"
             " from sqlalchemy import create_engine\n"
-            " engine = create_engine(
+            " engine = create_engine(\n"
+            " neon_branch_isolated.connection_string)\n\n"
             "═══════════════════════════════════════════════════════════════════\n"
         )

-    engine = create_engine(
+    engine = create_engine(neon_branch_isolated.connection_string)
     yield engine
     engine.dispose()