iceaxe 0.7.0.dev3.tar.gz → 0.7.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {iceaxe-0.7.0.dev3/iceaxe.egg-info → iceaxe-0.7.1}/PKG-INFO +1 -1
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/conftest.py +97 -28
- iceaxe-0.7.1/iceaxe/__tests__/docker_helpers.py +208 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/migrations/test_action_sorter.py +1 -1
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_session.py +32 -30
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/migrations/migration.py +22 -2
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/migrations/migrator.py +13 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/schemas/db_stubs.py +22 -16
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/session_optimized.c +244 -68
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1/iceaxe.egg-info}/PKG-INFO +1 -1
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe.egg-info/SOURCES.txt +1 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/pyproject.toml +2 -2
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/LICENSE +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/MANIFEST.in +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/README.md +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/benchmarks/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/benchmarks/test_bulk_insert.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/benchmarks/test_select.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/conf_models.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/helpers.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/migrations/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/migrations/conftest.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/migrations/test_generator.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/migrations/test_generics.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/mountaineer/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/mountaineer/dependencies/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/mountaineer/dependencies/test_core.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/schemas/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/schemas/test_actions.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/schemas/test_cli.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/schemas/test_db_memory_serializer.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/schemas/test_db_serializer.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/schemas/test_db_stubs.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_alias.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_base.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_comparison.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_field.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_helpers.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_modifications.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_queries.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_queries_str.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/__tests__/test_text_search.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/alias_values.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/base.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/comparison.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/field.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/functions.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/generics.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/io.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/logging.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/migrations/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/migrations/action_sorter.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/migrations/cli.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/migrations/client_io.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/migrations/generator.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/modifications.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/mountaineer/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/mountaineer/cli.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/mountaineer/config.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/mountaineer/dependencies/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/mountaineer/dependencies/core.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/postgres.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/py.typed +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/queries.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/queries_str.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/schemas/__init__.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/schemas/actions.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/schemas/cli.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/schemas/db_memory_serializer.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/schemas/db_serializer.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/session.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/session_optimized.pyx +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/sql_types.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe/typing.py +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe.egg-info/dependency_links.txt +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe.egg-info/requires.txt +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/iceaxe.egg-info/top_level.txt +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/setup.cfg +0 -0
- {iceaxe-0.7.0.dev3 → iceaxe-0.7.1}/setup.py +0 -0
```diff
--- iceaxe-0.7.0.dev3/iceaxe/__tests__/conftest.py
+++ iceaxe-0.7.1/iceaxe/__tests__/conftest.py
@@ -1,84 +1,151 @@
+import logging
+
 import asyncpg
 import pytest
 import pytest_asyncio
 
+from iceaxe.__tests__ import docker_helpers
 from iceaxe.base import DBModelMetaclass
 from iceaxe.session import DBConnection
 
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="session")
+def docker_postgres():
+    """
+    Fixture that creates a PostgreSQL container using the Python Docker API.
+    This allows running individual tests without needing Docker Compose.
+    """
+    # Create and start a PostgreSQL container
+    postgres_container = docker_helpers.PostgresContainer()
+
+    # Start the container and yield connection details
+    connection_info = postgres_container.start()
+    yield connection_info
+
+    # Cleanup: stop the container
+    postgres_container.stop()
+
 
 @pytest_asyncio.fixture
-async def db_connection():
+async def db_connection(docker_postgres):
+    """
+    Create a database connection using the PostgreSQL container.
+    """
     conn = DBConnection(
         await asyncpg.connect(
-            host="
-            port=
-            user="
-            password="
-            database="
+            host=docker_postgres["host"],
+            port=docker_postgres["port"],
+            user=docker_postgres["user"],
+            password=docker_postgres["password"],
+            database=docker_postgres["database"],
         )
     )
 
     # Drop all tables first to ensure clean state
-
-
-
-
+    known_tables = [
+        "artifactdemo",
+        "userdemo",
+        "complexdemo",
+        "article",
+        "employee",
+        "department",
+        "projectassignment",
+        "employeemetadata",
+        "functiondemomodel",
+        "demomodela",
+        "demomodelb",
+        "jsondemo",
+        "complextypedemo",
+    ]
+    known_types = ["statusenum", "employeestatus"]
+
+    for table in known_tables:
+        await conn.conn.execute(f"DROP TABLE IF EXISTS {table} CASCADE", timeout=30.0)
+
+    for known_type in known_types:
+        await conn.conn.execute(
+            f"DROP TYPE IF EXISTS {known_type} CASCADE", timeout=30.0
+        )
 
     # Create tables
-    await conn.conn.execute(
+    await conn.conn.execute(
+        """
         CREATE TABLE IF NOT EXISTS userdemo (
             id SERIAL PRIMARY KEY,
             name TEXT,
             email TEXT
         )
-        """
+        """,
+        timeout=30.0,
+    )
 
-    await conn.conn.execute(
+    await conn.conn.execute(
+        """
         CREATE TABLE IF NOT EXISTS artifactdemo (
             id SERIAL PRIMARY KEY,
             title TEXT,
             user_id INT REFERENCES userdemo(id)
         )
-        """
+        """,
+        timeout=30.0,
+    )
 
-    await conn.conn.execute(
+    await conn.conn.execute(
+        """
         CREATE TABLE IF NOT EXISTS complexdemo (
             id SERIAL PRIMARY KEY,
             string_list TEXT[],
             json_data JSON
         )
-        """
+        """,
+        timeout=30.0,
+    )
 
-    await conn.conn.execute(
+    await conn.conn.execute(
+        """
         CREATE TABLE IF NOT EXISTS article (
             id SERIAL PRIMARY KEY,
             title TEXT,
             content TEXT,
             summary TEXT
         )
-        """
+        """,
+        timeout=30.0,
+    )
 
     # Create each index separately to handle errors better
     yield conn
 
     # Drop all tables after tests
-
-
-
-
+    for table in known_tables:
+        await conn.conn.execute(f"DROP TABLE IF EXISTS {table} CASCADE", timeout=30.0)
+
+    # Drop all types after tests
+    for known_type in known_types:
+        await conn.conn.execute(
+            f"DROP TYPE IF EXISTS {known_type} CASCADE", timeout=30.0
+        )
+
     await conn.conn.close()
 
 
 @pytest_asyncio.fixture()
 async def indexed_db_connection(db_connection: DBConnection):
     await db_connection.conn.execute(
-        "CREATE INDEX IF NOT EXISTS article_title_tsv_idx ON article USING GIN (to_tsvector('english', title))"
+        "CREATE INDEX IF NOT EXISTS article_title_tsv_idx ON article USING GIN (to_tsvector('english', title))",
+        timeout=30.0,
     )
     await db_connection.conn.execute(
-        "CREATE INDEX IF NOT EXISTS article_content_tsv_idx ON article USING GIN (to_tsvector('english', content))"
+        "CREATE INDEX IF NOT EXISTS article_content_tsv_idx ON article USING GIN (to_tsvector('english', content))",
+        timeout=30.0,
     )
     await db_connection.conn.execute(
-        "CREATE INDEX IF NOT EXISTS article_summary_tsv_idx ON article USING GIN (to_tsvector('english', summary))"
+        "CREATE INDEX IF NOT EXISTS article_summary_tsv_idx ON article USING GIN (to_tsvector('english', summary))",
+        timeout=30.0,
     )
 
     yield db_connection
@@ -88,7 +155,7 @@ async def indexed_db_connection(db_connection: DBConnection):
 async def clear_table(db_connection):
     # Clear all tables and reset sequences
     await db_connection.conn.execute(
-        "TRUNCATE TABLE userdemo, article RESTART IDENTITY CASCADE"
+        "TRUNCATE TABLE userdemo, article RESTART IDENTITY CASCADE", timeout=30.0
     )
 
 
@@ -107,7 +174,8 @@ async def clear_all_database_objects(db_connection: DBConnection):
                 EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE';
             END LOOP;
         END $$;
-        """
+        """,
+        timeout=30.0,
     )
 
     # Step 2: Drop all custom types in the public schema
@@ -120,7 +188,8 @@ async def clear_all_database_objects(db_connection: DBConnection):
                 EXECUTE 'DROP TYPE IF EXISTS ' || quote_ident(r.typname) || ' CASCADE';
             END LOOP;
         END $$;
-        """
+        """,
+        timeout=30.0,
     )
 
 
```
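The net effect of the conftest changes: a session-scoped `docker_postgres` fixture owns the container, and `db_connection` builds on it, so individual tests no longer assume a running Docker Compose stack. A minimal sketch of a consuming test (the test name and query are illustrative, not from the package):

```python
# Hypothetical consumer of the fixtures above; relies only on names
# defined in conftest.py plus asyncpg's standard fetchval API.
import pytest


@pytest.mark.asyncio
async def test_container_roundtrip(db_connection):
    # Requesting db_connection transitively starts the session-scoped container.
    assert await db_connection.conn.fetchval("SELECT 1") == 1
```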
```diff
--- /dev/null
+++ iceaxe-0.7.1/iceaxe/__tests__/docker_helpers.py
@@ -0,0 +1,208 @@
+"""
+Docker helper utilities for testing.
+
+This module provides classes and functions to manage Docker containers for testing,
+particularly focusing on PostgreSQL database containers.
+"""
+
+import logging
+import socket
+import time
+import uuid
+from typing import Any, Dict, Optional, cast
+
+import docker
+from docker.errors import APIError
+
+# Configure logging
+logger = logging.getLogger(__name__)
+
+
+def get_free_port() -> int:
+    """Find a free port on the host machine."""
+    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+        s.bind(("", 0))
+        return s.getsockname()[1]
+
+
+class PostgresContainer:
+    """
+    A class that manages a PostgreSQL Docker container for testing.
+
+    This class handles the lifecycle of a PostgreSQL container, including:
+    - Starting the container with appropriate configuration
+    - Finding available ports
+    - Waiting for the container to be ready
+    - Providing connection information
+    - Cleaning up after tests
+    """
+
+    def __init__(
+        self,
+        pg_user: str = "iceaxe",
+        pg_password: str = "mysecretpassword",
+        pg_db: str = "iceaxe_test_db",
+        postgres_version: str = "16",
+    ):
+        self.pg_user = pg_user
+        self.pg_password = pg_password
+        self.pg_db = pg_db
+        self.postgres_version = postgres_version
+        self.port = get_free_port()
+        self.container: Optional[Any] = None
+        self.client = docker.from_env()
+        self.container_name = f"iceaxe-postgres-test-{uuid.uuid4().hex[:8]}"
+
+    def start(self) -> Dict[str, Any]:
+        """
+        Start the PostgreSQL container.
+
+        Returns:
+            Dict[str, Any]: Connection information for the PostgreSQL container
+
+        Raises:
+            RuntimeError: If the container fails to start or become ready
+        """
+        logger.info(f"Starting PostgreSQL container on port {self.port}")
+
+        max_attempts = 3
+        attempt = 0
+
+        while attempt < max_attempts:
+            attempt += 1
+            try:
+                self.container = self._run_container(self.port)
+                break
+            except APIError as e:
+                if "port is already allocated" in str(e) and attempt < max_attempts:
+                    logger.warning(
+                        f"Port {self.port} is still in use. Trying with a new port (attempt {attempt}/{max_attempts})."
+                    )
+                    self.port = get_free_port()
+                else:
+                    raise RuntimeError(f"Failed to start PostgreSQL container: {e}")
+
+        # Wait for PostgreSQL to be ready
+        if not self._wait_for_container_ready():
+            self.stop()
+            raise RuntimeError("Failed to connect to PostgreSQL container")
+
+        return self.get_connection_info()
+
+    def _run_container(
+        self, port: int
+    ) -> Any:  # Type as Any since docker.models.containers.Container isn't imported
+        """
+        Run the Docker container with the specified port.
+
+        Args:
+            port: The port to map PostgreSQL to on the host
+
+        Returns:
+            The Docker container object
+        """
+        return self.client.containers.run(
+            f"postgres:{self.postgres_version}",
+            name=self.container_name,
+            detach=True,
+            environment={
+                "POSTGRES_USER": self.pg_user,
+                "POSTGRES_PASSWORD": self.pg_password,
+                "POSTGRES_DB": self.pg_db,
+                # Additional settings for faster startup in testing
+                "POSTGRES_HOST_AUTH_METHOD": "trust",
+            },
+            ports={"5432/tcp": port},
+            remove=True,  # Auto-remove container when stopped
+        )
+
+    def _wait_for_container_ready(self) -> bool:
+        """
+        Wait for the PostgreSQL container to be ready.
+
+        Returns:
+            bool: True if the container is ready, False otherwise
+        """
+        max_retries = 30
+        retry_interval = 1
+
+        for i in range(max_retries):
+            try:
+                if self.container is None:
+                    logger.warning("Container is None, cannot proceed")
+                    return False
+
+                # We've already checked that self.container is not None
+                container = cast(Any, self.container)
+                container.reload()  # Refresh container status
+                if container.status != "running":
+                    logger.warning(f"Container status: {container.status}")
+                    return False
+
+                # Try to connect to PostgreSQL
+                conn = socket.create_connection(("localhost", self.port), timeout=1)
+                conn.close()
+                # Wait a bit more to ensure PostgreSQL is fully initialized
+                time.sleep(2)
+                logger.info(f"PostgreSQL container is ready after {i + 1} attempt(s)")
+                return True
+            except (socket.error, ConnectionRefusedError) as e:
+                if i == max_retries - 1:
+                    logger.warning(
+                        f"Failed to connect after {max_retries} attempts: {e}"
+                    )
+                    return False
+                time.sleep(retry_interval)
+            except Exception as e:
+                logger.warning(f"Unexpected error checking container readiness: {e}")
+                if i == max_retries - 1:
+                    return False
+                time.sleep(retry_interval)
+
+        return False
+
+    def stop(self) -> None:
+        """
+        Stop the PostgreSQL container.
+
+        This method ensures the container is properly stopped and removed.
+        """
+        if self.container is not None:
+            try:
+                logger.info(f"Stopping PostgreSQL container {self.container_name}")
+                # We've already checked that self.container is not None
+                container = cast(Any, self.container)
+                container.stop(timeout=10)  # Allow 10 seconds for graceful shutdown
+            except Exception as e:
+                logger.warning(f"Failed to stop container: {e}")
+                try:
+                    # Force remove as a fallback
+                    if self.container is not None:
+                        self.container.remove(force=True)
+                    logger.info("Forced container removal")
+                except Exception as e2:
+                    logger.warning(f"Failed to force remove container: {e2}")
+
+    def get_connection_info(self) -> Dict[str, Any]:
+        """
+        Get the connection information for the PostgreSQL container.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing connection parameters
+        """
+        return {
+            "host": "localhost",
+            "port": self.port,
+            "user": self.pg_user,
+            "password": self.pg_password,
+            "database": self.pg_db,
+        }
+
+    def get_connection_string(self) -> str:
+        """
+        Get a PostgreSQL connection string.
+
+        Returns:
+            str: A connection string in the format 'postgresql://user:password@host:port/database'
+        """
+        return f"postgresql://{self.pg_user}:{self.pg_password}@localhost:{self.port}/{self.pg_db}"
```
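Nothing in the helper is pytest-specific, so it can be driven directly. A usage sketch under the assumption of a reachable local Docker daemon (all names come from the class above):

```python
# Standalone usage of PostgresContainer, outside of pytest.
from iceaxe.__tests__.docker_helpers import PostgresContainer

container = PostgresContainer(postgres_version="16")
try:
    info = container.start()  # blocks until the readiness probe passes
    print(info["host"], info["port"])
    # e.g. postgresql://iceaxe:mysecretpassword@localhost:<free port>/iceaxe_test_db
    print(container.get_connection_string())
finally:
    container.stop()  # the container is auto-removed because of remove=True
```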
```diff
--- iceaxe-0.7.0.dev3/iceaxe/__tests__/migrations/test_action_sorter.py
+++ iceaxe-0.7.1/iceaxe/__tests__/migrations/test_action_sorter.py
@@ -19,7 +19,7 @@ class MockNode(DBObject):
     async def create(self, actor: DatabaseActions):
         pass
 
-    async def migrate(self, previous:
+    async def migrate(self, previous: DBObject, actor: DatabaseActions):
         pass
 
     async def destroy(self, actor: DatabaseActions):
```
```diff
--- iceaxe-0.7.0.dev3/iceaxe/__tests__/test_session.py
+++ iceaxe-0.7.1/iceaxe/__tests__/test_session.py
@@ -780,7 +780,9 @@ async def test_upsert_multiple_conflict_fields(db_connection: DBConnection):
 
 
 @pytest.mark.asyncio
-async def test_for_update_prevents_concurrent_modification(
+async def test_for_update_prevents_concurrent_modification(
+    db_connection: DBConnection, docker_postgres
+):
     """
     Test that FOR UPDATE actually locks the row for concurrent modifications.
     """
@@ -799,11 +801,11 @@ async def test_for_update_prevents_concurrent_modification(db_connection: DBConn
     # until our transaction is done
     other_conn = DBConnection(
         await asyncpg.connect(
-            host="
-            port=
-            user="
-            password="
-            database="
+            host=docker_postgres["host"],
+            port=docker_postgres["port"],
+            user=docker_postgres["user"],
+            password=docker_postgres["password"],
+            database=docker_postgres["database"],
         )
     )
     try:
@@ -820,7 +822,7 @@ async def test_for_update_prevents_concurrent_modification(db_connection: DBConn
 
 
 @pytest.mark.asyncio
-async def test_for_update_skip_locked(db_connection: DBConnection):
+async def test_for_update_skip_locked(db_connection: DBConnection, docker_postgres):
     """
     Test that SKIP LOCKED works as expected.
     """
@@ -844,11 +846,11 @@ async def test_for_update_skip_locked(db_connection: DBConnection):
     # From another connection, try to select both users with SKIP LOCKED
     other_conn = DBConnection(
         await asyncpg.connect(
-            host="
-            port=
-            user="
-            password="
-            database="
+            host=docker_postgres["host"],
+            port=docker_postgres["port"],
+            user=docker_postgres["user"],
+            password=docker_postgres["password"],
+            database=docker_postgres["database"],
         )
     )
     try:
@@ -866,7 +868,7 @@ async def test_for_update_skip_locked(db_connection: DBConnection):
 
 
 @pytest.mark.asyncio
-async def test_for_update_of_with_join(db_connection: DBConnection):
+async def test_for_update_of_with_join(db_connection: DBConnection, docker_postgres):
     """
     Test FOR UPDATE OF with JOINed tables.
     """
@@ -892,11 +894,11 @@ async def test_for_update_of_with_join(db_connection: DBConnection):
     # but not the artifact
     other_conn = DBConnection(
         await asyncpg.connect(
-            host="
-            port=
-            user="
-            password="
-            database="
+            host=docker_postgres["host"],
+            port=docker_postgres["port"],
+            user=docker_postgres["user"],
+            password=docker_postgres["password"],
+            database=docker_postgres["database"],
         )
     )
     try:
@@ -1356,7 +1358,7 @@ async def test_batch_upsert_multiple_with_real_db(db_connection: DBConnection):
 
 
 @pytest.mark.asyncio
-async def test_initialize_types_caching():
+async def test_initialize_types_caching(docker_postgres):
     # Clear the global cache for isolation.
     TYPE_CACHE.clear()
 
@@ -1374,11 +1376,11 @@ async def test_initialize_types_caching():
 
     # Establish the first connection.
     conn1 = await asyncpg.connect(
-        host="
-        port=
-        user="
-        password="
-        database="
+        host=docker_postgres["host"],
+        port=docker_postgres["port"],
+        user=docker_postgres["user"],
+        password=docker_postgres["password"],
+        database=docker_postgres["database"],
     )
     db1 = DBConnection(conn1)
 
@@ -1417,11 +1419,11 @@ async def test_initialize_types_caching():
 
     # Create a second connection to the same database.
     conn2 = await asyncpg.connect(
-        host="
-        port=
-        user="
-        password="
-        database="
+        host=docker_postgres["host"],
+        port=docker_postgres["port"],
+        user=docker_postgres["user"],
+        password=docker_postgres["password"],
+        database=docker_postgres["database"],
     )
     db2 = DBConnection(conn2)
 
@@ -1475,7 +1477,7 @@ async def test_get_dsn(db_connection: DBConnection):
     assert dsn.startswith("postgresql://")
     assert "iceaxe" in dsn
     assert "localhost" in dsn
-    assert "
+    assert ":" in dsn  # Just verify there is a port
     assert "iceaxe_test_db" in dsn
 
 
```
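The lock tests above all share one pattern: hold a FOR UPDATE lock through `db_connection`, then probe visibility from a second raw asyncpg connection built from the `docker_postgres` dict. A condensed, hypothetical version of that probe (table name reused from conftest):

```python
# Condensed sketch of the second-connection probe used by the lock tests.
# The docker_postgres keys (host, port, user, password, database) line up
# with asyncpg.connect's keyword arguments, so the dict can be splatted.
import asyncpg


async def fetch_unlocked_users(docker_postgres):
    conn = await asyncpg.connect(**docker_postgres)
    try:
        # Rows locked elsewhere with FOR UPDATE are skipped, not waited on.
        return await conn.fetch("SELECT * FROM userdemo FOR UPDATE SKIP LOCKED")
    finally:
        await conn.close()
```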
```diff
--- iceaxe-0.7.0.dev3/iceaxe/migrations/migration.py
+++ iceaxe-0.7.1/iceaxe/migrations/migration.py
@@ -1,4 +1,5 @@
 from abc import abstractmethod
+from contextlib import asynccontextmanager
 
 from iceaxe.migrations.migrator import Migrator
 from iceaxe.session import DBConnection
@@ -20,12 +21,23 @@ class MigrationRevisionBase:
     up_revision: str
     down_revision: str | None
 
+    use_transaction: bool = True
+    """
+    Disables the transaction for the current migration. Only do this if you're
+    confident that the migration will succeed on the first try, or is otherwise
+    independent so it can be run multiple times.
+
+    This can speed up migrations, and in some cases might be even fully required for your
+    production database to avoid deadlocks when interacting with hot tables.
+
+    """
+
     async def _handle_up(self, db_connection: DBConnection):
         """
         Internal method to handle the up migration.
         """
         # Isolated migrator context just for this migration
-        async with
+        async with self._optional_transaction(db_connection):
             migrator = Migrator(db_connection)
             await self.up(migrator)
             await migrator.set_active_revision(self.up_revision)
@@ -34,11 +46,19 @@ class MigrationRevisionBase:
         """
         Internal method to handle the down migration.
         """
-        async with
+        async with self._optional_transaction(db_connection):
             migrator = Migrator(db_connection)
             await self.down(migrator)
             await migrator.set_active_revision(self.down_revision)
 
+    @asynccontextmanager
+    async def _optional_transaction(self, db_connection: DBConnection):
+        if self.use_transaction:
+            async with db_connection.transaction():
+                yield
+        else:
+            yield
+
     @abstractmethod
     async def up(self, migrator: Migrator):
         """
```
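A sketch of how a concrete migration might opt out of the wrapper, assuming the import paths implied by the hunk headers and the `raw_sql` helper added in the next diff; the revision ids and index are invented. `CREATE INDEX CONCURRENTLY` is the classic reason to do this, since Postgres refuses to run it inside a transaction block:

```python
# Hypothetical migration that disables the implicit transaction.
from iceaxe.migrations.migration import MigrationRevisionBase
from iceaxe.migrations.migrator import Migrator


class AddArticleTitleIndex(MigrationRevisionBase):
    up_revision = "rev_0002"  # invented revision ids
    down_revision = "rev_0001"
    use_transaction = False  # skip _optional_transaction's BEGIN/COMMIT

    async def up(self, migrator: Migrator):
        # CONCURRENTLY cannot run inside a transaction block.
        await migrator.raw_sql(
            "CREATE INDEX CONCURRENTLY IF NOT EXISTS article_title_idx ON article (title)"
        )

    async def down(self, migrator: Migrator):
        await migrator.raw_sql("DROP INDEX CONCURRENTLY IF EXISTS article_title_idx")
```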
````diff
--- iceaxe-0.7.0.dev3/iceaxe/migrations/migrator.py
+++ iceaxe-0.7.1/iceaxe/migrations/migrator.py
@@ -86,3 +86,16 @@ class Migrator:
 
         result = await self.db_connection.conn.fetch(query)
         return cast(str | None, result[0]["active_revision"] if result else None)
+
+    async def raw_sql(self, query: str, *args):
+        """
+        Shortcut to execute a raw SQL query against the database. Raw SQL can be more useful
+        than using ORM objects within migrations, because you can interact with the old & new data
+        schemas via text (whereas the runtime ORM is only aware of the current schema).
+
+        ```python {{sticky: True}}
+        await migrator.execute("CREATE TABLE IF NOT EXISTS users (id SERIAL PRIMARY KEY, name VARCHAR(255))")
+        ```
+
+        """
+        await self.db_connection.conn.execute(query, *args)
````