iceaxe-0.7.1-cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of iceaxe has been flagged as a potentially problematic release.

Files changed (75)
  1. iceaxe/__init__.py +20 -0
  2. iceaxe/__tests__/__init__.py +0 -0
  3. iceaxe/__tests__/benchmarks/__init__.py +0 -0
  4. iceaxe/__tests__/benchmarks/test_bulk_insert.py +45 -0
  5. iceaxe/__tests__/benchmarks/test_select.py +114 -0
  6. iceaxe/__tests__/conf_models.py +133 -0
  7. iceaxe/__tests__/conftest.py +204 -0
  8. iceaxe/__tests__/docker_helpers.py +208 -0
  9. iceaxe/__tests__/helpers.py +268 -0
  10. iceaxe/__tests__/migrations/__init__.py +0 -0
  11. iceaxe/__tests__/migrations/conftest.py +36 -0
  12. iceaxe/__tests__/migrations/test_action_sorter.py +237 -0
  13. iceaxe/__tests__/migrations/test_generator.py +140 -0
  14. iceaxe/__tests__/migrations/test_generics.py +91 -0
  15. iceaxe/__tests__/mountaineer/__init__.py +0 -0
  16. iceaxe/__tests__/mountaineer/dependencies/__init__.py +0 -0
  17. iceaxe/__tests__/mountaineer/dependencies/test_core.py +76 -0
  18. iceaxe/__tests__/schemas/__init__.py +0 -0
  19. iceaxe/__tests__/schemas/test_actions.py +1264 -0
  20. iceaxe/__tests__/schemas/test_cli.py +25 -0
  21. iceaxe/__tests__/schemas/test_db_memory_serializer.py +1525 -0
  22. iceaxe/__tests__/schemas/test_db_serializer.py +398 -0
  23. iceaxe/__tests__/schemas/test_db_stubs.py +190 -0
  24. iceaxe/__tests__/test_alias.py +83 -0
  25. iceaxe/__tests__/test_base.py +52 -0
  26. iceaxe/__tests__/test_comparison.py +383 -0
  27. iceaxe/__tests__/test_field.py +11 -0
  28. iceaxe/__tests__/test_helpers.py +9 -0
  29. iceaxe/__tests__/test_modifications.py +151 -0
  30. iceaxe/__tests__/test_queries.py +605 -0
  31. iceaxe/__tests__/test_queries_str.py +173 -0
  32. iceaxe/__tests__/test_session.py +1511 -0
  33. iceaxe/__tests__/test_text_search.py +287 -0
  34. iceaxe/alias_values.py +67 -0
  35. iceaxe/base.py +350 -0
  36. iceaxe/comparison.py +560 -0
  37. iceaxe/field.py +250 -0
  38. iceaxe/functions.py +906 -0
  39. iceaxe/generics.py +140 -0
  40. iceaxe/io.py +107 -0
  41. iceaxe/logging.py +91 -0
  42. iceaxe/migrations/__init__.py +5 -0
  43. iceaxe/migrations/action_sorter.py +98 -0
  44. iceaxe/migrations/cli.py +228 -0
  45. iceaxe/migrations/client_io.py +62 -0
  46. iceaxe/migrations/generator.py +404 -0
  47. iceaxe/migrations/migration.py +86 -0
  48. iceaxe/migrations/migrator.py +101 -0
  49. iceaxe/modifications.py +176 -0
  50. iceaxe/mountaineer/__init__.py +10 -0
  51. iceaxe/mountaineer/cli.py +74 -0
  52. iceaxe/mountaineer/config.py +46 -0
  53. iceaxe/mountaineer/dependencies/__init__.py +6 -0
  54. iceaxe/mountaineer/dependencies/core.py +67 -0
  55. iceaxe/postgres.py +133 -0
  56. iceaxe/py.typed +0 -0
  57. iceaxe/queries.py +1455 -0
  58. iceaxe/queries_str.py +294 -0
  59. iceaxe/schemas/__init__.py +0 -0
  60. iceaxe/schemas/actions.py +864 -0
  61. iceaxe/schemas/cli.py +30 -0
  62. iceaxe/schemas/db_memory_serializer.py +705 -0
  63. iceaxe/schemas/db_serializer.py +346 -0
  64. iceaxe/schemas/db_stubs.py +525 -0
  65. iceaxe/session.py +860 -0
  66. iceaxe/session_optimized.c +12035 -0
  67. iceaxe/session_optimized.cpython-313-darwin.so +0 -0
  68. iceaxe/session_optimized.pyx +212 -0
  69. iceaxe/sql_types.py +148 -0
  70. iceaxe/typing.py +73 -0
  71. iceaxe-0.7.1.dist-info/METADATA +261 -0
  72. iceaxe-0.7.1.dist-info/RECORD +75 -0
  73. iceaxe-0.7.1.dist-info/WHEEL +6 -0
  74. iceaxe-0.7.1.dist-info/licenses/LICENSE +21 -0
  75. iceaxe-0.7.1.dist-info/top_level.txt +1 -0
iceaxe/__init__.py ADDED
@@ -0,0 +1,20 @@
+ from .alias_values import alias as alias
+ from .base import (
+     IndexConstraint as IndexConstraint,
+     TableBase as TableBase,
+     UniqueConstraint as UniqueConstraint,
+ )
+ from .field import Field as Field
+ from .functions import func as func
+ from .postgres import PostgresDateTime as PostgresDateTime, PostgresTime as PostgresTime
+ from .queries import (
+     QueryBuilder as QueryBuilder,
+     and_ as and_,
+     delete as delete,
+     or_ as or_,
+     select as select,
+     update as update,
+ )
+ from .queries_str import sql as sql
+ from .session import DBConnection as DBConnection
+ from .typing import column as column
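
Taken together, these re-exports define iceaxe's public surface: table definitions (TableBase, Field, constraints), a query layer (select, update, delete, QueryBuilder, and_, or_, func, sql, alias, column), and the asyncpg-backed DBConnection session. A minimal usage sketch assembled from these symbols and the call patterns in the test suite below; the connection string is a placeholder, and the free-standing select(...) call is assumed to mirror QueryBuilder().select(...):

    import asyncio

    import asyncpg

    from iceaxe import DBConnection, Field, TableBase, select


    class User(TableBase):
        id: int = Field(primary_key=True, default=None)
        name: str
        email: str


    async def main():
        # Placeholder DSN; any reachable PostgreSQL instance works.
        conn = DBConnection(await asyncpg.connect("postgresql://localhost/demo"))
        await conn.insert([User(name="Alice", email="alice@example.com")])
        # Assumption: select(User) builds the same query as QueryBuilder().select(User).
        users = await conn.exec(select(User))
        print(users)


    asyncio.run(main())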
iceaxe/__tests__/__init__.py ADDED
File without changes
iceaxe/__tests__/benchmarks/__init__.py ADDED
File without changes
iceaxe/__tests__/benchmarks/test_bulk_insert.py ADDED
@@ -0,0 +1,45 @@
+ import time
+ from typing import Sequence
+
+ import pytest
+
+ from iceaxe.__tests__.conf_models import UserDemo
+ from iceaxe.logging import CONSOLE, LOGGER
+ from iceaxe.session import DBConnection
+
+
+ def generate_test_users(count: int) -> Sequence[UserDemo]:
+     """
+     Generate a sequence of test users for bulk insertion.
+
+     :param count: Number of users to generate
+     :return: Sequence of UserDemo instances
+     """
+     return [
+         UserDemo(name=f"User {i}", email=f"user{i}@example.com") for i in range(count)
+     ]
+
+
+ @pytest.mark.asyncio
+ @pytest.mark.integration_tests
+ async def test_bulk_insert_performance(db_connection: DBConnection):
+     """
+     Test the performance of bulk inserting 500k records.
+     """
+     NUM_USERS = 500_000
+     users = generate_test_users(NUM_USERS)
+     LOGGER.info(f"Generated {NUM_USERS} test users")
+
+     start_time = time.time()
+
+     await db_connection.insert(users)
+
+     total_time = time.time() - start_time
+     records_per_second = NUM_USERS / total_time
+
+     CONSOLE.print("\nBulk Insert Performance:")
+     CONSOLE.print(f"Total time: {total_time:.2f} seconds")
+     CONSOLE.print(f"Records per second: {records_per_second:.2f}")
+
+     result = await db_connection.conn.fetchval("SELECT COUNT(*) FROM userdemo")
+     assert result == NUM_USERS
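
The throughput figure printed above is plain count-over-wall-time arithmetic. A quick sanity check of the formula with made-up numbers (illustrative, not measured):

    NUM_USERS = 500_000
    total_time = 12.5  # hypothetical wall-clock seconds for the insert
    records_per_second = NUM_USERS / total_time  # -> 40_000.0 records/second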
iceaxe/__tests__/benchmarks/test_select.py ADDED
@@ -0,0 +1,114 @@
+ from enum import Enum
+ from time import monotonic_ns
+ from typing import Any
+
+ import asyncpg
+ import pytest
+
+ from iceaxe.__tests__.conf_models import UserDemo, run_profile
+ from iceaxe.logging import CONSOLE, LOGGER
+ from iceaxe.queries import QueryBuilder
+ from iceaxe.session import DBConnection
+
+
+ class FetchType(Enum):
+     ID = "id"
+     OBJ = "obj"
+
+
+ async def insert_users(conn: asyncpg.Connection, num_users: int):
+     users = [(f"User {i}", f"user{i}@example.com") for i in range(num_users)]
+     await conn.executemany("INSERT INTO userdemo (name, email) VALUES ($1, $2)", users)
+
+
+ async def fetch_users_raw(conn: asyncpg.Connection, fetch_type: FetchType) -> list[Any]:
+     if fetch_type == FetchType.OBJ:
+         return await conn.fetch("SELECT * FROM userdemo")  # type: ignore
+     elif fetch_type == FetchType.ID:
+         return await conn.fetch("SELECT id FROM userdemo")  # type: ignore
+     else:
+         raise ValueError(f"Invalid run profile: {fetch_type}")
+
+
+ def build_iceaxe_query(fetch_type: FetchType):
+     if fetch_type == FetchType.OBJ:
+         return QueryBuilder().select(UserDemo)
+     elif fetch_type == FetchType.ID:
+         return QueryBuilder().select(UserDemo.id)
+     else:
+         raise ValueError(f"Invalid run profile: {fetch_type}")
+
+
+ @pytest.mark.asyncio
+ @pytest.mark.integration_tests
+ @pytest.mark.parametrize(
+     "fetch_type, allowed_overhead",
+     [
+         (FetchType.ID, 10),
+         (FetchType.OBJ, 800),
+     ],
+ )
+ async def test_benchmark(
+     db_connection: DBConnection, request, fetch_type: FetchType, allowed_overhead: float
+ ):
+     num_users = 500_000
+     num_loops = 100
+
+     # Insert users using raw asyncpg
+     await insert_users(db_connection.conn, num_users)
+
+     # Benchmark raw asyncpg query
+     start_time = monotonic_ns()
+     raw_results: list[Any] = []
+     for _ in range(num_loops):
+         raw_results = await fetch_users_raw(db_connection.conn, fetch_type)
+     raw_time = monotonic_ns() - start_time
+     raw_time_seconds = raw_time / 1e9
+     raw_time_per_query = (raw_time / num_loops) / 1e9
+
+     LOGGER.info(
+         f"Raw asyncpg query time: {raw_time_per_query:.4f} (total: {raw_time_seconds:.4f}) seconds"
+     )
+     CONSOLE.print(
+         f"Raw asyncpg query time: {raw_time_per_query:.4f} (total: {raw_time_seconds:.4f}) seconds"
+     )
+
+     # Benchmark DBConnection.exec query
+     start_time = monotonic_ns()
+     query = build_iceaxe_query(fetch_type)
+     db_results: list[UserDemo] | list[int] = []
+     for _ in range(num_loops):
+         db_results = await db_connection.exec(query)
+     db_time = monotonic_ns() - start_time
+     db_time_seconds = db_time / 1e9
+     db_time_per_query = (db_time / num_loops) / 1e9
+
+     LOGGER.info(
+         f"DBConnection.exec query time: {db_time_per_query:.4f} (total: {db_time_seconds:.4f}) seconds"
+     )
+     CONSOLE.print(
+         f"DBConnection.exec query time: {db_time_per_query:.4f} (total: {db_time_seconds:.4f}) seconds"
+     )
+
+     # Slower than the raw run since we need to run the performance instrumentation
+     if False:
+         with run_profile(request):
+             # Right now we don't cache results so we can run multiple times to get a better measure of samples
+             for _ in range(num_loops):
+                 query = build_iceaxe_query(fetch_type)
+                 db_results = await db_connection.exec(query)
+
+     # Compare results
+     assert len(raw_results) == len(db_results) == num_users, "Result count mismatch"
+
+     # Calculate performance difference
+     performance_diff = (db_time - raw_time) / raw_time * 100
+     LOGGER.info(f"Performance difference: {performance_diff:.2f}%")
+     CONSOLE.print(f"Performance difference: {performance_diff:.2f}%")
+
+     # Assert that DBConnection.exec is at most X% slower than raw query
+     assert performance_diff <= allowed_overhead, (
+         f"DBConnection.exec is {performance_diff:.2f}% slower than raw query, which exceeds the {allowed_overhead}% threshold"
+     )
+
+     LOGGER.info("Benchmark completed successfully.")
iceaxe/__tests__/conf_models.py ADDED
@@ -0,0 +1,133 @@
+ from contextlib import contextmanager
+ from enum import StrEnum
+ from pathlib import Path
+ from typing import Any
+
+ from pyinstrument import Profiler
+
+ from iceaxe.base import Field, TableBase, UniqueConstraint
+
+
+ class UserDemo(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     name: str
+     email: str
+
+
+ class ArtifactDemo(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     title: str
+     user_id: int = Field(foreign_key="userdemo.id")
+
+
+ class ComplexDemo(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     string_list: list[str]
+     json_data: dict[str, str] = Field(is_json=True)
+
+
+ class Employee(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     email: str = Field(unique=True)
+     first_name: str
+     last_name: str
+     department: str
+     salary: float
+
+
+ class Department(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     name: str = Field(unique=True)
+     budget: float
+     location: str
+
+
+ class ProjectAssignment(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     employee_id: int = Field(foreign_key="employee.id")
+     project_name: str
+     role: str
+     start_date: str
+
+
+ class EmployeeStatus(StrEnum):
+     ACTIVE = "active"
+     INACTIVE = "inactive"
+     ON_LEAVE = "on_leave"
+
+
+ class EmployeeMetadata(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     employee_id: int = Field(foreign_key="employee.id")
+     status: EmployeeStatus
+     tags: list[str] = Field(is_json=True)
+     additional_info: dict[str, Any] = Field(is_json=True)
+
+
+ class FunctionDemoModel(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     balance: float
+     created_at: str
+     birth_date: str
+     start_date: str
+     end_date: str
+     year: int
+     month: int
+     day: int
+     hour: int
+     minute: int
+     second: int
+     years: int
+     months: int
+     days: int
+     weeks: int
+     hours: int
+     minutes: int
+     seconds: int
+     name: str
+     balance_str: str
+     timestamp_str: str
+
+
+ class DemoModelA(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     name: str
+     description: str
+     code: str = Field(unique=True)
+
+
+ class DemoModelB(TableBase):
+     id: int = Field(primary_key=True, default=None)
+     name: str
+     category: str
+     code: str = Field(unique=True)
+
+
+ class JsonDemo(TableBase):
+     """
+     Model for testing JSON field updates.
+     """
+
+     id: int | None = Field(primary_key=True, default=None)
+     settings: dict[Any, Any] = Field(is_json=True)
+     metadata: dict[Any, Any] | None = Field(is_json=True)
+     unique_val: str
+
+     table_args = [UniqueConstraint(columns=["unique_val"])]
+
+
+ @contextmanager
+ def run_profile(request):
+     TESTS_ROOT = Path.cwd()
+     PROFILE_ROOT = TESTS_ROOT / ".profiles"
+
+     # Turn profiling on
+     profiler = Profiler()
+     profiler.start()
+
+     yield  # Run test
+
+     profiler.stop()
+     PROFILE_ROOT.mkdir(exist_ok=True)
+     results_file = PROFILE_ROOT / f"{request.node.name}.html"
+     profiler.write_html(results_file)
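
run_profile wraps a test body in a pyinstrument Profiler and writes an HTML report to .profiles/<test-name>.html, keyed off pytest's request.node.name. A sketch of how a test could opt in, assuming the db_connection fixture from the conftest below; the test itself is illustrative:

    import pytest

    from iceaxe import QueryBuilder
    from iceaxe.__tests__.conf_models import UserDemo, run_profile


    @pytest.mark.asyncio
    async def test_profiled_select(db_connection, request):
        # `request` is pytest's built-in fixture; run_profile uses
        # request.node.name to name the HTML report it writes.
        with run_profile(request):
            for _ in range(10):  # repeat to collect more samples
                await db_connection.exec(QueryBuilder().select(UserDemo))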
iceaxe/__tests__/conftest.py ADDED
@@ -0,0 +1,204 @@
+ import logging
+
+ import asyncpg
+ import pytest
+ import pytest_asyncio
+
+ from iceaxe.__tests__ import docker_helpers
+ from iceaxe.base import DBModelMetaclass
+ from iceaxe.session import DBConnection
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ @pytest.fixture(scope="session")
+ def docker_postgres():
+     """
+     Fixture that creates a PostgreSQL container using the Python Docker API.
+     This allows running individual tests without needing Docker Compose.
+     """
+     # Create and start a PostgreSQL container
+     postgres_container = docker_helpers.PostgresContainer()
+
+     # Start the container and yield connection details
+     connection_info = postgres_container.start()
+     yield connection_info
+
+     # Cleanup: stop the container
+     postgres_container.stop()
+
+
+ @pytest_asyncio.fixture
+ async def db_connection(docker_postgres):
+     """
+     Create a database connection using the PostgreSQL container.
+     """
+     conn = DBConnection(
+         await asyncpg.connect(
+             host=docker_postgres["host"],
+             port=docker_postgres["port"],
+             user=docker_postgres["user"],
+             password=docker_postgres["password"],
+             database=docker_postgres["database"],
+         )
+     )
+
+     # Drop all tables first to ensure clean state
+     known_tables = [
+         "artifactdemo",
+         "userdemo",
+         "complexdemo",
+         "article",
+         "employee",
+         "department",
+         "projectassignment",
+         "employeemetadata",
+         "functiondemomodel",
+         "demomodela",
+         "demomodelb",
+         "jsondemo",
+         "complextypedemo",
+     ]
+     known_types = ["statusenum", "employeestatus"]
+
+     for table in known_tables:
+         await conn.conn.execute(f"DROP TABLE IF EXISTS {table} CASCADE", timeout=30.0)
+
+     for known_type in known_types:
+         await conn.conn.execute(
+             f"DROP TYPE IF EXISTS {known_type} CASCADE", timeout=30.0
+         )
+
+     # Create tables
+     await conn.conn.execute(
+         """
+         CREATE TABLE IF NOT EXISTS userdemo (
+             id SERIAL PRIMARY KEY,
+             name TEXT,
+             email TEXT
+         )
+         """,
+         timeout=30.0,
+     )
+
+     await conn.conn.execute(
+         """
+         CREATE TABLE IF NOT EXISTS artifactdemo (
+             id SERIAL PRIMARY KEY,
+             title TEXT,
+             user_id INT REFERENCES userdemo(id)
+         )
+         """,
+         timeout=30.0,
+     )
+
+     await conn.conn.execute(
+         """
+         CREATE TABLE IF NOT EXISTS complexdemo (
+             id SERIAL PRIMARY KEY,
+             string_list TEXT[],
+             json_data JSON
+         )
+         """,
+         timeout=30.0,
+     )
+
+     await conn.conn.execute(
+         """
+         CREATE TABLE IF NOT EXISTS article (
+             id SERIAL PRIMARY KEY,
+             title TEXT,
+             content TEXT,
+             summary TEXT
+         )
+         """,
+         timeout=30.0,
+     )
+
+     # Create each index separately to handle errors better
+     yield conn
+
+     # Drop all tables after tests
+     for table in known_tables:
+         await conn.conn.execute(f"DROP TABLE IF EXISTS {table} CASCADE", timeout=30.0)
+
+     # Drop all types after tests
+     for known_type in known_types:
+         await conn.conn.execute(
+             f"DROP TYPE IF EXISTS {known_type} CASCADE", timeout=30.0
+         )
+
+     await conn.conn.close()
+
+
+ @pytest_asyncio.fixture()
+ async def indexed_db_connection(db_connection: DBConnection):
+     await db_connection.conn.execute(
+         "CREATE INDEX IF NOT EXISTS article_title_tsv_idx ON article USING GIN (to_tsvector('english', title))",
+         timeout=30.0,
+     )
+     await db_connection.conn.execute(
+         "CREATE INDEX IF NOT EXISTS article_content_tsv_idx ON article USING GIN (to_tsvector('english', content))",
+         timeout=30.0,
+     )
+     await db_connection.conn.execute(
+         "CREATE INDEX IF NOT EXISTS article_summary_tsv_idx ON article USING GIN (to_tsvector('english', summary))",
+         timeout=30.0,
+     )
+
+     yield db_connection
+
+
+ @pytest_asyncio.fixture(autouse=True)
+ async def clear_table(db_connection):
+     # Clear all tables and reset sequences
+     await db_connection.conn.execute(
+         "TRUNCATE TABLE userdemo, article RESTART IDENTITY CASCADE", timeout=30.0
+     )
+
+
+ @pytest_asyncio.fixture
+ async def clear_all_database_objects(db_connection: DBConnection):
+     """
+     Clear all database objects.
+     """
+     # Step 1: Drop all tables in the public schema
+     await db_connection.conn.execute(
+         """
+         DO $$ DECLARE
+             r RECORD;
+         BEGIN
+             FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = 'public') LOOP
+                 EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE';
+             END LOOP;
+         END $$;
+         """,
+         timeout=30.0,
+     )
+
+     # Step 2: Drop all custom types in the public schema
+     await db_connection.conn.execute(
+         """
+         DO $$ DECLARE
+             r RECORD;
+         BEGIN
+             FOR r IN (SELECT typname FROM pg_type WHERE typtype = 'e' AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'public')) LOOP
+                 EXECUTE 'DROP TYPE IF EXISTS ' || quote_ident(r.typname) || ' CASCADE';
+             END LOOP;
+         END $$;
+         """,
+         timeout=30.0,
+     )
+
+
+ @pytest.fixture
+ def clear_registry():
+     current_registry = DBModelMetaclass._registry
+     DBModelMetaclass._registry = []
+
+     try:
+         yield
+     finally:
+         DBModelMetaclass._registry = current_registry
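
clear_registry swaps DBModelMetaclass._registry for an empty list and restores the original afterwards, so a test can declare throwaway TableBase subclasses without leaking them into the global model registry. A sketch of the intended pattern, assuming models register themselves via the metaclass at class-creation time; the model here is illustrative:

    from iceaxe import Field, TableBase


    def test_scratch_model(clear_registry):
        # Declared under the fixture, so the registration is discarded
        # when the original registry is restored.
        class ScratchModel(TableBase):
            id: int = Field(primary_key=True, default=None)
            value: str

        instance = ScratchModel(value="temporary")
        assert instance.value == "temporary"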