iceaxe 0.2.3.dev1__tar.gz → 0.2.3.dev3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65) hide show
  1. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/PKG-INFO +1 -1
  2. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/test_queries.py +15 -1
  3. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/test_session.py +50 -0
  4. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/base.py +8 -0
  5. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/queries.py +100 -15
  6. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/schemas/db_memory_serializer.py +1 -6
  7. iceaxe-0.2.3.dev3/iceaxe/session_optimized.pyx +199 -0
  8. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/pyproject.toml +1 -1
  9. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/setup.py +1 -1
  10. iceaxe-0.2.3.dev1/iceaxe/session_optimized.pyx +0 -102
  11. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/LICENSE +0 -0
  12. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/README.md +0 -0
  13. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/build.py +0 -0
  14. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/.DS_Store +0 -0
  15. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__init__.py +0 -0
  16. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/__init__.py +0 -0
  17. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/benchmarks/__init__.py +0 -0
  18. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/benchmarks/test_select.py +0 -0
  19. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/conf_models.py +0 -0
  20. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/conftest.py +0 -0
  21. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/migrations/__init__.py +0 -0
  22. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/migrations/conftest.py +0 -0
  23. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/migrations/test_action_sorter.py +0 -0
  24. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/migrations/test_generator.py +0 -0
  25. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/migrations/test_generics.py +0 -0
  26. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/mountaineer/__init__.py +0 -0
  27. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/mountaineer/dependencies/__init__.py +0 -0
  28. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/mountaineer/dependencies/test_core.py +0 -0
  29. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/schemas/__init__.py +0 -0
  30. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/schemas/test_actions.py +0 -0
  31. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/schemas/test_cli.py +0 -0
  32. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/schemas/test_db_memory_serializer.py +0 -0
  33. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/schemas/test_db_serializer.py +0 -0
  34. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/schemas/test_db_stubs.py +0 -0
  35. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/test_base.py +0 -0
  36. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/test_comparison.py +0 -0
  37. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/__tests__/test_field.py +0 -0
  38. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/comparison.py +0 -0
  39. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/field.py +0 -0
  40. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/functions.py +0 -0
  41. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/generics.py +0 -0
  42. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/io.py +0 -0
  43. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/logging.py +0 -0
  44. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/migrations/__init__.py +0 -0
  45. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/migrations/action_sorter.py +0 -0
  46. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/migrations/cli.py +0 -0
  47. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/migrations/client_io.py +0 -0
  48. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/migrations/generator.py +0 -0
  49. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/migrations/migration.py +0 -0
  50. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/migrations/migrator.py +0 -0
  51. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/mountaineer/__init__.py +0 -0
  52. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/mountaineer/cli.py +0 -0
  53. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/mountaineer/config.py +0 -0
  54. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/mountaineer/dependencies/__init__.py +0 -0
  55. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/mountaineer/dependencies/core.py +0 -0
  56. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/postgres.py +0 -0
  57. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/py.typed +0 -0
  58. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/queries_str.py +0 -0
  59. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/schemas/__init__.py +0 -0
  60. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/schemas/actions.py +0 -0
  61. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/schemas/cli.py +0 -0
  62. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/schemas/db_serializer.py +0 -0
  63. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/schemas/db_stubs.py +0 -0
  64. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/session.py +0 -0
  65. {iceaxe-0.2.3.dev1 → iceaxe-0.2.3.dev3}/iceaxe/typing.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: iceaxe
3
- Version: 0.2.3.dev1
3
+ Version: 0.2.3.dev3
4
4
  Summary: A modern, fast ORM for Python.
5
5
  Author: Pierce Freeman
6
6
  Author-email: pierce@freeman.vc
@@ -9,7 +9,11 @@ from iceaxe.queries import QueryBuilder, and_, or_, select
9
9
 
10
10
  def test_select():
11
11
  new_query = QueryBuilder().select(UserDemo)
12
- assert new_query.build() == ('SELECT "userdemo".* FROM "userdemo"', [])
12
+ assert new_query.build() == (
13
+ 'SELECT "userdemo"."id" as "userdemo_id", "userdemo"."name" as '
14
+ '"userdemo_name", "userdemo"."email" as "userdemo_email" FROM "userdemo"',
15
+ [],
16
+ )
13
17
 
14
18
 
15
19
  def test_select_single_field():
@@ -263,3 +267,13 @@ def test_select_multiple_typehints():
263
267
  query = select((UserDemo, UserDemo.id, UserDemo.name))
264
268
  if TYPE_CHECKING:
265
269
  _: QueryBuilder[tuple[UserDemo, int, str], Literal["SELECT"]] = query
270
+
271
+
272
+ def test_allow_branching():
273
+ base_query = select(UserDemo)
274
+
275
+ query_1 = base_query.limit(1)
276
+ query_2 = base_query.limit(2)
277
+
278
+ assert query_1.limit_value == 1
279
+ assert query_2.limit_value == 2
@@ -281,6 +281,30 @@ async def test_select_join(db_connection: DBConnection):
281
281
  ]
282
282
 
283
283
 
284
+ @pytest.mark.asyncio
285
+ async def test_select_join_multiple_tables(db_connection: DBConnection):
286
+ user = UserDemo(name="John Doe", email="john@example.com")
287
+ await db_connection.insert([user])
288
+ assert user.id is not None
289
+
290
+ artifact = ArtifactDemo(title="Artifact 1", user_id=user.id)
291
+ await db_connection.insert([artifact])
292
+
293
+ new_query = (
294
+ QueryBuilder()
295
+ .select((ArtifactDemo, UserDemo))
296
+ .join(UserDemo, UserDemo.id == ArtifactDemo.user_id)
297
+ .where(UserDemo.name == "John Doe")
298
+ )
299
+ result = await db_connection.exec(new_query)
300
+ assert result == [
301
+ (
302
+ ArtifactDemo(id=artifact.id, title="Artifact 1", user_id=user.id),
303
+ UserDemo(id=user.id, name="John Doe", email="john@example.com"),
304
+ )
305
+ ]
306
+
307
+
284
308
  @pytest.mark.asyncio
285
309
  async def test_select_with_limit_and_offset(db_connection: DBConnection):
286
310
  users = [
@@ -418,6 +442,32 @@ async def test_select_with_left_join(db_connection: DBConnection):
418
442
  assert result[1] == ("John", 2)
419
443
 
420
444
 
445
+ @pytest.mark.asyncio
446
+ async def test_select_with_left_join_object(db_connection: DBConnection):
447
+ users = [
448
+ UserDemo(name="John", email="john@example.com"),
449
+ UserDemo(name="Jane", email="jane@example.com"),
450
+ ]
451
+ await db_connection.insert(users)
452
+
453
+ posts = [
454
+ ArtifactDemo(title="John's Post", user_id=users[0].id),
455
+ ArtifactDemo(title="Another Post", user_id=users[0].id),
456
+ ]
457
+ await db_connection.insert(posts)
458
+
459
+ query = (
460
+ QueryBuilder()
461
+ .select((UserDemo, ArtifactDemo))
462
+ .join(ArtifactDemo, UserDemo.id == ArtifactDemo.user_id, "LEFT")
463
+ )
464
+ result = await db_connection.exec(query)
465
+ assert len(result) == 3
466
+ assert result[0] == (users[0], posts[0])
467
+ assert result[1] == (users[0], posts[1])
468
+ assert result[2] == (users[1], None)
469
+
470
+
421
471
  # @pytest.mark.asyncio
422
472
  # async def test_select_with_subquery(db_connection: DBConnection):
423
473
  # users = [
@@ -136,3 +136,11 @@ class TableBase(BaseModel, metaclass=DBModelMetaclass):
136
136
  if cls.table_name == PydanticUndefined:
137
137
  return cls.__name__.lower()
138
138
  return cls.table_name
139
+
140
+ @classmethod
141
+ def get_client_fields(cls):
142
+ return {
143
+ field: info
144
+ for field, info in cls.model_fields.items()
145
+ if field not in INTERNAL_TABLE_FIELDS
146
+ }
@@ -1,5 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
+ from copy import copy
4
+ from functools import wraps
3
5
  from typing import Any, Generic, Literal, Type, TypeVar, TypeVarTuple, cast, overload
4
6
 
5
7
  from iceaxe.base import (
@@ -29,17 +31,20 @@ from iceaxe.typing import (
29
31
 
30
32
  P = TypeVar("P")
31
33
 
32
- T = TypeVar(
33
- "T",
34
- bound=TableBase
34
+ SUPPORTED_SELECTS = (
35
+ TableBase
35
36
  | DBModelMetaclass
36
37
  | ALL_ENUM_TYPES
37
38
  | PRIMITIVE_TYPES
38
39
  | PRIMITIVE_WRAPPER_TYPES
39
40
  | DATE_TYPES
40
41
  | JSON_WRAPPER_FALLBACK
41
- | None,
42
+ | None
42
43
  )
44
+
45
+ T = TypeVar("T", bound=SUPPORTED_SELECTS)
46
+ T2 = TypeVar("T2", bound=SUPPORTED_SELECTS)
47
+ T3 = TypeVar("T3", bound=SUPPORTED_SELECTS)
43
48
  Ts = TypeVarTuple("Ts")
44
49
 
45
50
 
@@ -50,6 +55,22 @@ JoinType = Literal["INNER", "LEFT", "RIGHT", "FULL"]
50
55
  OrderDirection = Literal["ASC", "DESC"]
51
56
 
52
57
 
58
+ def allow_branching(fn):
59
+ """
60
+ Allows query method modifiers to implement their logic as if `self` is being
61
+ modified, but in the background we'll actually return a new instance of the
62
+ query builder to allow for branching of the same underlying query.
63
+
64
+ """
65
+
66
+ @wraps(fn)
67
+ def new_fn(self, *args, **kwargs):
68
+ self = copy(self)
69
+ return fn(self, *args, **kwargs)
70
+
71
+ return new_fn
72
+
73
+
53
74
  class QueryBuilder(Generic[P, QueryType]):
54
75
  """
55
76
  The QueryBuilder owns all construction of the SQL string given
@@ -102,19 +123,44 @@ class QueryBuilder(Generic[P, QueryType]):
102
123
 
103
124
  @overload
104
125
  def select(
105
- self, fields: tuple[T | Type[T], *Ts]
106
- ) -> QueryBuilder[tuple[T, *Ts], Literal["SELECT"]]: ...
126
+ self, fields: tuple[T | Type[T]]
127
+ ) -> QueryBuilder[tuple[T], Literal["SELECT"]]: ...
128
+
129
+ @overload
130
+ def select(
131
+ self, fields: tuple[T | Type[T], T2 | Type[T2]]
132
+ ) -> QueryBuilder[tuple[T, T2], Literal["SELECT"]]: ...
107
133
 
134
+ @overload
108
135
  def select(
109
- self, fields: T | Type[T] | tuple[T | Type[T], *Ts]
136
+ self, fields: tuple[T | Type[T], T2 | Type[T2], T3 | Type[T3], *Ts]
137
+ ) -> QueryBuilder[tuple[T, T2, T3, *Ts], Literal["SELECT"]]: ...
138
+
139
+ @allow_branching
140
+ def select(
141
+ self,
142
+ fields: (
143
+ T
144
+ | Type[T]
145
+ | tuple[T | Type[T]]
146
+ | tuple[T | Type[T], T2 | Type[T2]]
147
+ | tuple[T | Type[T], T2 | Type[T2], T3 | Type[T3], *Ts]
148
+ ),
110
149
  ) -> (
111
- QueryBuilder[tuple[T, *Ts], Literal["SELECT"]]
112
- | QueryBuilder[T, Literal["SELECT"]]
150
+ QueryBuilder[T, Literal["SELECT"]]
151
+ | QueryBuilder[tuple[T], Literal["SELECT"]]
152
+ | QueryBuilder[tuple[T, T2], Literal["SELECT"]]
153
+ | QueryBuilder[tuple[T, T2, T3, *Ts], Literal["SELECT"]]
113
154
  ):
114
155
  """
115
156
  Creates a new select query for the given fields. Returns the same
116
157
  QueryBuilder that is now flagged as a SELECT query.
117
158
 
159
+ Our select @overrides here support the required conversion from a table class (which
160
+ is specified as raw input) to individual instances which are returned. This is only
161
+ relevant for table classes since field selections should be 1:1 mirrored from the
162
+ request field annotation to the response type.
163
+
118
164
  """
119
165
  all_fields: tuple[
120
166
  DBFieldClassDefinition | Type[TableBase] | FunctionMetadata, ...
@@ -165,8 +211,15 @@ class QueryBuilder(Generic[P, QueryType]):
165
211
  self.select_raw.append(field)
166
212
  elif is_base_table(field):
167
213
  table_token = QueryIdentifier(field.get_table_name())
168
- field_token = QueryLiteral("*")
169
- self.select_fields.append(QueryLiteral(f"{table_token}.{field_token}"))
214
+
215
+ for field_name in field.get_client_fields():
216
+ field_token = QueryIdentifier(field_name)
217
+ return_field = QueryIdentifier(
218
+ f"{field.get_table_name()}_{field_name}"
219
+ )
220
+ self.select_fields.append(
221
+ QueryLiteral(f"{table_token}.{field_token} as {return_field}")
222
+ )
170
223
  self.select_raw.append(field)
171
224
  elif is_function_metadata(field):
172
225
  field.local_name = f"aggregate_{self.select_aggregate_count}"
@@ -178,6 +231,7 @@ class QueryBuilder(Generic[P, QueryType]):
178
231
  self.select_raw.append(field)
179
232
  self.select_aggregate_count += 1
180
233
 
234
+ @allow_branching
181
235
  def update(self, model: Type[TableBase]) -> QueryBuilder[None, Literal["UPDATE"]]:
182
236
  """
183
237
  Creates a new update query for the given model. Returns the same
@@ -188,6 +242,7 @@ class QueryBuilder(Generic[P, QueryType]):
188
242
  self.main_model = model
189
243
  return self # type: ignore
190
244
 
245
+ @allow_branching
191
246
  def delete(self, model: Type[TableBase]) -> QueryBuilder[None, Literal["DELETE"]]:
192
247
  """
193
248
  Creates a new delete query for the given model. Returns the same
@@ -198,6 +253,7 @@ class QueryBuilder(Generic[P, QueryType]):
198
253
  self.main_model = model
199
254
  return self # type: ignore
200
255
 
256
+ @allow_branching
201
257
  def where(self, *conditions: bool):
202
258
  """
203
259
  Adds a where condition to the query. The conditions are combined with
@@ -216,6 +272,7 @@ class QueryBuilder(Generic[P, QueryType]):
216
272
  self.where_conditions += validated_comparisons
217
273
  return self
218
274
 
275
+ @allow_branching
219
276
  def order_by(self, field: Any, direction: OrderDirection = "ASC"):
220
277
  """
221
278
  Adds an order by clause to the query. The field must be a column.
@@ -231,6 +288,7 @@ class QueryBuilder(Generic[P, QueryType]):
231
288
  self.order_by_clauses.append(f"{field_token} {direction}")
232
289
  return self
233
290
 
291
+ @allow_branching
234
292
  def join(self, table: Type[TableBase], on: bool, join_type: JoinType = "INNER"):
235
293
  """
236
294
  Adds a join clause to the query. The `on` parameter should be a comparison
@@ -255,6 +313,7 @@ class QueryBuilder(Generic[P, QueryType]):
255
313
  self.join_clauses.append(join_sql)
256
314
  return self
257
315
 
316
+ @allow_branching
258
317
  def set(self, column: T, value: T | None):
259
318
  """
260
319
  Sets a column to a specific value in an update query.
@@ -266,6 +325,7 @@ class QueryBuilder(Generic[P, QueryType]):
266
325
  self.update_values.append((column, value))
267
326
  return self
268
327
 
328
+ @allow_branching
269
329
  def limit(self, value: int):
270
330
  """
271
331
  Limit the number of rows returned by the query. Useful in pagination
@@ -275,6 +335,7 @@ class QueryBuilder(Generic[P, QueryType]):
275
335
  self.limit_value = value
276
336
  return self
277
337
 
338
+ @allow_branching
278
339
  def offset(self, value: int):
279
340
  """
280
341
  Offset the number of rows returned by the query.
@@ -283,6 +344,7 @@ class QueryBuilder(Generic[P, QueryType]):
283
344
  self.offset_value = value
284
345
  return self
285
346
 
347
+ @allow_branching
286
348
  def group_by(self, *fields: Any):
287
349
  """
288
350
  Groups the results of the query by the given fields. This allows
@@ -300,6 +362,7 @@ class QueryBuilder(Generic[P, QueryType]):
300
362
  self.group_by_fields = valid_fields
301
363
  return self
302
364
 
365
+ @allow_branching
303
366
  def having(self, *conditions: bool):
304
367
  """
305
368
  Require the result of an aggregation query like func.sum(MyTable.column)
@@ -317,6 +380,7 @@ class QueryBuilder(Generic[P, QueryType]):
317
380
  self.having_conditions += valid_conditions
318
381
  return self
319
382
 
383
+ @allow_branching
320
384
  def text(self, query: str, *variables: Any):
321
385
  """
322
386
  Override the ORM builder and use a raw SQL query instead.
@@ -460,14 +524,35 @@ def select(fields: T | Type[T]) -> QueryBuilder[T, Literal["SELECT"]]: ...
460
524
 
461
525
  @overload
462
526
  def select(
463
- fields: tuple[T | Type[T], *Ts],
464
- ) -> QueryBuilder[tuple[T, *Ts], Literal["SELECT"]]: ...
527
+ fields: tuple[T | Type[T]],
528
+ ) -> QueryBuilder[tuple[T], Literal["SELECT"]]: ...
529
+
530
+
531
+ @overload
532
+ def select(
533
+ fields: tuple[T | Type[T], T2 | Type[T2]],
534
+ ) -> QueryBuilder[tuple[T, T2], Literal["SELECT"]]: ...
535
+
536
+
537
+ @overload
538
+ def select(
539
+ fields: tuple[T | Type[T], T2 | Type[T2], T3 | Type[T3], *Ts],
540
+ ) -> QueryBuilder[tuple[T, T2, T3, *Ts], Literal["SELECT"]]: ...
465
541
 
466
542
 
467
543
  def select(
468
- fields: T | Type[T] | tuple[T | Type[T], *Ts],
544
+ fields: (
545
+ T
546
+ | Type[T]
547
+ | tuple[T | Type[T]]
548
+ | tuple[T | Type[T], T2 | Type[T2]]
549
+ | tuple[T | Type[T], T2 | Type[T2], T3 | Type[T3], *Ts]
550
+ ),
469
551
  ) -> (
470
- QueryBuilder[tuple[T, *Ts], Literal["SELECT"]] | QueryBuilder[T, Literal["SELECT"]]
552
+ QueryBuilder[T, Literal["SELECT"]]
553
+ | QueryBuilder[tuple[T], Literal["SELECT"]]
554
+ | QueryBuilder[tuple[T, T2], Literal["SELECT"]]
555
+ | QueryBuilder[tuple[T, T2, T3, *Ts], Literal["SELECT"]]
471
556
  ):
472
557
  """
473
558
  Shortcut to create a SELECT query with a new QueryBuilder.
@@ -7,7 +7,6 @@ from uuid import UUID
7
7
  from pydantic_core import PydanticUndefined
8
8
 
9
9
  from iceaxe.base import (
10
- INTERNAL_TABLE_FIELDS,
11
10
  DBFieldInfo,
12
11
  IndexConstraint,
13
12
  TableBase,
@@ -244,11 +243,7 @@ class DatabaseHandler:
244
243
 
245
244
  # Handle the columns
246
245
  all_column_nodes: list[NodeDefinition] = []
247
- for field_name, field in table.model_fields.items():
248
- # Only create user-columns
249
- if field_name in INTERNAL_TABLE_FIELDS:
250
- continue
251
-
246
+ for field_name, field in table.get_client_fields().items():
252
247
  column_nodes = self._yield_nodes(
253
248
  self.convert_column(field_name, field, table), dependencies=table_nodes
254
249
  )
@@ -0,0 +1,199 @@
1
+ from typing import Any, List, Tuple
2
+ from iceaxe.base import TableBase
3
+ from iceaxe.queries import FunctionMetadata
4
+ from json import loads as json_loads
5
+ from cpython.ref cimport PyObject
6
+ from cpython.object cimport PyObject_GetItem
7
+ from libc.stdlib cimport malloc, free
8
+ from libc.string cimport memcpy
9
+ from cpython.ref cimport Py_INCREF, Py_DECREF
10
+
11
+ cdef struct FieldInfo:
12
+ char* name # Field name
13
+ char* select_attribute # Corresponding attribute in the select_raw
14
+ bint is_json # Flag indicating if the field is JSON
15
+
16
+ cdef char* allocate_cstring(bytes data):
17
+ cdef Py_ssize_t length = len(data)
18
+ cdef char* c_str = <char*>malloc((length + 1) * sizeof(char))
19
+ if not c_str:
20
+ raise MemoryError("Failed to allocate memory for C string.")
21
+ memcpy(c_str, <char*>data, length) # Cast bytes to char* for memcpy
22
+ c_str[length] = 0 # Null-terminate the string
23
+ return c_str
24
+
25
+ cdef void free_fields(FieldInfo** fields, Py_ssize_t* num_fields_array, Py_ssize_t num_selects):
26
+ cdef Py_ssize_t j, k
27
+ if fields:
28
+ for j in range(num_selects):
29
+ if fields[j]:
30
+ for k in range(num_fields_array[j]):
31
+ free(fields[j][k].name)
32
+ free(fields[j][k].select_attribute)
33
+ free(fields[j])
34
+ free(fields)
35
+ if num_fields_array:
36
+ free(num_fields_array)
37
+
38
+ cdef FieldInfo** precompute_fields(list select_raws, list select_types, Py_ssize_t num_selects, Py_ssize_t* num_fields_array):
39
+ cdef FieldInfo** fields = <FieldInfo**>malloc(num_selects * sizeof(FieldInfo*))
40
+ cdef Py_ssize_t j, k, num_fields
41
+ cdef dict field_dict
42
+ cdef bytes select_bytes, field_bytes
43
+ cdef char* c_select
44
+ cdef char* c_field
45
+ cdef object select_raw
46
+ cdef bint raw_is_table, raw_is_column, raw_is_function_metadata
47
+
48
+ if not fields:
49
+ raise MemoryError("Failed to allocate memory for fields.")
50
+
51
+ for j in range(num_selects):
52
+ select_raw = select_raws[j]
53
+ raw_is_table, raw_is_column, raw_is_function_metadata = select_types[j]
54
+
55
+ if raw_is_table:
56
+ field_dict = {field: info.is_json for field, info in select_raw.get_client_fields().items() if not info.exclude}
57
+ num_fields = len(field_dict)
58
+ num_fields_array[j] = num_fields
59
+ fields[j] = <FieldInfo*>malloc(num_fields * sizeof(FieldInfo))
60
+ if not fields[j]:
61
+ raise MemoryError("Failed to allocate memory for FieldInfo.")
62
+
63
+ for k, (field, is_json) in enumerate(field_dict.items()):
64
+ select_bytes = f"{select_raw.get_table_name()}_{field}".encode('utf-8')
65
+ c_select = allocate_cstring(select_bytes)
66
+
67
+ field_bytes = field.encode('utf-8')
68
+ c_field = allocate_cstring(field_bytes)
69
+
70
+ fields[j][k].select_attribute = c_select
71
+ fields[j][k].name = c_field
72
+ fields[j][k].is_json = is_json
73
+ else:
74
+ num_fields_array[j] = 0
75
+ fields[j] = NULL
76
+
77
+ return fields
78
+
79
+ cdef list process_values(
80
+ list values,
81
+ FieldInfo** fields,
82
+ Py_ssize_t* num_fields_array,
83
+ list select_raws,
84
+ list select_types,
85
+ Py_ssize_t num_selects
86
+ ):
87
+ cdef Py_ssize_t num_values = len(values)
88
+ cdef list result_all = [None] * num_values
89
+ cdef Py_ssize_t i, j, k, num_fields
90
+ cdef PyObject** result_value
91
+ cdef object value, obj, item
92
+ cdef dict obj_dict
93
+ cdef bint raw_is_table, raw_is_column, raw_is_function_metadata
94
+ cdef char* field_name_c
95
+ cdef char* select_name_c
96
+ cdef str field_name
97
+ cdef str select_name
98
+ cdef object field_value
99
+ cdef object select_raw
100
+ cdef PyObject* temp_obj
101
+ cdef bint all_none
102
+
103
+ for i in range(num_values):
104
+ value = values[i]
105
+ result_value = <PyObject**>malloc(num_selects * sizeof(PyObject*))
106
+ if not result_value:
107
+ raise MemoryError("Failed to allocate memory for result_value.")
108
+ try:
109
+ for j in range(num_selects):
110
+ select_raw = select_raws[j]
111
+ raw_is_table, raw_is_column, raw_is_function_metadata = select_types[j]
112
+
113
+ if raw_is_table:
114
+ obj_dict = {}
115
+ num_fields = num_fields_array[j]
116
+ all_none = True
117
+
118
+ # First pass: collect all fields and check if they're all None
119
+ for k in range(num_fields):
120
+ field_name_c = fields[j][k].name
121
+ select_name_c = fields[j][k].select_attribute
122
+ field_name = field_name_c.decode('utf-8')
123
+ select_name = select_name_c.decode('utf-8')
124
+
125
+ try:
126
+ field_value = value[select_name]
127
+ except KeyError:
128
+ raise KeyError(f"Key '{select_name}' not found in value.")
129
+
130
+ if field_value is not None:
131
+ all_none = False
132
+ if fields[j][k].is_json:
133
+ field_value = json_loads(field_value)
134
+
135
+ obj_dict[field_name] = field_value
136
+
137
+ # If all fields are None, store None instead of creating the table object
138
+ if all_none:
139
+ result_value[j] = <PyObject*>None
140
+ Py_INCREF(None)
141
+ else:
142
+ obj = select_raw(**obj_dict)
143
+ result_value[j] = <PyObject*>obj
144
+ Py_INCREF(obj)
145
+
146
+ elif raw_is_column:
147
+ try:
148
+ item = value[select_raw.key]
149
+ except KeyError:
150
+ raise KeyError(f"Key '{select_raw.key}' not found in value.")
151
+ result_value[j] = <PyObject*>item
152
+ Py_INCREF(item)
153
+
154
+ elif raw_is_function_metadata:
155
+ try:
156
+ item = value[select_raw.local_name]
157
+ except KeyError:
158
+ raise KeyError(f"Key '{select_raw.local_name}' not found in value.")
159
+ result_value[j] = <PyObject*>item
160
+ Py_INCREF(item)
161
+
162
+ # Assemble the result
163
+ if num_selects == 1:
164
+ result_all[i] = <object>result_value[0]
165
+ Py_DECREF(<object>result_value[0])
166
+ else:
167
+ result_tuple = tuple([<object>result_value[j] for j in range(num_selects)])
168
+ for j in range(num_selects):
169
+ Py_DECREF(<object>result_value[j])
170
+ result_all[i] = result_tuple
171
+
172
+ finally:
173
+ free(result_value)
174
+
175
+ return result_all
176
+
177
+ cdef list optimize_casting(list values, list select_raws, list select_types):
178
+ cdef Py_ssize_t num_selects = len(select_raws)
179
+ cdef Py_ssize_t* num_fields_array = <Py_ssize_t*>malloc(num_selects * sizeof(Py_ssize_t))
180
+ cdef FieldInfo** fields
181
+ cdef list result_all
182
+
183
+ if not num_fields_array:
184
+ raise MemoryError("Failed to allocate memory for num_fields_array.")
185
+
186
+ try:
187
+ fields = precompute_fields(select_raws, select_types, num_selects, num_fields_array)
188
+ result_all = process_values(values, fields, num_fields_array, select_raws, select_types, num_selects)
189
+ finally:
190
+ free_fields(fields, num_fields_array, num_selects)
191
+
192
+ return result_all
193
+
194
+ def optimize_exec_casting(
195
+ values: List[Any],
196
+ select_raws: List[Any],
197
+ select_types: List[Tuple[bool, bool, bool]]
198
+ ) -> List[Any]:
199
+ return optimize_casting(values, select_raws, select_types)
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "iceaxe"
3
- version = "0.2.3.dev1"
3
+ version = "0.2.3.dev3"
4
4
  description = "A modern, fast ORM for Python."
5
5
  authors = ["Pierce Freeman <pierce@freeman.vc>"]
6
6
  readme = "README.md"
@@ -22,7 +22,7 @@ install_requires = \
22
22
 
23
23
  setup_kwargs = {
24
24
  'name': 'iceaxe',
25
- 'version': '0.2.3.dev1',
25
+ 'version': '0.2.3.dev3',
26
26
  'description': 'A modern, fast ORM for Python.',
27
27
  'long_description': '# iceaxe\n\nA modern, fast ORM for Python. We have the following goals:\n\n- 🏎️ **Performance**: We want to exceed or match the fastest ORMs in Python. We want our ORM\nto be as close as possible to raw-[asyncpg](https://github.com/MagicStack/asyncpg) speeds. See the "Benchmarks" section for more.\n- 📝 **Typehinting**: Everything should be typehinted with expected types. Declare your data as you\nexpect in Python and it should bidirectionally sync to the database.\n- 🐘 **Postgres only**: Leverage native Postgres features and simplify the implementation.\n- ⚡ **Common things are easy, rare things are possible**: 99% of the SQL queries we write are\nvanilla SELECT/INSERT/UPDATEs. These should be natively supported by your ORM. If you\'re writing _really_\ncomplex queries, these are better done by hand so you can see exactly what SQL will be run.\n\nIceaxe is in early alpha. It\'s also an independent project. It\'s compatible with the [Mountaineer](https://github.com/piercefreeman/mountaineer) ecosystem, but you can use it in whatever\nproject and web framework you\'re using.\n\n## Installation\n\nIf you\'re using poetry to manage your dependencies:\n\n```bash\npoetry add iceaxe\n```\n\nOtherwise install with pip:\n\n```bash\npip install iceaxe\n```\n\n## Usage\n\nDefine your models as a `TableBase` subclass:\n\n```python\nfrom iceaxe import TableBase\n\nclass Person(TableBase):\n id: int\n name: str\n age: int\n```\n\nTableBase is a subclass of Pydantic\'s `BaseModel`, so you get all of the validation and Field customization\nout of the box. We provide our own `Field` constructor that adds database-specific configuration. For instance, to make the\n`id` field a primary key / auto-incrementing you can do:\n\n```python\nfrom iceaxe import Field\n\nclass Person(TableBase):\n id: int = Field(primary_key=True)\n name: str\n age: int\n```\n\nOkay now you have a model. 
How do you interact with it?\n\nDatabases are based on a few core primitives to insert data, update it, and fetch it out again.\nTo do so you\'ll need a _database connection_, which is a connection over the network from your code\nto your Postgres database. The `DBConnection` is the core class for all ORM actions against the database.\n\n```python\nfrom iceaxe import DBConnection\nimport asyncpg\n\nconn = DBConnection(\n await asyncpg.connect(\n host="localhost",\n port=5432,\n user="db_user",\n password="yoursecretpassword",\n database="your_db",\n )\n)\n```\n\nThe Person class currently just lives in memory. To back it with a full\ndatabase table, we can run raw SQL or run a migration to add it:\n\n```python\nawait conn.conn.execute(\n """\n CREATE TABLE IF NOT EXISTS person (\n id SERIAL PRIMARY KEY,\n name TEXT NOT NULL,\n age INT NOT NULL\n )\n """\n)\n```\n\n### Inserting Data\n\nInstantiate object classes as you normally do:\n\n```python\npeople = [\n Person(name="Alice", age=30),\n Person(name="Bob", age=40),\n Person(name="Charlie", age=50),\n]\nawait conn.insert(people)\n\nprint(people[0].id) # 1\nprint(people[1].id) # 2\n```\n\nBecause we\'re using an auto-incrementing primary key, the `id` field will be populated after the insert.\nIceaxe will automatically update the object in place with the newly assigned value.\n\n### Updating data\n\nNow that we have these lovely people, let\'s modify them.\n\n```python\nperson = people[0]\nperson.name = "Blice"\n```\n\nRight now, we have a Python object that\'s out of state with the database. But that\'s often okay. 
We can inspect it\nand further write logic - it\'s fully decoupled from the database.\n\n```python\ndef ensure_b_letter(person: Person):\n if person.name[0].lower() != "b":\n raise ValueError("Name must start with \'B\'")\n\nensure_b_letter(person)\n```\n\nTo sync the values back to the database, we can call `update`:\n\n```python\nawait conn.update([person])\n```\n\nIf we were to query the database directly, we see that the name has been updated:\n\n```\nid | name | age\n----+-------+-----\n 1 | Blice | 31\n 2 | Bob | 40\n 3 | Charlie | 50\n```\n\nBut no other fields have been touched. This lets a potentially concurrent process\nmodify `Alice`\'s record - say, updating the age to 31. By the time we update the data, we\'ll\nchange the name but nothing else. Under the hood we do this by tracking the fields that\nhave been modified in-memory and creating a targeted UPDATE to modify only those values.\n\n### Selecting data\n\nTo select data, we can use a `QueryBuilder`. For a shortcut to `select` query functions,\nyou can also just import select directly. This method takes the desired value parameters\nand returns a list of the desired objects.\n\n```python\nfrom iceaxe import select\n\nquery = select(Person).where(Person.name == "Blice", Person.age > 25)\nresults = await conn.exec(query)\n```\n\nIf we inspect the typing of `results`, we see that it\'s a `list[Person]` objects. This matches\nthe typehint of the `select` function. You can also target columns directly:\n\n```python\nquery = select((Person.id, Person.name)).where(Person.age > 25)\nresults = await conn.exec(query)\n```\n\nThis will return a list of tuples, where each tuple is the id and name of the person: `list[tuple[int, str]]`.\n\nWe support most of the common SQL operations. Just like the results, these are typehinted\nto their proper types as well. Static typecheckers and your IDE will throw an error if you try to compare\na string column to an integer, for instance. 
A more complex example of a query:\n\n```python\nquery = select((\n Person.id,\n FavoriteColor,\n)).join(\n FavoriteColor,\n Person.id == FavoriteColor.person_id,\n).where(\n Person.age > 25,\n Person.name == "Blice",\n).order_by(\n Person.age.desc(),\n).limit(10)\nresults = await conn.exec(query)\n```\n\nAs expected this will deliver results - and typehint - as a `list[tuple[int, FavoriteColor]]`\n\n## Production\n\n> [!IMPORTANT]\n> Iceaxe is in early alpha. We\'re using it internally and showly rolling out to our production\napplications, but we\'re not yet ready to recommend it for general use. The API and larger\nstability is subject to change.\n\nNote that underlying Postgres connection wrapped by `conn` will be alive for as long as your object is in memory. This uses up one\nof the allowable connections to your database. Your overall limit depends on your Postgres configuration\nor hosting provider, but most managed solutions top out around 150-300. If you need more concurrent clients\nconnected (and even if you don\'t - connection creation at the Postgres level is expensive), you can adopt\na load balancer like `pgbouncer` to better scale to traffic. More deployment notes to come.\n\nIt\'s also worth noting the absence of request pooling in this initialization. This is a feature of many ORMs that lets you limit\nthe overall connections you make to Postgres, and re-use these over time. We specifically don\'t offer request\npooling as part of Iceaxe, despite being supported by our underlying engine `asyncpg`. This is a bit more\naligned to how things should be structured in production. Python apps are always bound to one process thanks to\nthe GIL. So no matter what your connection pool will always be tied to the current Python process / runtime. 
When you\'re deploying onto a server with multiple cores, the pool will be duplicated across CPUs and largely defeats the purpose of capping\nnetwork connections in the first place.\n\n## Benchmarking\n\nWe have basic benchmarking tests in the `__tests__/benchmarks` directory. To run them, you\'ll need to execute the pytest suite:\n\n```bash\npoetry run pytest -m integration_tests\n```\n\nCurrent benchmarking as of October 11 2024 is:\n\n| | raw asyncpg | iceaxe | external overhead | |\n|-------------------|-------------|--------|-----------------------------------------------|---|\n| TableBase columns | 0.098s | 0.093s | | |\n| TableBase full | 0.164s | 1.345s | 10%: dict construction | 90%: pydantic overhead | |\n\n## Development\n\nIf you update your Cython implementation during development, you\'ll need to re-compile the Cython code. This can be done with\na simple poetry install. Poetry is set up to create a dynamic `setup.py` based on our `build.py` definition.\n\n```bash\npoetry install\n```\n\n## TODOs\n\n- [ ] Additional documentation with usage examples.\n',
28
28
  'author': 'Pierce Freeman',
@@ -1,102 +0,0 @@
1
- # cython_optimizations.pyx
2
- from typing import Any, List, Tuple, Type
3
- from iceaxe.base import TableBase
4
- from iceaxe.queries import FunctionMetadata
5
- from json import loads as json_loads
6
- from cpython.ref cimport PyObject
7
- from cpython.object cimport PyObject_GetItem
8
- from libc.stdlib cimport malloc, free
9
- from libc.string cimport strcpy
10
-
11
# C-level cache entry describing one (non-excluded) model field of a table
# select. The precalculation pass mallocs one of these per field so the hot
# per-row loop can avoid repeated Python attribute lookups.
cdef struct FieldInfo:
    char* name      # malloc'd UTF-8 copy of the field name; freed by the caller's cleanup pass
    bint is_json    # True -> the raw column value must be decoded with json.loads
14
-
15
def optimize_casting(values, select_raws, select_types):
    """
    Cast raw database rows into the shapes requested by a select.

    For each row in `values` (a record indexable by column name) and each
    select target in `select_raws`, produce:
      - a model instance (`select_raw(**fields)`) when the target is a table,
        decoding JSON-typed fields with json.loads first;
      - the raw column value (`value[select_raw.key]`) when it is a column;
      - the aliased value (`value[select_raw.local_name]`) when it is a
        function-metadata target.

    `select_types[j]` is a `(is_table, is_column, is_function_metadata)` flag
    tuple for `select_raws[j]`. When exactly one target was selected the bare
    value is returned per row; otherwise a tuple of all targets.

    Returns a list with one entry per input row.

    NOTE: the previous malloc-based implementation reused a single
    `num_fields` counter across all table selects (corrupting iteration and
    the cleanup `free` counts when two tables had different field counts),
    dereferenced `fields[j]` entries that were never initialized on early
    MemoryError or for non-table selects, and stored borrowed PyObject*
    pointers without incref (use-after-free risk once the owning local was
    reassigned). Plain Python containers remove that entire class of bugs
    with identical observable behavior.
    """
    # Precalculate field metadata once per select target: for table selects,
    # the list of (field_name, is_json) pairs for non-excluded fields; None
    # for column / function-metadata selects.
    table_fields = []
    for select_raw, (is_table, _, _) in zip(select_raws, select_types):
        if is_table:
            table_fields.append(
                [
                    (field, info.is_json)
                    for field, info in select_raw.model_fields.items()
                    if not info.exclude
                ]
            )
        else:
            table_fields.append(None)

    num_selects = len(select_raws)
    result_all = [None] * len(values)

    for i, value in enumerate(values):
        # Defaults to None (instead of uninitialized memory) if a select's
        # type flags somehow match none of the three known kinds.
        result_value = [None] * num_selects
        for j in range(num_selects):
            select_raw = select_raws[j]
            is_table, is_column, is_function_metadata = select_types[j]
            if is_table:
                obj_dict = {}
                for field, is_json in table_fields[j]:
                    field_value = value[field]
                    if is_json:
                        field_value = json_loads(field_value)
                    obj_dict[field] = field_value
                result_value[j] = select_raw(**obj_dict)
            elif is_column:
                result_value[j] = value[select_raw.key]
            elif is_function_metadata:
                result_value[j] = value[select_raw.local_name]
        # Single-target selects unwrap to the bare value, matching the
        # original behavior.
        result_all[i] = result_value[0] if num_selects == 1 else tuple(result_value)

    return result_all
100
-
101
def optimize_exec_casting(values: List[Any], select_raw: List[Any], select_types: List[Tuple[bool, bool, bool]]) -> List[Any]:
    """
    Public entry point used by the session layer: delegate straight to the
    C-level `optimize_casting` routine, converting raw rows into model
    instances / column values / function results per the select definition.
    """
    return optimize_casting(values, select_raw, select_types)
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes