django-bulk-hooks 0.1.204__py3-none-any.whl → 0.1.205__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of django-bulk-hooks is flagged as potentially problematic.

@@ -32,82 +32,73 @@ def select_related(*related_fields):
 
     def decorator(func):
         sig = inspect.signature(func)
-        # Precompute the positional index of 'new_records' to avoid per-call binding
-        param_names = list(sig.parameters.keys())
-        new_records_pos = param_names.index("new_records") if "new_records" in param_names else None
-        # Fail fast on nested fields (not supported)
-        for f in related_fields:
-            if "." in f:
-                raise ValueError(f"@select_related does not support nested fields like '{f}'")
 
         @wraps(func)
         def wrapper(*args, **kwargs):
-            # Fast retrieval of new_records without full signature binding
-            new_records = kwargs.get("new_records")
-            if new_records is None and new_records_pos is not None and len(args) > new_records_pos:
-                new_records = args[new_records_pos]
-            if new_records is None:
-                # Fallback for uncommon signatures
-                bound = sig.bind_partial(*args, **kwargs)
-                bound.apply_defaults()
-                if "new_records" not in bound.arguments:
-                    raise TypeError("@select_related requires a 'new_records' argument in the decorated function")
-                new_records = bound.arguments["new_records"]
+            bound = sig.bind_partial(*args, **kwargs)
+            bound.apply_defaults()
+
+            if "new_records" not in bound.arguments:
+                raise TypeError(
+                    "@preload_related requires a 'new_records' argument in the decorated function"
+                )
+
+            new_records = bound.arguments["new_records"]
 
             if not isinstance(new_records, list):
-                raise TypeError(f"@select_related expects a list of model instances, got {type(new_records)}")
+                raise TypeError(
+                    f"@preload_related expects a list of model instances, got {type(new_records)}"
+                )
 
             if not new_records:
                 return func(*args, **kwargs)
 
             # Determine which instances actually need preloading
             model_cls = new_records[0].__class__
-
-            # Validate fields once per model class for this call
-            valid_fields = []
-            for field in related_fields:
-                try:
-                    f = model_cls._meta.get_field(field)
-                    if f.is_relation and not f.many_to_many and not f.one_to_many:
-                        valid_fields.append(field)
-                except FieldDoesNotExist:
-                    continue
-
-            if not valid_fields:
-                return func(*args, **kwargs)
-
             ids_to_fetch = []
             for obj in new_records:
                 if obj.pk is None:
                     continue
-                # If any valid related field is not cached, fetch this object
-                if any(field not in obj._state.fields_cache for field in valid_fields):
+                # if any related field is not already cached on the instance,
+                # mark it for fetching
+                if any(field not in obj._state.fields_cache for field in related_fields):
                     ids_to_fetch.append(obj.pk)
 
-            if not ids_to_fetch:
-                return func(*args, **kwargs)
-
-            # Deduplicate while preserving order
-            seen = set()
-            ids_to_fetch = [i for i in ids_to_fetch if not (i in seen or seen.add(i))]
-
-            # Use the base manager to avoid recursion and preload in one query
-            fetched = model_cls._base_manager.select_related(*valid_fields).in_bulk(ids_to_fetch)
+            fetched = {}
+            if ids_to_fetch:
+                # Use the base manager to avoid recursion
+                fetched = model_cls._base_manager.select_related(*related_fields).in_bulk(ids_to_fetch)
 
             for obj in new_records:
-                if obj.pk not in fetched:
+                preloaded = fetched.get(obj.pk)
+                if not preloaded:
                     continue
-                preloaded = fetched[obj.pk]
-                for field in valid_fields:
+                for field in related_fields:
                     if field in obj._state.fields_cache:
+                        # don't override values that were explicitly set or already loaded
                         continue
-                    rel_obj = getattr(preloaded, field, None)
-                    if rel_obj is None:
+                    if "." in field:
+                        raise ValueError(
+                            f"@preload_related does not support nested fields like '{field}'"
+                        )
+
+                    try:
+                        f = model_cls._meta.get_field(field)
+                        if not (
+                            f.is_relation and not f.many_to_many and not f.one_to_many
+                        ):
+                            continue
+                    except FieldDoesNotExist:
                         continue
-                    setattr(obj, field, rel_obj)
-                    obj._state.fields_cache[field] = rel_obj
 
-            return func(*args, **kwargs)
+                    try:
+                        rel_obj = getattr(preloaded, field)
+                        setattr(obj, field, rel_obj)
+                        obj._state.fields_cache[field] = rel_obj
+                    except AttributeError:
+                        pass
+
+            return func(*bound.args, **bound.kwargs)
 
         return wrapper
 
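Note: the rewritten wrapper always resolves `new_records` through full signature binding rather than the positional fast path used in 0.1.204, and it finally calls `func(*bound.args, **bound.kwargs)`. A minimal standalone sketch of that binding behaviour (the `handle` function and its arguments below are hypothetical, not part of the package):

```python
import inspect


def handle(new_records, old_records=None):
    return len(new_records)


sig = inspect.signature(handle)

# bind_partial resolves positional and keyword arguments against the
# declared parameters, so "new_records" is found either way.
bound = sig.bind_partial([1, 2, 3])
bound.apply_defaults()
assert bound.arguments["new_records"] == [1, 2, 3]

bound = sig.bind_partial(new_records=[4, 5])
bound.apply_defaults()
assert bound.arguments["new_records"] == [4, 5]
```

Binding on every call is simpler, but it does slightly more work per invocation than the removed positional-index lookup.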
@@ -1,7 +1,4 @@
 import logging
-import os
-import time
-from itertools import repeat
 
 from django.core.exceptions import ValidationError
 
@@ -10,20 +7,6 @@ from django_bulk_hooks.registry import get_hooks
 logger = logging.getLogger(__name__)
 
 
-_PROFILE_ENABLED = bool(
-    int(os.getenv("DJANGO_BULK_HOOKS_PROFILE", os.getenv("BULK_HOOKS_PROFILE", "0")))
-)
-_PROFILE_MIN_MS = float(os.getenv("DJANGO_BULK_HOOKS_PROFILE_MIN_MS", "0"))
-
-
-def _log_profile(message: str, duration_ms: float | None = None) -> None:
-    if not _PROFILE_ENABLED:
-        return
-    if duration_ms is not None and duration_ms < _PROFILE_MIN_MS:
-        return
-    print(f"[bulk_hooks.profile] {message}", flush=True)
-
-
 def run(model_cls, event, new_records, old_records=None, ctx=None):
     """
     Run hooks for a given model, event, and records.
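The removed block above was an opt-in profiling facility gated by environment variables that were read at import time; 0.1.205 drops the feature entirely. In 0.1.204 it could be enabled roughly like this (a sketch based only on the os.getenv calls shown in the removed lines):

```python
import os

# Must be set before the module is imported, because the removed code
# read these flags at import time, not per call.
os.environ["DJANGO_BULK_HOOKS_PROFILE"] = "1"         # enable profile output
os.environ["DJANGO_BULK_HOOKS_PROFILE_MIN_MS"] = "5"  # drop entries under 5 ms
```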
@@ -32,96 +15,42 @@ def run(model_cls, event, new_records, old_records=None, ctx=None):
         return
 
     # Get hooks for this model and event
-    t0 = time.perf_counter() if _PROFILE_ENABLED else None
     hooks = get_hooks(model_cls, event)
-    if _PROFILE_ENABLED:
-        dt = (time.perf_counter() - t0) * 1000 if t0 is not None else 0.0
-        _log_profile(
-            f"engine.get_hooks model={model_cls.__name__} event={event} took {dt:.2f}ms",
-            dt,
-        )
 
     if not hooks:
         return
 
     # For BEFORE_* events, run model.clean() first for validation
-    if event.startswith("before_") and not getattr(ctx, "skip_model_clean", False):
-        t_clean = time.perf_counter() if _PROFILE_ENABLED else None
+    if event.startswith("before_"):
         for instance in new_records:
             try:
                 instance.clean()
             except ValidationError as e:
                 logger.error("Validation failed for %s: %s", instance, e)
                 raise
-        if _PROFILE_ENABLED:
-            dt = (time.perf_counter() - t_clean) * 1000 if t_clean is not None else 0.0
-            _log_profile(
-                f"engine.model_clean model={model_cls.__name__} event={event} n={len(new_records)} took {dt:.2f}ms",
-                dt,
-            )
 
     # Process hooks
-    t_hooks_total = time.perf_counter() if _PROFILE_ENABLED else None
     for handler_cls, method_name, condition, priority in hooks:
         handler_instance = handler_cls()
         func = getattr(handler_instance, method_name)
 
-        # Fast path: if no condition, pass-through all records
-        if not condition:
-            try:
-                t_handler = time.perf_counter() if _PROFILE_ENABLED else None
-                func(
-                    new_records=new_records,
-                    old_records=old_records if old_records and any(old_records) else None,
-                )
-                if _PROFILE_ENABLED:
-                    dt = (time.perf_counter() - t_handler) * 1000 if t_handler is not None else 0.0
-                    _log_profile(
-                        f"engine.handler handler={handler_cls.__name__}.{method_name} event={event} n={len(new_records)} took {dt:.2f}ms",
-                        dt,
-                    )
-            except Exception:
-                raise
-            continue
-
-        # Conditional path: select matching records
         to_process_new = []
         to_process_old = []
 
-        t_select = time.perf_counter() if _PROFILE_ENABLED else None
         for new, original in zip(
             new_records,
-            old_records if old_records is not None else repeat(None),
+            old_records or [None] * len(new_records),
+            strict=True,
         ):
-            if condition.check(new, original):
+            if not condition or condition.check(new, original):
                 to_process_new.append(new)
                 to_process_old.append(original)
-        if _PROFILE_ENABLED:
-            dt = (time.perf_counter() - t_select) * 1000 if t_select is not None else 0.0
-            _log_profile(
-                f"engine.select_records handler={handler_cls.__name__}.{method_name} event={event} n={len(new_records)} selected={len(to_process_new)} took {dt:.2f}ms",
-                dt,
-            )
 
         if to_process_new:
             try:
-                t_handler = time.perf_counter() if _PROFILE_ENABLED else None
                 func(
                     new_records=to_process_new,
                     old_records=to_process_old if any(to_process_old) else None,
                 )
-                if _PROFILE_ENABLED:
-                    dt = (time.perf_counter() - t_handler) * 1000 if t_handler is not None else 0.0
-                    _log_profile(
-                        f"engine.handler handler={handler_cls.__name__}.{method_name} event={event} n={len(to_process_new)} took {dt:.2f}ms",
-                        dt,
-                    )
-            except Exception:
+            except Exception as e:
                 raise
-
-    if _PROFILE_ENABLED:
-        dt = (time.perf_counter() - t_hooks_total) * 1000 if t_hooks_total is not None else 0.0
-        _log_profile(
-            f"engine.run model={model_cls.__name__} event={event} n={len(new_records)} took {dt:.2f}ms (handlers only)",
-            dt,
-        )
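Behavioural note on the pairing change above: 0.1.204 zipped `new_records` against `itertools.repeat(None)` when no old records were given, while 0.1.205 substitutes `[None] * len(new_records)` only when `old_records` is falsy and passes `strict=True`, so a non-empty `old_records` of mismatched length now raises instead of being silently truncated. A small illustration (record values are placeholders; `strict=True` requires Python 3.10+):

```python
new_records = ["a", "b", "c"]
old_records = ["x", "y"]  # non-empty but shorter than new_records

# With strict=True, a length mismatch raises rather than truncating.
pairs = zip(new_records, old_records or [None] * len(new_records), strict=True)
try:
    list(pairs)
except ValueError as exc:
    print(exc)  # e.g. "zip() argument 2 is shorter than argument 1"
```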
@@ -1,7 +1,6 @@
 import logging
 import threading
 from collections import deque
-from itertools import zip_longest
 
 from django.db import transaction
 
@@ -32,13 +31,6 @@ def get_hook_queue():
     return _hook_context.queue
 
 
-def get_handler_cache():
-    """Thread-local cache for handler instances, scoped per outermost run."""
-    if not hasattr(_hook_context, "handler_cache"):
-        _hook_context.handler_cache = {}
-    return _hook_context.handler_cache
-
-
 class HookContextState:
     @property
     def is_before(self):
@@ -112,8 +104,6 @@ class Hook(metaclass=HookMeta):
             return # nested call, will be processed by outermost
 
         # only outermost handle will process the queue
-        # initialize a fresh handler cache for this run
-        _hook_context.handler_cache = {}
         while queue:
             cls_, event_, model_, new_, old_, kw_ = queue.popleft()
             cls_._process(event_, model_, new_, old_, **kw_)
@@ -133,56 +123,29 @@ class Hook(metaclass=HookMeta):
         hook_vars.event = event
         hook_vars.model = model
 
-        # Hooks are already kept sorted by priority in the registry
-        hooks = get_hooks(model, event)
+        hooks = sorted(get_hooks(model, event), key=lambda x: x[3])
 
         def _execute():
             new_local = new_records or []
             old_local = old_records or []
-            cache = get_handler_cache()
+            if len(old_local) < len(new_local):
+                old_local += [None] * (len(new_local) - len(old_local))
 
             for handler_cls, method_name, condition, priority in hooks:
-                # If there's no condition, pass through all records fast
-                if condition is None:
-                    handler = cache.get(handler_cls)
-                    if handler is None:
-                        handler = handler_cls()
-                        cache[handler_cls] = handler
-                    method = getattr(handler, method_name)
-                    try:
-                        method(
-                            new_records=new_local,
-                            old_records=old_local,
-                            **kwargs,
-                        )
-                    except Exception:
-                        logger.exception(
-                            "Error in hook %s.%s", handler_cls.__name__, method_name
-                        )
-                    continue
-
-                # Filter matching records without allocating full boolean list
-                to_process_new = []
-                to_process_old = []
-                for n, o in zip_longest(new_local, old_local, fillvalue=None):
-                    if n is None:
+                if condition is not None:
+                    checks = [
+                        condition.check(n, o) for n, o in zip(new_local, old_local)
+                    ]
+                    if not any(checks):
                         continue
-                    if condition.check(n, o):
-                        to_process_new.append(n)
-                        to_process_old.append(o)
-
-                if not to_process_new:
-                    continue
 
-                handler = cache.get(handler_cls)
-                if handler is None:
-                    handler = handler_cls()
-                    cache[handler_cls] = handler
+                handler = handler_cls()
                 method = getattr(handler, method_name)
+
                 try:
                     method(
-                        new_records=to_process_new,
-                        old_records=to_process_old,
+                        new_records=new_local,
+                        old_records=old_local,
                         **kwargs,
                     )
                 except Exception:
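The condition handling in `_execute` also changes shape: 0.1.204 filtered the record lists and passed only the matching subset to the handler, whereas 0.1.205 skips the hook only when no record matches and otherwise passes `new_local`/`old_local` unchanged. A hypothetical illustration (the `IsActive` condition and the dict records are stand-ins, not part of the package):

```python
class IsActive:
    def check(self, new, old):
        return new["active"]


new_local = [{"id": 1, "active": True}, {"id": 2, "active": False}]
old_local = [None, None]
condition = IsActive()

# 0.1.204: the handler received only the matching subset
matched = [n for n, o in zip(new_local, old_local) if condition.check(n, o)]
print(matched)  # [{'id': 1, 'active': True}]

# 0.1.205: if any record matches, the handler receives every record
if any(condition.check(n, o) for n, o in zip(new_local, old_local)):
    print(new_local)  # both records, including the non-matching one
```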
@@ -202,7 +165,3 @@ class Hook(metaclass=HookMeta):
             hook_vars.event = None
             hook_vars.model = None
             hook_vars.depth -= 1
-            # Clear cache only when queue is empty (outermost completion)
-            if not get_hook_queue():
-                if hasattr(_hook_context, "handler_cache"):
-                    _hook_context.handler_cache.clear()
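For orientation, the `Hook` machinery in this file keeps a thread-local deque and a depth counter so that only the outermost call drains queued hook work; the lines removed above were the handler-cache cleanup tied to that outermost completion. A stripped-down sketch of the pattern (the function names here are illustrative, not the package's API):

```python
import threading
from collections import deque

_ctx = threading.local()


def _queue() -> deque:
    # One queue per thread, created lazily.
    if not hasattr(_ctx, "queue"):
        _ctx.queue = deque()
    return _ctx.queue


def dispatch(job) -> None:
    """Queue a job; only the outermost dispatch call drains the queue."""
    _queue().append(job)
    if getattr(_ctx, "depth", 0):
        return  # nested call, the outer dispatch is already draining
    _ctx.depth = 1
    try:
        while _queue():
            _queue().popleft()()
    finally:
        _ctx.depth = 0
```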