ami-djAPI-analyzing 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,19 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Ami
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software.
10
+
11
+ The above copyright notice and this permission notice shall be included in all
12
+ copies or substantial portions of the Software.
13
+
14
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
17
+
18
+ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY.
@@ -0,0 +1,54 @@
1
+ Metadata-Version: 2.4
2
+ Name: ami-djAPI-analyzing
3
+ Version: 0.1.0
4
+ Summary: Lightweight Django middleware for profiling SQL queries, detecting performance issues, and analyzing request execution.
5
+ Author: Ami
6
+ License: MIT
7
+ Keywords: django,django-profiler,sql-profiler,performance,django-middleware,debugging
8
+ Classifier: Framework :: Django
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Programming Language :: Python :: 3.9
11
+ Classifier: Programming Language :: Python :: 3.10
12
+ Classifier: Programming Language :: Python :: 3.11
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Operating System :: OS Independent
15
+ Requires-Python: >=3.9
16
+ Description-Content-Type: text/markdown
17
+ License-File: LICENSE
18
+ Requires-Dist: Django>=3.2
19
+ Requires-Dist: cryptography>=41.0
20
+ Provides-Extra: dev
21
+ Requires-Dist: pytest; extra == "dev"
22
+ Requires-Dist: black; extra == "dev"
23
+ Requires-Dist: flake8; extra == "dev"
24
+ Dynamic: license-file
25
+
26
+ # ami-djAPI-analyzing
27
+
28
+ **ami-djAPI-analyzing** is a lightweight Django middleware for analyzing SQL queries and request performance, and for detecting common database issues like **N+1 queries, duplicate queries, slow queries, and missing indexes**.
29
+
30
+ It provides detailed profiling information directly in the response headers and API responses.
31
+
32
+ ---
33
+
34
+ # Features
35
+
36
+ • Capture all SQL queries executed during a request
37
+ • Detect **N+1 queries automatically**
38
+ • Detect **duplicate queries**
39
+ • Detect **slow SQL queries**
40
+ • Detect **potential missing indexes**
41
+ • Analyze **database time vs total response time**
42
+ • **Performance score system (A–F grading)**
43
+ • Track **request history**
44
+ • Capture **memory usage**
45
+ • Generate **EXPLAIN plans for slow queries**
46
+ • Optional **step-by-step view execution profiler**
47
+ • Encrypted performance payload in response headers
48
+
49
+ ---
50
+
51
+ # Installation
52
+
53
+ ```bash
54
+ pip install ami-djAPI-analyzing
@@ -0,0 +1,29 @@
1
+ # ami-djAPI-analyzing
2
+
3
+ **ami-djAPI-analyzing** is a lightweight Django middleware for analyzing SQL queries and request performance, and for detecting common database issues like **N+1 queries, duplicate queries, slow queries, and missing indexes**.
4
+
5
+ It provides detailed profiling information directly in the response headers and API responses.
6
+
7
+ ---
8
+
9
+ # Features
10
+
11
+ • Capture all SQL queries executed during a request
12
+ • Detect **N+1 queries automatically**
13
+ • Detect **duplicate queries**
14
+ • Detect **slow SQL queries**
15
+ • Detect **potential missing indexes**
16
+ • Analyze **database time vs total response time**
17
+ • **Performance score system (A–F grading)**
18
+ • Track **request history**
19
+ • Capture **memory usage**
20
+ • Generate **EXPLAIN plans for slow queries**
21
+ • Optional **step-by-step view execution profiler**
22
+ • Encrypted performance payload in response headers
23
+
24
+ ---
25
+
26
+ # Installation
27
+
28
+ ```bash
29
+ pip install ami-djAPI-analyzing
@@ -0,0 +1,54 @@
1
+ Metadata-Version: 2.4
2
+ Name: ami-djAPI-analyzing
3
+ Version: 0.1.0
4
+ Summary: Lightweight Django middleware for profiling SQL queries, detecting performance issues, and analyzing request execution.
5
+ Author: Ami
6
+ License: MIT
7
+ Keywords: django,django-profiler,sql-profiler,performance,django-middleware,debugging
8
+ Classifier: Framework :: Django
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Programming Language :: Python :: 3.9
11
+ Classifier: Programming Language :: Python :: 3.10
12
+ Classifier: Programming Language :: Python :: 3.11
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Operating System :: OS Independent
15
+ Requires-Python: >=3.9
16
+ Description-Content-Type: text/markdown
17
+ License-File: LICENSE
18
+ Requires-Dist: Django>=3.2
19
+ Requires-Dist: cryptography>=41.0
20
+ Provides-Extra: dev
21
+ Requires-Dist: pytest; extra == "dev"
22
+ Requires-Dist: black; extra == "dev"
23
+ Requires-Dist: flake8; extra == "dev"
24
+ Dynamic: license-file
25
+
26
+ # ami-djAPI-analyzing
27
+
28
+ **ami-djAPI-analyzing** is a lightweight Django middleware for analyzing SQL queries and request performance, and for detecting common database issues like **N+1 queries, duplicate queries, slow queries, and missing indexes**.
29
+
30
+ It provides detailed profiling information directly in the response headers and API responses.
31
+
32
+ ---
33
+
34
+ # Features
35
+
36
+ • Capture all SQL queries executed during a request
37
+ • Detect **N+1 queries automatically**
38
+ • Detect **duplicate queries**
39
+ • Detect **slow SQL queries**
40
+ • Detect **potential missing indexes**
41
+ • Analyze **database time vs total response time**
42
+ • **Performance score system (A–F grading)**
43
+ • Track **request history**
44
+ • Capture **memory usage**
45
+ • Generate **EXPLAIN plans for slow queries**
46
+ • Optional **step-by-step view execution profiler**
47
+ • Encrypted performance payload in response headers
48
+
49
+ ---
50
+
51
+ # Installation
52
+
53
+ ```bash
54
+ pip install ami-djAPI-analyzing
@@ -0,0 +1,15 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ ami_djAPI_analyzing.egg-info/PKG-INFO
5
+ ami_djAPI_analyzing.egg-info/SOURCES.txt
6
+ ami_djAPI_analyzing.egg-info/dependency_links.txt
7
+ ami_djAPI_analyzing.egg-info/requires.txt
8
+ ami_djAPI_analyzing.egg-info/top_level.txt
9
+ ami_djapi_analyzing/__init__.py
10
+ ami_djapi_analyzing/analyzer.py
11
+ ami_djapi_analyzing/middleware.py
12
+ ami_djapi_analyzing/perf_detector.py
13
+ ami_djapi_analyzing/scorer.py
14
+ ami_djapi_analyzing/sql_collector.py
15
+ ami_djapi_analyzing/steps.py
@@ -0,0 +1,7 @@
1
+ Django>=3.2
2
+ cryptography>=41.0
3
+
4
+ [dev]
5
+ pytest
6
+ black
7
+ flake8
@@ -0,0 +1 @@
1
+ ami_djapi_analyzing
@@ -0,0 +1 @@
1
+ __version__ = "0.1.0"
@@ -0,0 +1,151 @@
1
+ import re
2
+ from collections import defaultdict
3
+
4
+
5
+ SLOW_QUERY_THRESHOLD = 0.05
6
+
7
+
8
def normalize_sql(sql: str) -> str:
    """Normalize a SQL string so structurally identical queries compare equal.

    Collapses runs of whitespace, replaces compared literal values (numbers
    and single-quoted strings) with a ``?`` placeholder, and lowercases the
    result. Used as the grouping key for duplicate-query detection.
    """
    # Collapse all whitespace runs to a single space.
    sql = re.sub(r"\s+", " ", sql)
    # Replace compared literals with a placeholder.  The previous pattern
    # (=\s*[\d']+) consumed only digits/quotes, leaving e.g. "= 'abc'" half
    # normalized as "= ?abc'"; match a whole quoted string or number instead.
    sql = re.sub(r"=\s*(?:'[^']*'|\d+)", "= ?", sql)
    return sql.strip().lower()
15
+
16
+
17
def detect_duplicate_queries(queries):
    """Group captured queries by normalized SQL and report repeats.

    Each reported entry carries the normalized query text, how many times it
    ran, and the summed execution time of all occurrences.
    """
    by_shape = defaultdict(list)
    for entry in queries:
        by_shape[normalize_sql(entry["sql"])].append(entry)

    return [
        {
            "query": shape,
            "count": len(group),
            "total_time": sum(item["time"] for item in group),
        }
        for shape, group in by_shape.items()
        if len(group) > 1
    ]
36
+
37
+
38
def detect_n_plus_one(queries, threshold=5):
    """Flag models queried more than *threshold* times in one request.

    Heuristic N+1 detector: queries are grouped by the ``model`` key each
    record carries (records without a model are ignored), and any model hit
    more than *threshold* times is reported with a remediation hint.

    Parameters
    ----------
    queries : list[dict]
        Captured query records; only the optional "model" key is used.
    threshold : int, optional
        Maximum per-model query count considered normal (default 5,
        preserving the previously hard-coded limit).
    """
    grouped = defaultdict(list)
    for q in queries:
        model = q.get("model")
        if model:
            grouped[model].append(q)

    n_plus_one = []
    for model, items in grouped.items():
        if len(items) > threshold:
            n_plus_one.append({
                "model": model,
                "count": len(items),
                "suggestion": "Consider using select_related or prefetch_related"
            })

    return n_plus_one
58
+
59
+
60
def detect_slow_queries(queries, threshold=None):
    """Return the query records slower than *threshold* seconds.

    Parameters
    ----------
    queries : list[dict]
        Captured query records; only the "time" key is read.
    threshold : float | None, optional
        Cut-off in seconds. ``None`` (the default) keeps the original
        behavior of using the module-level SLOW_QUERY_THRESHOLD.
    """
    if threshold is None:
        threshold = SLOW_QUERY_THRESHOLD
    return [q for q in queries if q["time"] > threshold]
66
+
67
+
68
def detect_missing_indexes(queries):
    """Heuristically flag queries that filter on non-id columns.

    Very rough check: any query whose lowercased SQL contains "where" but no
    "id" substring is reported as possibly scanning an unindexed column.
    """
    return [
        {
            "sql": entry["sql"],
            "reason": "Possible missing index on filtered column",
        }
        for entry in queries
        if "where" in entry["sql"].lower() and "id" not in entry["sql"].lower()
    ]
83
+
84
+
85
def build_suggestions(n_plus_one, duplicates, slow_queries, index_problems):
    """Translate detected issue lists into human-readable optimization advice.

    Each non-empty findings list contributes exactly one suggestion string,
    in a fixed order (N+1, duplicates, slow queries, missing indexes).
    """
    findings = (
        (n_plus_one,
         "Detected potential N+1 queries. Use select_related or prefetch_related."),
        (duplicates,
         "Duplicate queries detected. Consider caching or query optimization."),
        (slow_queries,
         "Slow queries detected. Add indexes or optimize joins."),
        (index_problems,
         "Queries filtering non-indexed columns detected."),
    )
    return [message for detected, message in findings if detected]
110
+
111
+
112
def analyze_queries(queries, response_time):
    """Run all detectors over the captured queries and assemble the report.

    Parameters
    ----------
    queries : list[dict]
        Captured query records (each with at least "sql" and "time").
    response_time : float
        Total wall-clock time of the request in seconds; endpoints over 1.0s
        are marked slow.

    Returns a flat dict consumed by the middleware and the scorer.
    """
    duplicates = detect_duplicate_queries(queries)
    n_plus_one = detect_n_plus_one(queries)
    slow = detect_slow_queries(queries)
    index_warnings = detect_missing_indexes(queries)

    return {
        "query_count": len(queries),
        "total_db_time": round(sum(q["time"] for q in queries), 4),
        "duplicate_details": duplicates,
        "duplicate_query_count": len(duplicates),
        "n_plus_one_details": n_plus_one,
        "n_plus_one_count": len(n_plus_one),
        "slow_queries": slow,
        "slow_query_count": len(slow),
        "missing_index_warnings": index_warnings,
        "optimization_suggestions": build_suggestions(
            n_plus_one, duplicates, slow, index_warnings
        ),
        "is_slow_endpoint": response_time > 1.0,
    }
@@ -0,0 +1,83 @@
1
+ import time
2
+ import tracemalloc
3
+ from collections import deque
4
+ from django.db import connections
5
+ from django.conf import settings
6
+
7
+ from .analyzer import analyze_queries
8
+ from .scorer import calculate_score
9
+ from .sql_collector import QueryCaptureWrapper, generate_key, encrypt_data
10
+ import base64
11
+
12
+
13
+ REQUEST_HISTORY = deque(maxlen=50)
14
+
15
class PerformanceMiddleware:
    """Per-request profiling middleware.

    Captures SQL on the "default" connection (via QueryCaptureWrapper),
    wall-clock time, and tracemalloc memory stats for each request, scores
    the result, and attaches an AES-GCM-encrypted JSON payload plus a few
    plain headers to the response.
    """

    def __init__(self, get_response):
        # Standard Django middleware signature; get_response is the next
        # layer in the middleware chain.
        self.get_response = get_response

    def __call__(self, request):
        wrapper = QueryCaptureWrapper()
        # NOTE(review): tracemalloc is process-global, so overlapping
        # requests would start/stop each other's tracing — confirm the
        # intended deployment is effectively serial.
        tracemalloc.start()
        start_time = time.perf_counter()

        # SQL Capture
        with connections["default"].execute_wrapper(wrapper):
            response = self.get_response(request)

        current, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()

        total_response_time = round(time.perf_counter() - start_time, 4)
        queries = wrapper.queries

        analysis = analyze_queries(queries, total_response_time)
        score_data = calculate_score(analysis)

        # Five slowest queries of the request, slowest first.
        slow_queries = sorted(queries, key=lambda q: q["time"], reverse=True)[:5]

        memory_data = {
            "current_memory_bytes": current,
            "peak_memory_bytes": peak
        }

        # Rolling window of recent requests (REQUEST_HISTORY is a
        # module-level deque with maxlen=50).
        REQUEST_HISTORY.append({
            "path": request.path,
            "method": request.method,
            "response_time": total_response_time,
            "query_count": analysis["query_count"],
        })
        history = list(REQUEST_HISTORY)

        # Build agent payload
        agent_payload = {
            "summary": {
                "query_count": analysis["query_count"],
                "db_time": analysis["total_db_time"],
                "response_time": total_response_time,
                "score": score_data["score"],
                "grade": score_data["grade"],
            },
            "sql_queries": queries,
            "top_slow_queries": slow_queries,
            "sql_analysis": {
                "duplicate_queries": analysis["duplicate_details"],
                "n_plus_one_queries": analysis["n_plus_one_details"],
                "slow_query_count": analysis["slow_query_count"],
                "is_slow_endpoint": analysis["is_slow_endpoint"],
            },
            "memory_usage": memory_data,
            "request_history": history,

        }

        # Encrypt agent payload
        # NOTE(review): the AES key travels in a header of the same response
        # as the ciphertext, so this is obfuscation rather than
        # confidentiality — confirm that is the intent.
        key = generate_key()
        encrypted_payload = encrypt_data(agent_payload, key)
        response["agent_encrypted"] = encrypted_payload
        response["X-Encryption-Key"] = base64.b64encode(key).decode("utf-8")
        response["X-Debug-Mode"] = str(settings.DEBUG)
        response["X-Profiler-Enabled"] = "True"

        return response
@@ -0,0 +1,52 @@
1
+ # perf.py
2
+
3
+ import re
4
+ from collections import defaultdict, Counter
5
+
6
+
7
def detect_n_plus_one(queries, threshold=5):
    """Detect repeated similar queries (N+1) by collapsing numeric literals.

    Queries that differ only in integer literals are treated as the same
    pattern; any pattern seen at least *threshold* times is reported with a
    truncated sample of its text and its occurrence count.
    """
    pattern_counts = defaultdict(int)
    for record in queries:
        # Replace every standalone integer with "?" so per-row lookups
        # collapse onto one pattern.
        pattern = re.sub(r"\b\d+\b", "?", record["sql"])
        pattern_counts[pattern] += 1

    return [
        {"query_sample": pattern[:120], "count": seen}
        for pattern, seen in pattern_counts.items()
        if seen >= threshold
    ]
33
+
34
+
35
def detect_duplicate_queries(queries):
    """Report SQL statements executed verbatim more than once.

    Exact-string comparison (no normalization); each duplicate is reported
    with a truncated sample and its total occurrence count.
    """
    occurrences = Counter(record["sql"] for record in queries)
    return [
        {"query_sample": statement[:120], "count": seen}
        for statement, seen in occurrences.items()
        if seen > 1
    ]
@@ -0,0 +1,43 @@
1
def calculate_score(analysis):
    """Derive a 0-100 performance score and letter grade from an analysis dict.

    Starts from 100 and accumulates fixed penalties for high query counts,
    DB time, N+1 patterns, duplicate queries, and slow endpoints, flooring
    the final score at 0.
    """
    penalty = 0

    query_count = analysis["query_count"]
    if query_count > 20:
        penalty += 25
    elif query_count > 10:
        penalty += 10

    db_time = analysis["total_db_time"]
    if db_time > 1.0:
        penalty += 20
    elif db_time > 0.5:
        penalty += 10

    if analysis["n_plus_one_count"] > 0:
        penalty += 20

    if analysis["duplicate_query_count"] > 0:
        penalty += 10

    if analysis["is_slow_endpoint"]:
        penalty += 20

    score = max(100 - penalty, 0)

    return {
        "score": score,
        "grade": grade_from_score(score),
    }
30
+
31
+
32
def grade_from_score(score):
    """Map a numeric 0-100 score to a letter grade (A best, F worst)."""
    # Walk the grade cut-offs from best to worst; first match wins.
    for cutoff, grade in ((90, "A"), (75, "B"), (60, "C"), (40, "D")):
        if score >= cutoff:
            return grade
    return "F"
@@ -0,0 +1,120 @@
1
+ import time
2
+ import traceback
3
+ import re
4
+ from django.db import connections
5
+ from django.conf import settings
6
+ import os
7
+ import base64
8
+ import json
9
+ from cryptography.hazmat.primitives.ciphers.aead import AESGCM
10
+
11
+
12
def detect_model_from_sql(sql):
    """Extract the first table name following FROM, or None if absent.

    Handles an optional surrounding double quote around the table name;
    matching is case-insensitive.
    """
    found = re.search(r'FROM\s+"?([\w_]+)"?', sql, re.IGNORECASE)
    return found.group(1) if found else None
17
+
18
+
19
def get_raw_sql(sql, params):
    """Best-effort interpolation of params into sql via cursor.mogrify().

    mogrify() is backend-specific (e.g. psycopg2); on any failure —
    including Django being unavailable or the backend lacking mogrify — the
    original parameterized SQL is returned unchanged.
    """
    try:
        from django.db import connection

        with connection.cursor() as cursor:
            rendered = cursor.mogrify(sql, params)

        return rendered.decode("utf-8") if isinstance(rendered, bytes) else rendered

    except Exception:
        # Fall back to the parameterized statement rather than failing.
        return sql
36
+
37
+
38
class QueryCaptureWrapper:
    """Django execute_wrapper hook that records every SQL statement it sees.

    One dict per executed statement accumulates in ``self.queries``; the
    middleware reads the list after the response is built.
    """

    def __init__(self):
        # Records in execution order; each holds sql, params, time, model,
        # many, stack_trace, and (for slow queries) explain_plan.
        self.queries = []

    def _stack_trace(self):
        """Return the innermost frames that issued the query (DEBUG only)."""
        # Stack walking on every query is costly, so skip outside DEBUG.
        if not settings.DEBUG:
            return []

        # Drop the last 3 frames (this helper plus the wrapper plumbing).
        stack = traceback.extract_stack()[:-3]

        # Keep only the 6 innermost remaining frames.
        return [
            {
                "file": frame.filename,
                "line": frame.lineno,
                "function": frame.name,
            }
            for frame in stack[-6:]
        ]

    def explain_query(self, sql, params):
        """Run EXPLAIN on a SELECT and return the plan rows as strings."""
        # EXPLAIN only makes sense for SELECTs; everything else returns [].
        if not sql.lower().startswith("select"):
            return []

        try:
            with connections["default"].cursor() as cursor:
                cursor.execute(f"EXPLAIN {sql}", params or [])
                plan = cursor.fetchall()
                return [str(p) for p in plan]
        except Exception:
            # Best effort: a failed EXPLAIN must never break the request.
            return []

    def __call__(self, execute, sql, params, many, context):
        """execute_wrapper entry point: time the query and record metadata."""
        start = time.perf_counter()

        try:
            return execute(sql, params, many, context)

        finally:
            # Runs whether the query succeeded or raised, so failing
            # statements are recorded too.
            duration = time.perf_counter() - start
            model = detect_model_from_sql(sql)

            raw_sql = get_raw_sql(sql, params)

            query_data = {
                "sql": raw_sql,
                "params": params,
                "time": round(duration, 6),
                "model": model,
                "many": many,
                "stack_trace": self._stack_trace(),
            }

            # NOTE(review): 0.05 duplicates SLOW_QUERY_THRESHOLD defined in
            # analyzer.py — confirm the two should stay in sync.
            if duration > 0.05:
                query_data["explain_plan"] = self.explain_query(sql, params)

            self.queries.append(query_data)
99
+
100
+
101
def generate_key() -> bytes:
    """Generate a fresh random 256-bit (32-byte) AES-GCM key."""
    return AESGCM.generate_key(bit_length=256)
104
+
105
def encrypt_data(data: dict, key: bytes) -> str:
    """Encrypt a JSON-serializable dict with AES-GCM; return base64(nonce||ct).

    Values JSON cannot serialize natively are stringified via ``default=str``.
    A fresh 12-byte nonce is generated per call and prepended to the
    ciphertext before base64 encoding.
    """
    serialized = json.dumps(data, default=str).encode("utf-8")
    nonce = os.urandom(12)
    sealed = AESGCM(key).encrypt(nonce, serialized, None)
    return base64.b64encode(nonce + sealed).decode("utf-8")
112
+
113
def decrypt_data(encrypted_b64: str, key_b64: str) -> dict:
    """Inverse of encrypt_data; both arguments arrive base64-encoded.

    Splits the decoded blob into the 12-byte nonce prefix and the
    ciphertext, decrypts with AES-GCM, and parses the plaintext JSON back
    into a dict.
    """
    blob = base64.b64decode(encrypted_b64)
    cipher = AESGCM(base64.b64decode(key_b64))
    plaintext = cipher.decrypt(blob[:12], blob[12:], None)
    return json.loads(plaintext)
@@ -0,0 +1,130 @@
1
+ import functools
2
+ import sys
3
+ import time
4
+ import linecache
5
+ import os
6
+
7
+
8
class StepProfiler:
    """Line-by-line execution tracer restricted to a single source file.

    Installs a sys.settrace hook that records every executed line whose file
    basename matches *target_file*, up to *max_steps* entries. stop()
    restores whatever trace function was installed before start().
    """

    def __init__(self, target_file=None, max_steps=2000):
        # Basename of the file whose lines should be recorded.
        self.target_file = target_file
        # Hard cap on recorded steps, bounding memory on long requests.
        self.max_steps = max_steps
        self.steps = []
        self.start_time = None
        # Trace function that was active before start(); restored by stop().
        self._original_trace = None

    def _trace(self, frame, event, arg):
        """sys.settrace callback; must never raise, hence the broad except."""
        try:
            if event != "line":
                return self._trace

            filename = os.path.basename(frame.f_code.co_filename)

            # Only track API file
            if filename != self.target_file:
                return self._trace

            if len(self.steps) >= self.max_steps:
                return self._trace

            lineno = frame.f_lineno
            func_name = frame.f_code.co_name
            code_line = linecache.getline(frame.f_code.co_filename, lineno).strip()

            step = {
                "file": filename,
                "function": func_name,
                "line_no": lineno,
                "code": code_line,
                "time_from_start": round(time.time() - self.start_time, 6),
            }

            self.steps.append(step)

        except Exception:
            # A failing trace hook would disturb the traced program; swallow.
            pass

        return self._trace

    def start(self):
        """Reset collected steps and install the trace hook."""
        self.steps = []
        self.start_time = time.time()
        self._original_trace = sys.gettrace()
        sys.settrace(self._trace)

    def stop(self):
        """Restore the previous trace hook and return the collected steps."""
        sys.settrace(self._original_trace)
        return self.steps
60
+
61
def human_readable_time(seconds: float) -> str:
    """Render a duration in the most natural unit (microseconds, ms, or s).

    Non-positive durations collapse to "0 ms".
    """
    if seconds <= 0:
        return "0 ms"
    if seconds < 0.001:
        return f"{round(seconds * 1_000_000, 2)} (Microseconds)"
    if seconds < 1:
        return f"{round(seconds * 1000, 3)} ms (Milliseconds)"
    return f"{round(seconds, 3)} s (Seconds)"
74
+
75
def format_steps(steps):
    """Number raw profiler steps and attach per-step execution time strings.

    The first step is assigned time 0; every later step's time is the delta
    between its time_from_start and the previous step's.
    """
    formatted = []
    previous = None

    for index, raw in enumerate(steps, start=1):
        elapsed = raw.get("time_from_start", 0)
        delta = 0 if previous is None else elapsed - previous
        previous = elapsed

        formatted.append({
            "step": index,
            "file": raw.get("file"),
            "function": raw.get("function"),
            "line_no": raw.get("line_no"),
            "code": raw.get("code"),
            "execution_time": human_readable_time(delta),
        })

    return formatted
100
+
101
+
102
def with_steps(view_func):
    """Decorator: trace the decorated view's own source file line by line.

    Only lines from the file defining *view_func* are profiled. When the
    returned response exposes a dict ``.data`` attribute (DRF-style), the
    formatted step list is embedded under ``response.data["profiler"]``.
    """

    @functools.wraps(view_func)
    def wrapper(*args, **kwargs):
        target = os.path.basename(view_func.__code__.co_filename)
        profiler = StepProfiler(target_file=target)

        profiler.start()
        try:
            response = view_func(*args, **kwargs)
        finally:
            # Always detach the trace hook, even if the view raised.
            steps = profiler.stop()

        formatted_steps = format_steps(steps)

        if hasattr(response, "data") and isinstance(response.data, dict):
            response.data["profiler"] = {
                "total_steps": len(formatted_steps),
                "steps": formatted_steps,
            }

        return response

    return wrapper
@@ -0,0 +1,51 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "ami-djAPI-analyzing"
7
+ version = "0.1.0"
8
+ description = "Lightweight Django middleware for profiling SQL queries, detecting performance issues, and analyzing request execution."
9
+ readme = "README.md"
10
+ requires-python = ">=3.9"
11
+
12
+ authors = [
13
+ { name = "Ami" }
14
+ ]
15
+
16
+ license = { text = "MIT" }
17
+
18
+ keywords = [
19
+ "django",
20
+ "django-profiler",
21
+ "sql-profiler",
22
+ "performance",
23
+ "django-middleware",
24
+ "debugging"
25
+ ]
26
+
27
+ dependencies = [
28
+ "Django>=3.2",
29
+ "cryptography>=41.0"
30
+ ]
31
+
32
+ classifiers = [
33
+ "Framework :: Django",
34
+ "Programming Language :: Python :: 3",
35
+ "Programming Language :: Python :: 3.9",
36
+ "Programming Language :: Python :: 3.10",
37
+ "Programming Language :: Python :: 3.11",
38
+ "License :: OSI Approved :: MIT License",
39
+ "Operating System :: OS Independent",
40
+ ]
41
+
42
+ [tool.setuptools.packages.find]
43
+ where = ["."]
44
+ include = ["ami_djapi_analyzing*"]
45
+
46
+ [project.optional-dependencies]
47
+ dev = [
48
+ "pytest",
49
+ "black",
50
+ "flake8"
51
+ ]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+