gwc-pybundle 2.1.2__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of gwc-pybundle might be problematic.
- gwc_pybundle-2.1.2.dist-info/METADATA +903 -0
- gwc_pybundle-2.1.2.dist-info/RECORD +82 -0
- gwc_pybundle-2.1.2.dist-info/WHEEL +5 -0
- gwc_pybundle-2.1.2.dist-info/entry_points.txt +2 -0
- gwc_pybundle-2.1.2.dist-info/licenses/LICENSE.md +25 -0
- gwc_pybundle-2.1.2.dist-info/top_level.txt +1 -0
- pybundle/__init__.py +0 -0
- pybundle/__main__.py +4 -0
- pybundle/cli.py +546 -0
- pybundle/context.py +404 -0
- pybundle/doctor.py +148 -0
- pybundle/filters.py +228 -0
- pybundle/manifest.py +77 -0
- pybundle/packaging.py +45 -0
- pybundle/policy.py +132 -0
- pybundle/profiles.py +454 -0
- pybundle/roadmap_model.py +42 -0
- pybundle/roadmap_scan.py +328 -0
- pybundle/root_detect.py +14 -0
- pybundle/runner.py +180 -0
- pybundle/steps/__init__.py +26 -0
- pybundle/steps/ai_context.py +791 -0
- pybundle/steps/api_docs.py +219 -0
- pybundle/steps/asyncio_analysis.py +358 -0
- pybundle/steps/bandit.py +72 -0
- pybundle/steps/base.py +20 -0
- pybundle/steps/blocking_call_detection.py +291 -0
- pybundle/steps/call_graph.py +219 -0
- pybundle/steps/compileall.py +76 -0
- pybundle/steps/config_docs.py +319 -0
- pybundle/steps/config_validation.py +302 -0
- pybundle/steps/container_image.py +294 -0
- pybundle/steps/context_expand.py +272 -0
- pybundle/steps/copy_pack.py +293 -0
- pybundle/steps/coverage.py +101 -0
- pybundle/steps/cprofile_step.py +166 -0
- pybundle/steps/dependency_sizes.py +136 -0
- pybundle/steps/django_checks.py +214 -0
- pybundle/steps/dockerfile_lint.py +282 -0
- pybundle/steps/dockerignore.py +311 -0
- pybundle/steps/duplication.py +103 -0
- pybundle/steps/env_completeness.py +269 -0
- pybundle/steps/env_var_usage.py +253 -0
- pybundle/steps/error_refs.py +204 -0
- pybundle/steps/event_loop_patterns.py +280 -0
- pybundle/steps/exception_patterns.py +190 -0
- pybundle/steps/fastapi_integration.py +250 -0
- pybundle/steps/flask_debugging.py +312 -0
- pybundle/steps/git_analytics.py +315 -0
- pybundle/steps/handoff_md.py +176 -0
- pybundle/steps/import_time.py +175 -0
- pybundle/steps/interrogate.py +106 -0
- pybundle/steps/license_scan.py +96 -0
- pybundle/steps/line_profiler.py +117 -0
- pybundle/steps/link_validation.py +287 -0
- pybundle/steps/logging_analysis.py +233 -0
- pybundle/steps/memory_profile.py +176 -0
- pybundle/steps/migration_history.py +336 -0
- pybundle/steps/mutation_testing.py +141 -0
- pybundle/steps/mypy.py +103 -0
- pybundle/steps/orm_optimization.py +316 -0
- pybundle/steps/pip_audit.py +45 -0
- pybundle/steps/pipdeptree.py +62 -0
- pybundle/steps/pylance.py +562 -0
- pybundle/steps/pytest.py +66 -0
- pybundle/steps/query_pattern_analysis.py +334 -0
- pybundle/steps/radon.py +161 -0
- pybundle/steps/repro_md.py +161 -0
- pybundle/steps/rg_scans.py +78 -0
- pybundle/steps/roadmap.py +153 -0
- pybundle/steps/ruff.py +117 -0
- pybundle/steps/secrets_detection.py +235 -0
- pybundle/steps/security_headers.py +309 -0
- pybundle/steps/shell.py +74 -0
- pybundle/steps/slow_tests.py +178 -0
- pybundle/steps/sqlalchemy_validation.py +269 -0
- pybundle/steps/test_flakiness.py +184 -0
- pybundle/steps/tree.py +116 -0
- pybundle/steps/type_coverage.py +277 -0
- pybundle/steps/unused_deps.py +211 -0
- pybundle/steps/vulture.py +167 -0
- pybundle/tools.py +63 -0
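Neither `Step` nor `StepResult` appears in this diff (`pybundle/steps/base.py` is only 20 lines), but the call sites in the two steps below pin down their shape fairly well. A hypothetical sketch for orientation, not the actual file:

# Hypothetical reconstruction of pybundle/steps/base.py, inferred from how
# the steps shown below construct results; the real 20-line file may differ.
from dataclasses import dataclass


@dataclass
class StepResult:
    name: str          # step name, e.g. "logging analysis"
    status: str        # "OK", "SKIP", or "FAIL", as used by the steps below
    elapsed: int       # duration (the steps below pass seconds or milliseconds)
    message: str = ""  # optional detail, e.g. "pytest not found"


class Step:
    """Base class for bundle steps; note MemoryProfileStep below duck-types
    this interface as a dataclass rather than subclassing."""

    name: str = ""

    def run(self, ctx) -> StepResult:  # ctx is a BundleContext
        raise NotImplementedError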
pybundle/steps/logging_analysis.py
@@ -0,0 +1,233 @@
"""
Step: Logging Analysis
Analyze logging calls and log level distribution.
"""

import ast
import re
from pathlib import Path
from typing import Dict, List

from .base import Step, StepResult


class LoggingAnalysisStep(Step):
    """Analyze logging calls and log level distribution in Python code."""

    name = "logging analysis"

    def run(self, ctx: "BundleContext") -> StepResult:  # type: ignore[name-defined]
        """Extract logging calls and analyze log level distribution."""
        import time

        start = time.time()

        root = ctx.root
        python_files = sorted(root.rglob("*.py"))
        if not python_files:
            return StepResult(self.name, "SKIP", int(time.time() - start), "No Python files found")

        # Track logging patterns
        log_levels: Dict[str, List[str]] = {
            "debug": [],
            "info": [],
            "warning": [],
            "error": [],
            "critical": [],
            "exception": [],
        }
        logging_configs = []
        logger_names = set()
        analyzed_files = 0

        # Common logging patterns to detect
        logging_import_pattern = re.compile(r"import\s+logging")
        getlogger_pattern = re.compile(r"logging\.getLogger\(['\"]?([^'\")\s]+)?['\"]?\)")
        basicconfig_pattern = re.compile(r"logging\.basicConfig\(")

        for py_file in python_files:
            # Skip non-user code
            if any(
                part in py_file.parts
                for part in [
                    "venv",
                    ".venv",
                    "env",
                    "site-packages",
                    "__pycache__",
                    ".git",
                    "node_modules",
                ]
            ):
                continue

            analyzed_files += 1

            try:
                source = py_file.read_text(encoding="utf-8", errors="ignore")

                # Check for logging usage
                if "logging" not in source and "logger" not in source:
                    continue

                rel_path = py_file.relative_to(root)

                # Find logging configurations
                if basicconfig_pattern.search(source):
                    logging_configs.append(str(rel_path))

                # Find logger names
                for match in getlogger_pattern.finditer(source):
                    logger_name = match.group(1) or "__main__"
                    logger_names.add(logger_name)

                # Parse AST to find logging calls
                try:
                    tree = ast.parse(source, str(py_file))

                    for node in ast.walk(tree):
                        if isinstance(node, ast.Call):
                            # Check if it's a logging call
                            level = self._extract_log_level(node)
                            if level:
                                location = f"{rel_path}:{node.lineno}"
                                log_levels[level].append(location)

                except SyntaxError:
                    continue

            except (UnicodeDecodeError, OSError):
                continue

        # Calculate statistics
        total_logs = sum(len(locations) for locations in log_levels.values())

        # Generate report
        lines = [
            "=" * 80,
            "LOGGING ANALYSIS",
            "=" * 80,
            "",
            f"Total Python files analyzed: {analyzed_files}",
            f"Total logging calls found: {total_logs}",
            f"Logging configurations found: {len(logging_configs)}",
            f"Unique logger names: {len(logger_names)}",
            "",
        ]

        # Log level distribution
        if total_logs > 0:
            lines.extend(
                [
                    "=" * 80,
                    "LOG LEVEL DISTRIBUTION",
                    "=" * 80,
                    "",
                ]
            )

            for level in ["debug", "info", "warning", "error", "critical", "exception"]:
                count = len(log_levels[level])
                percentage = (count / total_logs * 100) if total_logs > 0 else 0
                lines.append(f"{level.upper():12} {count:6} ({percentage:5.1f}%)")

            lines.append("")

            # Show sample locations for each level
            lines.extend(
                [
                    "=" * 80,
                    "LOG LEVEL LOCATIONS (sample)",
                    "=" * 80,
                    "",
                ]
            )

            for level in ["debug", "info", "warning", "error", "critical", "exception"]:
                locations = log_levels[level]
                if locations:
                    lines.append(f"{level.upper()}: {len(locations)} occurrence(s)")
                    for loc in locations[:5]:  # Show first 5
                        lines.append(f" - {loc}")
                    if len(locations) > 5:
                        lines.append(f" ... and {len(locations) - 5} more")
                    lines.append("")

        # Logger names
        if logger_names:
            lines.extend(
                [
                    "=" * 80,
                    "LOGGER NAMES",
                    "=" * 80,
                    "",
                ]
            )
            for name in sorted(logger_names):
                lines.append(f" - {name}")
            lines.append("")

        # Logging configurations
        if logging_configs:
            lines.extend(
                [
                    "=" * 80,
                    "LOGGING CONFIGURATIONS (basicConfig)",
                    "=" * 80,
                    "",
                ]
            )
            for config in logging_configs:
                lines.append(f" - {config}")
            lines.append("")

        # Recommendations
        lines.extend(
            [
                "=" * 80,
                "RECOMMENDATIONS",
                "=" * 80,
                "",
            ]
        )

        if total_logs == 0:
            lines.append(" - Consider adding logging to improve debuggability")
        else:
            debug_pct = len(log_levels["debug"]) / total_logs * 100 if total_logs > 0 else 0
            info_pct = len(log_levels["info"]) / total_logs * 100 if total_logs > 0 else 0

            if debug_pct > 50:
                lines.append(" - High percentage of DEBUG logs; consider reducing in production")
            if info_pct < 10 and total_logs > 10:
                lines.append(" - Low INFO logging; consider adding more informational logs")
            if len(log_levels["exception"]) == 0 and len(log_levels["error"]) > 0:
                lines.append(" - Consider using logger.exception() in exception handlers for stack traces")

        lines.append("")

        # Write report
        output = "\n".join(lines)
        dest = ctx.workdir / "meta" / "102_logging_analysis.txt"
        dest.parent.mkdir(parents=True, exist_ok=True)
        dest.write_text(output, encoding="utf-8")

        elapsed = int(time.time() - start)
        return StepResult(self.name, "OK", elapsed, "")

    def _extract_log_level(self, node: ast.Call) -> str:
        """Extract log level from logging call."""
        # Check for logger.level() or logging.level()
        if isinstance(node.func, ast.Attribute):
            method = node.func.attr.lower()
            if method in ["debug", "info", "warning", "error", "critical", "exception"]:
                # Verify it's a logging call
                if isinstance(node.func.value, ast.Name):
                    var_name = node.func.value.id.lower()
                    if "log" in var_name:
                        return method
                elif isinstance(node.func.value, ast.Attribute):
                    # logging.getLogger().debug()
                    return method

        return ""
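The `_extract_log_level` heuristic above accepts a `<receiver>.<level>(...)` call when the receiver is a plain name containing "log", or when the receiver is itself an attribute access (e.g. `self.log.info(...)`). Worth noting: a chained `logging.getLogger(__name__).info(...)` call has an `ast.Call` receiver, so it slips past both branches despite the comment. A minimal standalone sketch of the same check (illustrative only, not part of the package):

# Standalone demonstration of the receiver heuristic from _extract_log_level.
import ast

SOURCE = """
logger.warning("disk almost full")
log.debug("raw payload: %r", payload)
client.error("not a logging call")
self.log.info("request handled")
"""

LEVELS = {"debug", "info", "warning", "error", "critical", "exception"}

for node in ast.walk(ast.parse(SOURCE)):
    if not (isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute)):
        continue
    method = node.func.attr.lower()
    if method not in LEVELS:
        continue
    recv = node.func.value
    if isinstance(recv, ast.Name) and "log" in recv.id.lower():
        print(f"line {node.lineno}: {method}")  # logger.*, log.* -- matched
    elif isinstance(recv, ast.Attribute):
        print(f"line {node.lineno}: {method}")  # self.log.* -- matched
    # client.error(...) falls through: "log" is not in "client"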
pybundle/steps/memory_profile.py
@@ -0,0 +1,176 @@
"""
Memory profiling with tracemalloc - Milestone 3 (v1.4.0)
"""

from __future__ import annotations

import subprocess
import time
from dataclasses import dataclass
from pathlib import Path

from .base import StepResult
from ..context import BundleContext


@dataclass
class MemoryProfileStep:
    """
    Memory profiling using tracemalloc to identify memory-consuming operations.

    Outputs:
        - logs/62_memory_profile.txt: Top memory-consuming functions and allocations
    """

    name: str = "memory_profile"

    def run(self, ctx: BundleContext) -> StepResult:
        start = time.time()

        if ctx.options.no_profile or not ctx.options.profile_memory:
            return StepResult(self.name, "SKIP", 0, "memory profiling not enabled")

        # Require pytest for memory profiling
        if not ctx.tools.pytest:
            return StepResult(self.name, "SKIP", 0, "pytest not found")

        tests_dir = ctx.root / "tests"
        if not tests_dir.is_dir():
            return StepResult(self.name, "SKIP", 0, "no tests/ directory")

        ctx.emit(" Running memory profiling on test suite")

        # Create a temporary script to run pytest with tracemalloc
        profile_script = self._create_profile_script(ctx.root)

        try:
            # Run the profiling script
            result = subprocess.run(
                [str(ctx.tools.python), str(profile_script)],
                cwd=ctx.root,
                capture_output=True,
                text=True,
                timeout=300,  # 5 minute timeout
            )

            # Write output
            output_file = ctx.workdir / "logs" / "62_memory_profile.txt"
            output_file.parent.mkdir(parents=True, exist_ok=True)

            with output_file.open("w") as f:
                f.write("=" * 70 + "\n")
                f.write("MEMORY PROFILING (tracemalloc)\n")
                f.write("=" * 70 + "\n\n")

                if result.returncode == 0:
                    f.write(result.stdout)
                    if result.stderr:
                        f.write("\n\nTest output:\n")
                        f.write(result.stderr)
                else:
                    f.write("Memory profiling failed\n\n")
                    f.write("STDOUT:\n")
                    f.write(result.stdout)
                    f.write("\n\nSTDERR:\n")
                    f.write(result.stderr)

            elapsed = int((time.time() - start) * 1000)
            if result.returncode == 0:
                return StepResult(self.name, "OK", elapsed)
            else:
                return StepResult(
                    self.name, "FAIL", elapsed, f"exit {result.returncode}"
                )

        except subprocess.TimeoutExpired:
            elapsed = int((time.time() - start) * 1000)
            return StepResult(self.name, "FAIL", elapsed, "timeout")
        except Exception as e:
            elapsed = int((time.time() - start) * 1000)
            return StepResult(self.name, "FAIL", elapsed, str(e))
        finally:
            # Clean up temporary script
            if profile_script.exists():
                profile_script.unlink()

    def _create_profile_script(self, root: Path) -> Path:
        """Create a temporary Python script that runs pytest with tracemalloc"""
        script_path = root / ".pybundle_memory_profile.py"

        script_content = '''"""Temporary memory profiling script for pybundle"""
import tracemalloc
import sys
import pytest

def format_size(size_bytes):
    """Format bytes as human-readable string"""
    for unit in ['B', 'KB', 'MB', 'GB']:
        if size_bytes < 1024.0:
            return f"{size_bytes:.2f} {unit}"
        size_bytes /= 1024.0
    return f"{size_bytes:.2f} TB"

# Start tracing
tracemalloc.start()

# Record initial snapshot
snapshot1 = tracemalloc.take_snapshot()

# Run pytest
exit_code = pytest.main(["-q"])

# Take final snapshot
snapshot2 = tracemalloc.take_snapshot()

# Get traced memory
current, peak = tracemalloc.get_traced_memory()

# Stop tracing
tracemalloc.stop()

# Analyze differences
print("=" * 70)
print("MEMORY ALLOCATION SUMMARY")
print("=" * 70)
print()
print(f"Peak memory usage: {format_size(peak)}")
print()

# Top allocations
top_stats = snapshot2.compare_to(snapshot1, 'lineno')

print("TOP 30 MEMORY ALLOCATIONS (by increase):")
print("-" * 70)
print(f"{'Size':<12} {'Count':<8} {'Location'}")
print("-" * 70)

for stat in top_stats[:30]:
    print(f"{format_size(stat.size):<12} {stat.count:<8} {stat.traceback}")

# Top by current size
print()
print("=" * 70)
print("TOP 30 MEMORY CONSUMERS (by total size):")
print("-" * 70)
print(f"{'Size':<12} {'Count':<8} {'Location'}")
print("-" * 70)

current_stats = snapshot2.statistics('lineno')
for stat in current_stats[:30]:
    print(f"{format_size(stat.size):<12} {stat.count:<8} {stat.traceback}")

print()
print("=" * 70)
print("RECOMMENDATIONS:")
print("- Review functions with large memory allocations")
print("- Check for memory leaks in repeatedly allocated objects")
print("- Consider using generators for large data processing")
print("=" * 70)

sys.exit(exit_code)
'''

        with script_path.open("w") as f:
            f.write(script_content)

        return script_path
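The generated helper script relies on tracemalloc's snapshot diffing. The core pattern, reduced to a self-contained sketch with a toy workload standing in for `pytest.main()` (illustrative only):

# Self-contained sketch of the tracemalloc pattern the generated script uses:
# snapshot before, run the workload, snapshot after, diff by source line.
import tracemalloc

tracemalloc.start()
before = tracemalloc.take_snapshot()

data = [str(i) * 10 for i in range(50_000)]  # workload under measurement

after = tracemalloc.take_snapshot()
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()

print(f"kept {len(data)} objects; peak traced memory: {peak / 1024:.1f} KiB")
for stat in after.compare_to(before, "lineno")[:5]:  # top 5 growth sites
    print(stat)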