pdd-cli 0.0.90__py3-none-any.whl → 0.0.121__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +38 -6
- pdd/agentic_bug.py +323 -0
- pdd/agentic_bug_orchestrator.py +506 -0
- pdd/agentic_change.py +231 -0
- pdd/agentic_change_orchestrator.py +537 -0
- pdd/agentic_common.py +533 -770
- pdd/agentic_crash.py +2 -1
- pdd/agentic_e2e_fix.py +319 -0
- pdd/agentic_e2e_fix_orchestrator.py +582 -0
- pdd/agentic_fix.py +118 -3
- pdd/agentic_update.py +27 -9
- pdd/agentic_verify.py +3 -2
- pdd/architecture_sync.py +565 -0
- pdd/auth_service.py +210 -0
- pdd/auto_deps_main.py +63 -53
- pdd/auto_include.py +236 -3
- pdd/auto_update.py +125 -47
- pdd/bug_main.py +195 -23
- pdd/cmd_test_main.py +345 -197
- pdd/code_generator.py +4 -2
- pdd/code_generator_main.py +118 -32
- pdd/commands/__init__.py +6 -0
- pdd/commands/analysis.py +113 -48
- pdd/commands/auth.py +309 -0
- pdd/commands/connect.py +358 -0
- pdd/commands/fix.py +155 -114
- pdd/commands/generate.py +5 -0
- pdd/commands/maintenance.py +3 -2
- pdd/commands/misc.py +8 -0
- pdd/commands/modify.py +225 -163
- pdd/commands/sessions.py +284 -0
- pdd/commands/utility.py +12 -7
- pdd/construct_paths.py +334 -32
- pdd/context_generator_main.py +167 -170
- pdd/continue_generation.py +6 -3
- pdd/core/__init__.py +33 -0
- pdd/core/cli.py +44 -7
- pdd/core/cloud.py +237 -0
- pdd/core/dump.py +68 -20
- pdd/core/errors.py +4 -0
- pdd/core/remote_session.py +61 -0
- pdd/crash_main.py +219 -23
- pdd/data/llm_model.csv +4 -4
- pdd/docs/prompting_guide.md +864 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/benchmark_analysis.py +495 -0
- pdd/docs/whitepaper_with_benchmarks/data_and_functions/creation_compare.py +528 -0
- pdd/fix_code_loop.py +208 -34
- pdd/fix_code_module_errors.py +6 -2
- pdd/fix_error_loop.py +291 -38
- pdd/fix_main.py +208 -6
- pdd/fix_verification_errors_loop.py +235 -26
- pdd/fix_verification_main.py +269 -83
- pdd/frontend/dist/assets/index-B5DZHykP.css +1 -0
- pdd/frontend/dist/assets/index-CUWd8al1.js +450 -0
- pdd/frontend/dist/index.html +376 -0
- pdd/frontend/dist/logo.svg +33 -0
- pdd/generate_output_paths.py +46 -5
- pdd/generate_test.py +212 -151
- pdd/get_comment.py +19 -44
- pdd/get_extension.py +8 -9
- pdd/get_jwt_token.py +309 -20
- pdd/get_language.py +8 -7
- pdd/get_run_command.py +7 -5
- pdd/insert_includes.py +2 -1
- pdd/llm_invoke.py +531 -97
- pdd/load_prompt_template.py +15 -34
- pdd/operation_log.py +342 -0
- pdd/path_resolution.py +140 -0
- pdd/postprocess.py +122 -97
- pdd/preprocess.py +68 -12
- pdd/preprocess_main.py +33 -1
- pdd/prompts/agentic_bug_step10_pr_LLM.prompt +182 -0
- pdd/prompts/agentic_bug_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_bug_step2_docs_LLM.prompt +129 -0
- pdd/prompts/agentic_bug_step3_triage_LLM.prompt +95 -0
- pdd/prompts/agentic_bug_step4_reproduce_LLM.prompt +97 -0
- pdd/prompts/agentic_bug_step5_root_cause_LLM.prompt +123 -0
- pdd/prompts/agentic_bug_step6_test_plan_LLM.prompt +107 -0
- pdd/prompts/agentic_bug_step7_generate_LLM.prompt +172 -0
- pdd/prompts/agentic_bug_step8_verify_LLM.prompt +119 -0
- pdd/prompts/agentic_bug_step9_e2e_test_LLM.prompt +289 -0
- pdd/prompts/agentic_change_step10_identify_issues_LLM.prompt +1006 -0
- pdd/prompts/agentic_change_step11_fix_issues_LLM.prompt +984 -0
- pdd/prompts/agentic_change_step12_create_pr_LLM.prompt +140 -0
- pdd/prompts/agentic_change_step1_duplicate_LLM.prompt +73 -0
- pdd/prompts/agentic_change_step2_docs_LLM.prompt +101 -0
- pdd/prompts/agentic_change_step3_research_LLM.prompt +126 -0
- pdd/prompts/agentic_change_step4_clarify_LLM.prompt +164 -0
- pdd/prompts/agentic_change_step5_docs_change_LLM.prompt +981 -0
- pdd/prompts/agentic_change_step6_devunits_LLM.prompt +1005 -0
- pdd/prompts/agentic_change_step7_architecture_LLM.prompt +1044 -0
- pdd/prompts/agentic_change_step8_analyze_LLM.prompt +1027 -0
- pdd/prompts/agentic_change_step9_implement_LLM.prompt +1077 -0
- pdd/prompts/agentic_e2e_fix_step1_unit_tests_LLM.prompt +90 -0
- pdd/prompts/agentic_e2e_fix_step2_e2e_tests_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step3_root_cause_LLM.prompt +89 -0
- pdd/prompts/agentic_e2e_fix_step4_fix_e2e_tests_LLM.prompt +96 -0
- pdd/prompts/agentic_e2e_fix_step5_identify_devunits_LLM.prompt +91 -0
- pdd/prompts/agentic_e2e_fix_step6_create_unit_tests_LLM.prompt +106 -0
- pdd/prompts/agentic_e2e_fix_step7_verify_tests_LLM.prompt +116 -0
- pdd/prompts/agentic_e2e_fix_step8_run_pdd_fix_LLM.prompt +120 -0
- pdd/prompts/agentic_e2e_fix_step9_verify_all_LLM.prompt +146 -0
- pdd/prompts/agentic_fix_primary_LLM.prompt +2 -2
- pdd/prompts/agentic_update_LLM.prompt +192 -338
- pdd/prompts/auto_include_LLM.prompt +22 -0
- pdd/prompts/change_LLM.prompt +3093 -1
- pdd/prompts/detect_change_LLM.prompt +571 -14
- pdd/prompts/fix_code_module_errors_LLM.prompt +8 -0
- pdd/prompts/fix_errors_from_unit_tests_LLM.prompt +1 -0
- pdd/prompts/generate_test_LLM.prompt +19 -1
- pdd/prompts/generate_test_from_example_LLM.prompt +366 -0
- pdd/prompts/insert_includes_LLM.prompt +262 -252
- pdd/prompts/prompt_code_diff_LLM.prompt +123 -0
- pdd/prompts/prompt_diff_LLM.prompt +82 -0
- pdd/remote_session.py +876 -0
- pdd/server/__init__.py +52 -0
- pdd/server/app.py +335 -0
- pdd/server/click_executor.py +587 -0
- pdd/server/executor.py +338 -0
- pdd/server/jobs.py +661 -0
- pdd/server/models.py +241 -0
- pdd/server/routes/__init__.py +31 -0
- pdd/server/routes/architecture.py +451 -0
- pdd/server/routes/auth.py +364 -0
- pdd/server/routes/commands.py +929 -0
- pdd/server/routes/config.py +42 -0
- pdd/server/routes/files.py +603 -0
- pdd/server/routes/prompts.py +1347 -0
- pdd/server/routes/websocket.py +473 -0
- pdd/server/security.py +243 -0
- pdd/server/terminal_spawner.py +217 -0
- pdd/server/token_counter.py +222 -0
- pdd/summarize_directory.py +236 -237
- pdd/sync_animation.py +8 -4
- pdd/sync_determine_operation.py +329 -47
- pdd/sync_main.py +272 -28
- pdd/sync_orchestration.py +289 -211
- pdd/sync_order.py +304 -0
- pdd/template_expander.py +161 -0
- pdd/templates/architecture/architecture_json.prompt +41 -46
- pdd/trace.py +1 -1
- pdd/track_cost.py +0 -13
- pdd/unfinished_prompt.py +2 -1
- pdd/update_main.py +68 -26
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.121.dist-info}/METADATA +15 -10
- pdd_cli-0.0.121.dist-info/RECORD +229 -0
- pdd_cli-0.0.90.dist-info/RECORD +0 -153
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.121.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.121.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.121.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.90.dist-info → pdd_cli-0.0.121.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,528 @@
|
|
|
1
|
+
import csv
|
|
2
|
+
import os
|
|
3
|
+
import re
|
|
4
|
+
import pandas as pd
|
|
5
|
+
import matplotlib.pyplot as plt
|
|
6
|
+
import seaborn as sns
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from io import StringIO
|
|
9
|
+
|
|
10
|
+
# --- Configuration ---
# Input CSVs: per-run Claude metrics and per-module PDD metrics.
# NOTE(review): paths are relative to the current working directory —
# the script presumably runs from the repo root; confirm before use.
CLAUDE_CSV_PATH = Path('analysis/claude_creation.csv')
PDD_CSV_PATH = Path('analysis/PDD_creation.csv')
# Directory where all generated plots and the markdown report are written.
OUTPUT_DIR = Path('creation_report')
|
|
14
|
+
|
|
15
|
+
# --- Helper Functions ---
|
|
16
|
+
|
|
17
|
+
def parse_duration_to_seconds(duration_str: str) -> float:
    """Convert a duration string (e.g. "1h 35m 33.0s") to total seconds.

    Any of the hour/minute/second components may be absent; each missing
    component simply contributes zero. NaN or non-string input yields 0.0.
    """
    if pd.isna(duration_str) or not isinstance(duration_str, str):
        return 0.0

    # Pair each unit's regex with its multiplier and accumulate the total.
    total = 0.0
    unit_specs = (
        (r'(\d+)h', 3600.0),
        (r'(\d+)m', 60.0),
        (r'([\d\.]+)s', 1.0),
    )
    for pattern, factor in unit_specs:
        match = re.search(pattern, duration_str)
        if match:
            total += float(match.group(1)) * factor

    return total
|
|
41
|
+
|
|
42
|
+
def format_seconds_to_hms(total_seconds: float) -> str:
    """Render a seconds count as a compact human-readable "Xh Ym Z.Ws".

    Leading zero-valued components are omitted; NaN renders as "N/A".
    """
    if pd.isna(total_seconds):
        return "N/A"

    # Split into hour/minute components, leaving the fractional seconds.
    hours, remainder = divmod(total_seconds, 3600)
    minutes, remainder = divmod(remainder, 60)
    hours, minutes = int(hours), int(minutes)

    pieces = []
    if hours > 0:
        pieces.append(f"{hours}h")
    if minutes > 0:
        pieces.append(f"{minutes}m")
    # Seconds are shown whenever non-zero, or as the sole component.
    if remainder > 0 or not pieces:
        pieces.append(f"{remainder:.1f}s")

    return " ".join(pieces) if pieces else "0.0s"
|
|
64
|
+
|
|
65
|
+
def load_and_preprocess_claude_data(csv_path: Path) -> pd.DataFrame | None:
    """Load and preprocess the Claude creation data.

    Falls back to an embedded sample dataset when *csv_path* does not
    exist, so the analysis stays runnable for demonstration purposes.

    Returns:
        The cleaned DataFrame with added `api_duration_seconds` and
        `wall_duration_seconds` columns, or None on load/validation failure.
    """
    print(f"Loading Claude data from: {csv_path}")
    try:
        # Use the provided CSV data for demonstration if actual file not found
        # This is for making the script runnable with the prompt's data
        if not csv_path.exists():
            print(f"Warning: File {csv_path} not found. Using example data from prompt.")
            claude_csv_content = """Total Cost,API Duration,Wall Duration,Lines Added,Lines Removed
$9.97,35m 28.3s,1h 35m 33.0s,4265,597
$10.46,40m 53.6s,1h 31m 13.2s,2011,498
$0.2631,1m 12.2s,3m 18.2s,4,4
$0.83,5m 52.4s,16m 3.8s,67,6
$7.01,39m 56.0s,2h 52m 47.2s,812,295
"""
            df_claude = pd.read_csv(StringIO(claude_csv_content), sep=',', quoting=csv.QUOTE_MINIMAL)
        else:
            df_claude = pd.read_csv(csv_path, sep=',', quoting=csv.QUOTE_MINIMAL)

    except FileNotFoundError:
        print(f"Error: Claude CSV file not found at {csv_path}")
        return None
    except Exception as e:
        print(f"Error loading Claude CSV: {e}")
        return None

    if df_claude.empty:
        print("Error: Claude CSV file is empty.")
        return None

    print("\nClaude data preview (raw):")
    print(df_claude.head())

    # Standardize column names (e.g. "Total Cost" -> "total_cost")
    df_claude.columns = [col.strip().replace(' ', '_').lower() for col in df_claude.columns]
    print("\nClaude columns (standardized):", df_claude.columns.tolist())

    # Verify required columns
    required_cols = ['total_cost', 'api_duration', 'wall_duration', 'lines_added', 'lines_removed']
    for col in required_cols:
        if col not in df_claude.columns:
            print(f"Error: Missing required column '{col}' in Claude data.")
            return None

    # Clean and convert 'total_cost': strip the leading '$' and cast to float.
    # BUG FIX: the pattern must be a raw string — '\$' in a plain literal is
    # an invalid escape sequence (SyntaxWarning on Python 3.12+, future error).
    df_claude['total_cost'] = df_claude['total_cost'].replace({r'\$': ''}, regex=True).astype(float)

    # Convert duration strings (e.g. "1h 35m 33.0s") to seconds
    df_claude['api_duration_seconds'] = df_claude['api_duration'].apply(parse_duration_to_seconds)
    df_claude['wall_duration_seconds'] = df_claude['wall_duration'].apply(parse_duration_to_seconds)

    # Ensure 'lines_added' and 'lines_removed' are numeric (unparseable -> 0)
    df_claude['lines_added'] = pd.to_numeric(df_claude['lines_added'], errors='coerce').fillna(0).astype(int)
    df_claude['lines_removed'] = pd.to_numeric(df_claude['lines_removed'], errors='coerce').fillna(0).astype(int)

    print("\nClaude data preview (processed):")
    print(df_claude.head())
    print("\nClaude data types (processed):")
    print(df_claude.dtypes)

    return df_claude
|
|
127
|
+
|
|
128
|
+
def load_and_preprocess_pdd_data(csv_path: Path) -> pd.DataFrame | None:
|
|
129
|
+
"""Loads and preprocesses the PDD creation data."""
|
|
130
|
+
print(f"\nLoading PDD data from: {csv_path}")
|
|
131
|
+
try:
|
|
132
|
+
# Use the provided CSV data for demonstration if actual file not found
|
|
133
|
+
if not csv_path.exists():
|
|
134
|
+
print(f"Warning: File {csv_path} not found. Using example data from prompt.")
|
|
135
|
+
pdd_csv_content = """module,avg_time,total_time,avg_cost,total_cost
|
|
136
|
+
__init__,201.14583333333334,1206.875,0.2806173166666667,1.6837039000000003
|
|
137
|
+
anthropic_service,394.1295,2364.777,0.4338396916666667,2.60303815
|
|
138
|
+
caching_manager,131.4565,788.739,0.39087164166666666,2.34522985
|
|
139
|
+
cli,103.59766666666667,621.586,0.28302760833333335,1.69816565
|
|
140
|
+
config,268.2664285714286,1877.8650000000002,0.6513883928571429,4.55971875
|
|
141
|
+
cost_tracker,286.63933333333335,1719.8360000000002,0.3199597583333334,1.9197585500000003
|
|
142
|
+
edit_tool_impl,361.14799999999997,2166.888,0.5458140000000001,3.2748840000000006
|
|
143
|
+
file_handler,321.13228571428573,2247.926,0.5430141571428572,3.8010991
|
|
144
|
+
instruction_parser,286.99983333333336,1721.999,0.3802859916666666,2.2817159499999997
|
|
145
|
+
main_editor,1645.7278333333334,9874.367,0.5656376916666667,3.3938261499999998
|
|
146
|
+
prompts,109.875,659.25,0.2954146583333333,1.77248795
|
|
147
|
+
utils,51.00416666666666,306.025,0.09755935833333333,0.58535615
|
|
148
|
+
"""
|
|
149
|
+
df_pdd = pd.read_csv(StringIO(pdd_csv_content), sep=',', quoting=csv.QUOTE_MINIMAL)
|
|
150
|
+
else:
|
|
151
|
+
df_pdd = pd.read_csv(csv_path, sep=',', quoting=csv.QUOTE_MINIMAL)
|
|
152
|
+
|
|
153
|
+
except FileNotFoundError:
|
|
154
|
+
print(f"Error: PDD CSV file not found at {csv_path}")
|
|
155
|
+
return None
|
|
156
|
+
except Exception as e:
|
|
157
|
+
print(f"Error loading PDD CSV: {e}")
|
|
158
|
+
return None
|
|
159
|
+
|
|
160
|
+
if df_pdd.empty:
|
|
161
|
+
print("Error: PDD CSV file is empty.")
|
|
162
|
+
return None
|
|
163
|
+
|
|
164
|
+
print("\nPDD data preview (raw):")
|
|
165
|
+
print(df_pdd.head())
|
|
166
|
+
print("\nPDD columns:", df_pdd.columns.tolist())
|
|
167
|
+
|
|
168
|
+
# Verify required columns
|
|
169
|
+
required_cols = ['module', 'avg_time', 'total_time', 'avg_cost', 'total_cost']
|
|
170
|
+
for col in required_cols:
|
|
171
|
+
if col not in df_pdd.columns:
|
|
172
|
+
print(f"Error: Missing required column '{col}' in PDD data.")
|
|
173
|
+
return None
|
|
174
|
+
|
|
175
|
+
# Ensure numeric types (already float as per prompt, but good to verify)
|
|
176
|
+
for col in ['avg_time', 'total_time', 'avg_cost', 'total_cost']:
|
|
177
|
+
df_pdd[col] = pd.to_numeric(df_pdd[col], errors='coerce')
|
|
178
|
+
if df_pdd[col].isnull().any():
|
|
179
|
+
print(f"Warning: NaNs found in PDD column '{col}' after numeric conversion.")
|
|
180
|
+
df_pdd[col] = df_pdd[col].fillna(0)
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
print("\nPDD data preview (processed):")
|
|
184
|
+
print(df_pdd.head())
|
|
185
|
+
print("\nPDD data types (processed):")
|
|
186
|
+
print(df_pdd.dtypes)
|
|
187
|
+
|
|
188
|
+
return df_pdd
|
|
189
|
+
|
|
190
|
+
# --- Main Analysis Function ---
|
|
191
|
+
def perform_analysis():
|
|
192
|
+
"""Performs the comparative analysis and generates reports."""
|
|
193
|
+
|
|
194
|
+
# Create output directory
|
|
195
|
+
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
|
|
196
|
+
print(f"\nOutput will be saved to: {OUTPUT_DIR}")
|
|
197
|
+
|
|
198
|
+
# Load data
|
|
199
|
+
df_claude = load_and_preprocess_claude_data(CLAUDE_CSV_PATH)
|
|
200
|
+
df_pdd = load_and_preprocess_pdd_data(PDD_CSV_PATH)
|
|
201
|
+
|
|
202
|
+
if df_claude is None or df_pdd is None:
|
|
203
|
+
print("\nExiting due to data loading errors.")
|
|
204
|
+
return
|
|
205
|
+
|
|
206
|
+
# Initialize markdown report content
|
|
207
|
+
report_md = f"# Comparative Analysis: Claude vs. PDD Creation Task\n\n"
|
|
208
|
+
report_md += f"Analysis based on data from `{CLAUDE_CSV_PATH.name}` and `{PDD_CSV_PATH.name}`.\n\n"
|
|
209
|
+
|
|
210
|
+
# --- Cost Analysis ---
|
|
211
|
+
print("\n--- Cost Analysis ---")
|
|
212
|
+
report_md += "## 1. Cost Analysis\n\n"
|
|
213
|
+
|
|
214
|
+
# Claude Cost
|
|
215
|
+
claude_total_cost = df_claude['total_cost'].sum()
|
|
216
|
+
claude_avg_cost_per_run = df_claude['total_cost'].mean()
|
|
217
|
+
claude_num_runs = len(df_claude)
|
|
218
|
+
print(f"Claude - Total Cost: ${claude_total_cost:.2f}")
|
|
219
|
+
print(f"Claude - Number of Runs: {claude_num_runs}")
|
|
220
|
+
print(f"Claude - Average Cost per Run: ${claude_avg_cost_per_run:.2f}")
|
|
221
|
+
|
|
222
|
+
report_md += "### 1.1. Claude Creation\n"
|
|
223
|
+
report_md += f"- Total Cost: ${claude_total_cost:.2f}\n"
|
|
224
|
+
report_md += f"- Number of Runs: {claude_num_runs}\n"
|
|
225
|
+
report_md += f"- Average Cost per Run: ${claude_avg_cost_per_run:.2f}\n"
|
|
226
|
+
report_md += f"- Cost per Run Statistics:\n{df_claude['total_cost'].describe().to_markdown()}\n\n"
|
|
227
|
+
|
|
228
|
+
fig, ax = plt.subplots(figsize=(8, 5))
|
|
229
|
+
sns.boxplot(x=df_claude['total_cost'], ax=ax)
|
|
230
|
+
ax.set_title('Claude: Cost per Run Distribution')
|
|
231
|
+
ax.set_xlabel('Cost per Run ($)')
|
|
232
|
+
plt.tight_layout()
|
|
233
|
+
plot_path = OUTPUT_DIR / 'claude_cost_per_run_dist.png'
|
|
234
|
+
fig.savefig(plot_path)
|
|
235
|
+
plt.close(fig)
|
|
236
|
+
report_md += f"\n\n"
|
|
237
|
+
|
|
238
|
+
# PDD Cost
|
|
239
|
+
pdd_total_cost = df_pdd['total_cost'].sum()
|
|
240
|
+
pdd_avg_cost_per_module = df_pdd['avg_cost'].mean() # Mean of 'avg_cost' column
|
|
241
|
+
pdd_num_modules = len(df_pdd)
|
|
242
|
+
print(f"PDD - Total Cost: ${pdd_total_cost:.2f}")
|
|
243
|
+
print(f"PDD - Number of Modules: {pdd_num_modules}")
|
|
244
|
+
print(f"PDD - Overall Average Cost per Module (from 'avg_cost'): ${pdd_avg_cost_per_module:.2f}")
|
|
245
|
+
|
|
246
|
+
report_md += "### 1.2. PDD Creation\n"
|
|
247
|
+
report_md += f"- Total Cost (sum of module total_costs): ${pdd_total_cost:.2f}\n"
|
|
248
|
+
report_md += f"- Number of Modules: {pdd_num_modules}\n"
|
|
249
|
+
report_md += f"- Overall Average Cost per Module (mean of 'avg_cost' column): ${pdd_avg_cost_per_module:.2f}\n"
|
|
250
|
+
report_md += f"- 'avg_cost' per Module Statistics:\n{df_pdd['avg_cost'].describe().to_markdown()}\n\n"
|
|
251
|
+
report_md += f"- 'total_cost' per Module Statistics:\n{df_pdd['total_cost'].describe().to_markdown()}\n\n"
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
fig, ax = plt.subplots(figsize=(8, 5))
|
|
255
|
+
sns.boxplot(x=df_pdd['avg_cost'], ax=ax)
|
|
256
|
+
ax.set_title('PDD: Average Cost per Module Distribution')
|
|
257
|
+
ax.set_xlabel('Average Cost per Module ($)')
|
|
258
|
+
plt.tight_layout()
|
|
259
|
+
plot_path = OUTPUT_DIR / 'pdd_avg_cost_per_module_dist.png'
|
|
260
|
+
fig.savefig(plot_path)
|
|
261
|
+
plt.close(fig)
|
|
262
|
+
report_md += f"\n\n"
|
|
263
|
+
|
|
264
|
+
# PDD Top N modules by cost
|
|
265
|
+
pdd_top_cost_modules = df_pdd.sort_values(by='total_cost', ascending=False).head(10)
|
|
266
|
+
report_md += "Top 10 PDD Modules by Total Cost:\n"
|
|
267
|
+
report_md += f"{pdd_top_cost_modules[['module', 'total_cost']].to_markdown(index=False)}\n\n"
|
|
268
|
+
|
|
269
|
+
fig, ax = plt.subplots(figsize=(12, 7))
|
|
270
|
+
sns.barplot(data=pdd_top_cost_modules, x='total_cost', y='module', ax=ax, palette="viridis")
|
|
271
|
+
ax.set_title('PDD: Top 10 Modules by Total Cost')
|
|
272
|
+
ax.set_xlabel('Total Cost ($)')
|
|
273
|
+
ax.set_ylabel('Module')
|
|
274
|
+
plt.tight_layout()
|
|
275
|
+
plot_path = OUTPUT_DIR / 'pdd_top_modules_by_cost.png'
|
|
276
|
+
fig.savefig(plot_path)
|
|
277
|
+
plt.close(fig)
|
|
278
|
+
report_md += f"\n\n"
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
# Cost Comparison
|
|
282
|
+
report_md += "### 1.3. Cost Comparison Summary\n"
|
|
283
|
+
cost_comp_data = {
|
|
284
|
+
'Metric': ['Total Cost', 'Average Cost per Unit'],
|
|
285
|
+
'Claude': [f"${claude_total_cost:.2f}", f"${claude_avg_cost_per_run:.2f} (per run)"],
|
|
286
|
+
'PDD': [f"${pdd_total_cost:.2f}", f"${pdd_avg_cost_per_module:.2f} (avg per module)"]
|
|
287
|
+
}
|
|
288
|
+
cost_comp_df = pd.DataFrame(cost_comp_data)
|
|
289
|
+
report_md += cost_comp_df.to_markdown(index=False) + "\n\n"
|
|
290
|
+
|
|
291
|
+
fig, ax = plt.subplots(figsize=(8, 5))
|
|
292
|
+
sns.barplot(x=['Claude (All Runs)', 'PDD (All Modules)'], y=[claude_total_cost, pdd_total_cost], ax=ax, palette="mako")
|
|
293
|
+
ax.set_title('Total Cost Comparison')
|
|
294
|
+
ax.set_ylabel('Total Cost ($)')
|
|
295
|
+
for i, v in enumerate([claude_total_cost, pdd_total_cost]):
|
|
296
|
+
ax.text(i, v + 0.01 * max(claude_total_cost, pdd_total_cost), f"${v:.2f}", ha='center', va='bottom')
|
|
297
|
+
plt.tight_layout()
|
|
298
|
+
plot_path = OUTPUT_DIR / 'total_cost_comparison.png'
|
|
299
|
+
fig.savefig(plot_path)
|
|
300
|
+
plt.close(fig)
|
|
301
|
+
report_md += f"\n\n"
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
# --- Time Analysis (Wall Duration / Total Time) ---
|
|
305
|
+
print("\n--- Time Analysis ---")
|
|
306
|
+
report_md += "## 2. Time Analysis (Wall Duration / Total Time)\n\n"
|
|
307
|
+
|
|
308
|
+
# Claude Time
|
|
309
|
+
claude_total_wall_duration_s = df_claude['wall_duration_seconds'].sum()
|
|
310
|
+
claude_avg_wall_duration_s = df_claude['wall_duration_seconds'].mean()
|
|
311
|
+
print(f"Claude - Total Wall Duration: {format_seconds_to_hms(claude_total_wall_duration_s)} ({claude_total_wall_duration_s:.2f}s)")
|
|
312
|
+
print(f"Claude - Average Wall Duration per Run: {format_seconds_to_hms(claude_avg_wall_duration_s)} ({claude_avg_wall_duration_s:.2f}s)")
|
|
313
|
+
|
|
314
|
+
report_md += "### 2.1. Claude Creation (Wall Duration)\n"
|
|
315
|
+
report_md += f"- Total Wall Duration: {format_seconds_to_hms(claude_total_wall_duration_s)} ({claude_total_wall_duration_s:.2f} seconds)\n"
|
|
316
|
+
report_md += f"- Wall Duration per Run Statistics (seconds):\n{df_claude['wall_duration_seconds'].describe().to_markdown()}\n\n"
|
|
317
|
+
|
|
318
|
+
fig, ax = plt.subplots(figsize=(8, 5))
|
|
319
|
+
sns.boxplot(x=df_claude['wall_duration_seconds'], ax=ax)
|
|
320
|
+
ax.set_title('Claude: Wall Duration per Run Distribution')
|
|
321
|
+
ax.set_xlabel('Wall Duration per Run (seconds)')
|
|
322
|
+
plt.tight_layout()
|
|
323
|
+
plot_path = OUTPUT_DIR / 'claude_wall_duration_dist.png'
|
|
324
|
+
fig.savefig(plot_path)
|
|
325
|
+
plt.close(fig)
|
|
326
|
+
report_md += f"\n\n"
|
|
327
|
+
|
|
328
|
+
# Claude API Duration (for completeness, not direct comparison)
|
|
329
|
+
claude_total_api_duration_s = df_claude['api_duration_seconds'].sum()
|
|
330
|
+
claude_avg_api_duration_s = df_claude['api_duration_seconds'].mean()
|
|
331
|
+
report_md += "Claude API Duration (for context):\n"
|
|
332
|
+
report_md += f"- Total API Duration: {format_seconds_to_hms(claude_total_api_duration_s)} ({claude_total_api_duration_s:.2f} seconds)\n"
|
|
333
|
+
report_md += f"- API Duration per Run Statistics (seconds):\n{df_claude['api_duration_seconds'].describe().to_markdown()}\n\n"
|
|
334
|
+
|
|
335
|
+
|
|
336
|
+
# PDD Time
|
|
337
|
+
pdd_total_time_s = df_pdd['total_time'].sum() # Sum of 'total_time' for all modules
|
|
338
|
+
pdd_avg_module_total_time_s = df_pdd['total_time'].mean() # Avg of 'total_time' across modules
|
|
339
|
+
print(f"PDD - Total Time (sum of module total_times): {format_seconds_to_hms(pdd_total_time_s)} ({pdd_total_time_s:.2f}s)")
|
|
340
|
+
print(f"PDD - Average 'total_time' per Module: {format_seconds_to_hms(pdd_avg_module_total_time_s)} ({pdd_avg_module_total_time_s:.2f}s)")
|
|
341
|
+
|
|
342
|
+
report_md += "### 2.2. PDD Creation (Total Time per Module)\n"
|
|
343
|
+
report_md += f"- Total Time (sum of module `total_time`): {format_seconds_to_hms(pdd_total_time_s)} ({pdd_total_time_s:.2f} seconds)\n"
|
|
344
|
+
report_md += f"- `total_time` per Module Statistics (seconds):\n{df_pdd['total_time'].describe().to_markdown()}\n\n"
|
|
345
|
+
|
|
346
|
+
fig, ax = plt.subplots(figsize=(8, 5))
|
|
347
|
+
sns.boxplot(x=df_pdd['total_time'], ax=ax)
|
|
348
|
+
ax.set_title('PDD: Total Time per Module Distribution')
|
|
349
|
+
ax.set_xlabel('Total Time per Module (seconds)')
|
|
350
|
+
plt.tight_layout()
|
|
351
|
+
plot_path = OUTPUT_DIR / 'pdd_total_time_per_module_dist.png'
|
|
352
|
+
fig.savefig(plot_path)
|
|
353
|
+
plt.close(fig)
|
|
354
|
+
report_md += f"\n\n"
|
|
355
|
+
|
|
356
|
+
# PDD Top N modules by time
|
|
357
|
+
pdd_top_time_modules = df_pdd.sort_values(by='total_time', ascending=False).head(10)
|
|
358
|
+
report_md += "Top 10 PDD Modules by Total Time:\n"
|
|
359
|
+
report_md += f"{pdd_top_time_modules[['module', 'total_time']].to_markdown(index=False)}\n\n"
|
|
360
|
+
|
|
361
|
+
fig, ax = plt.subplots(figsize=(12, 7))
|
|
362
|
+
sns.barplot(data=pdd_top_time_modules, x='total_time', y='module', ax=ax, palette="crest")
|
|
363
|
+
ax.set_title('PDD: Top 10 Modules by Total Time')
|
|
364
|
+
ax.set_xlabel('Total Time (seconds)')
|
|
365
|
+
ax.set_ylabel('Module')
|
|
366
|
+
plt.tight_layout()
|
|
367
|
+
plot_path = OUTPUT_DIR / 'pdd_top_modules_by_time.png'
|
|
368
|
+
fig.savefig(plot_path)
|
|
369
|
+
plt.close(fig)
|
|
370
|
+
report_md += f"\n\n"
|
|
371
|
+
|
|
372
|
+
# Time Comparison
|
|
373
|
+
report_md += "### 2.3. Time Comparison Summary\n"
|
|
374
|
+
time_comp_data = {
|
|
375
|
+
'Metric': ['Total Execution Time', 'Average Time per Unit'],
|
|
376
|
+
'Claude': [f"{format_seconds_to_hms(claude_total_wall_duration_s)}", f"{format_seconds_to_hms(claude_avg_wall_duration_s)} (per run, wall duration)"],
|
|
377
|
+
'PDD': [f"{format_seconds_to_hms(pdd_total_time_s)}", f"{format_seconds_to_hms(pdd_avg_module_total_time_s)} (avg module total_time)"]
|
|
378
|
+
}
|
|
379
|
+
time_comp_df = pd.DataFrame(time_comp_data)
|
|
380
|
+
report_md += time_comp_df.to_markdown(index=False) + "\n\n"
|
|
381
|
+
|
|
382
|
+
fig, ax = plt.subplots(figsize=(8, 5))
|
|
383
|
+
sns.barplot(x=['Claude (Wall Duration)', 'PDD (Total Time)'], y=[claude_total_wall_duration_s, pdd_total_time_s], ax=ax, palette="flare")
|
|
384
|
+
ax.set_title('Total Execution Time Comparison')
|
|
385
|
+
ax.set_ylabel('Total Time (seconds)')
|
|
386
|
+
for i, v in enumerate([claude_total_wall_duration_s, pdd_total_time_s]):
|
|
387
|
+
ax.text(i, v + 0.01 * max(claude_total_wall_duration_s, pdd_total_time_s), f"{format_seconds_to_hms(v)}", ha='center', va='bottom')
|
|
388
|
+
plt.tight_layout()
|
|
389
|
+
plot_path = OUTPUT_DIR / 'total_time_comparison.png'
|
|
390
|
+
fig.savefig(plot_path)
|
|
391
|
+
plt.close(fig)
|
|
392
|
+
report_md += f"\n\n"
|
|
393
|
+
|
|
394
|
+
# --- Lines Added/Removed Analysis (Claude Only) ---
|
|
395
|
+
print("\n--- Lines Added/Removed Analysis (Claude Only) ---")
|
|
396
|
+
report_md += "## 3. Lines Added/Removed Analysis (Claude Only)\n\n"
|
|
397
|
+
|
|
398
|
+
claude_total_lines_added = df_claude['lines_added'].sum()
|
|
399
|
+
claude_total_lines_removed = df_claude['lines_removed'].sum()
|
|
400
|
+
claude_avg_lines_added = df_claude['lines_added'].mean()
|
|
401
|
+
claude_avg_lines_removed = df_claude['lines_removed'].mean()
|
|
402
|
+
claude_net_lines_added_total = claude_total_lines_added - claude_total_lines_removed
|
|
403
|
+
df_claude['net_lines_added'] = df_claude['lines_added'] - df_claude['lines_removed']
|
|
404
|
+
claude_avg_net_lines_added = df_claude['net_lines_added'].mean()
|
|
405
|
+
|
|
406
|
+
print(f"Claude - Total Lines Added: {claude_total_lines_added}")
|
|
407
|
+
print(f"Claude - Total Lines Removed: {claude_total_lines_removed}")
|
|
408
|
+
print(f"Claude - Net Lines Added (Total): {claude_net_lines_added_total}")
|
|
409
|
+
print(f"Claude - Average Lines Added per Run: {claude_avg_lines_added:.2f}")
|
|
410
|
+
print(f"Claude - Average Lines Removed per Run: {claude_avg_lines_removed:.2f}")
|
|
411
|
+
print(f"Claude - Average Net Lines Added per Run: {claude_avg_net_lines_added:.2f}")
|
|
412
|
+
|
|
413
|
+
report_md += f"- Total Lines Added: {claude_total_lines_added}\n"
|
|
414
|
+
report_md += f"- Total Lines Removed: {claude_total_lines_removed}\n"
|
|
415
|
+
report_md += f"- Net Lines Added (Total): {claude_net_lines_added_total}\n"
|
|
416
|
+
report_md += f"- Lines Added per Run Statistics:\n{df_claude['lines_added'].describe().to_markdown()}\n\n"
|
|
417
|
+
report_md += f"- Lines Removed per Run Statistics:\n{df_claude['lines_removed'].describe().to_markdown()}\n\n"
|
|
418
|
+
report_md += f"- Net Lines Added per Run Statistics:\n{df_claude['net_lines_added'].describe().to_markdown()}\n\n"
|
|
419
|
+
|
|
420
|
+
fig, axes = plt.subplots(1, 2, figsize=(15, 6))
|
|
421
|
+
sns.histplot(df_claude['lines_added'], ax=axes[0], kde=True, color='skyblue')
|
|
422
|
+
axes[0].set_title('Claude: Distribution of Lines Added per Run')
|
|
423
|
+
axes[0].set_xlabel('Lines Added')
|
|
424
|
+
sns.histplot(df_claude['lines_removed'], ax=axes[1], kde=True, color='salmon')
|
|
425
|
+
axes[1].set_title('Claude: Distribution of Lines Removed per Run')
|
|
426
|
+
axes[1].set_xlabel('Lines Removed')
|
|
427
|
+
plt.tight_layout()
|
|
428
|
+
plot_path = OUTPUT_DIR / 'claude_lines_added_removed_dist.png'
|
|
429
|
+
fig.savefig(plot_path)
|
|
430
|
+
plt.close(fig)
|
|
431
|
+
report_md += f"\n\n"
|
|
432
|
+
|
|
433
|
+
|
|
434
|
+
# --- Summary and Insights ---
|
|
435
|
+
print("\n--- Summary and Insights ---")
|
|
436
|
+
report_md += "## 4. Summary and Insights\n\n"
|
|
437
|
+
|
|
438
|
+
report_md += "This analysis compared file creation tasks using two approaches, reflected in `claude_creation.csv` (multiple runs) and `PDD_creation.csv` (single process, module breakdown).\n\n"
|
|
439
|
+
|
|
440
|
+
report_md += "**Key Cost Observations:**\n"
|
|
441
|
+
report_md += f"- The total cost for all Claude runs was ${claude_total_cost:.2f} over {claude_num_runs} runs, averaging ${claude_avg_cost_per_run:.2f} per run.\n"
|
|
442
|
+
report_md += f"- The PDD process had a total cost of ${pdd_total_cost:.2f}, distributed across {pdd_num_modules} modules. The overall average cost per module (from 'avg_cost') was ${pdd_avg_cost_per_module:.2f}.\n"
|
|
443
|
+
if claude_total_cost > pdd_total_cost:
|
|
444
|
+
report_md += "- PDD appears more cost-effective in total for the tasks represented in these datasets.\n"
|
|
445
|
+
elif pdd_total_cost > claude_total_cost:
|
|
446
|
+
report_md += "- Claude (sum of runs) appears more cost-effective in total for the tasks represented.\n"
|
|
447
|
+
else:
|
|
448
|
+
report_md += "- The total costs are comparable.\n"
|
|
449
|
+
report_md += "- Note: Claude data represents multiple, possibly distinct, creation tasks, while PDD data is a breakdown of one larger process. Direct cost-per-task comparison is nuanced.\n\n"
|
|
450
|
+
|
|
451
|
+
report_md += "**Key Time Observations (Wall/Total Time):**\n"
|
|
452
|
+
report_md += f"- Claude runs had a total wall duration of {format_seconds_to_hms(claude_total_wall_duration_s)}, with an average of {format_seconds_to_hms(claude_avg_wall_duration_s)} per run.\n"
|
|
453
|
+
report_md += f"- The PDD process had a total execution time of {format_seconds_to_hms(pdd_total_time_s)} (sum of module `total_time`).\n"
|
|
454
|
+
if claude_total_wall_duration_s > pdd_total_time_s:
|
|
455
|
+
report_md += "- PDD was faster in total execution time compared to the sum of Claude's wall durations.\n"
|
|
456
|
+
elif pdd_total_time_s > claude_total_wall_duration_s:
|
|
457
|
+
report_md += "- Claude (sum of wall durations) was faster in total execution time compared to PDD.\n"
|
|
458
|
+
else:
|
|
459
|
+
report_md += "- The total execution times are comparable.\n"
|
|
460
|
+
report_md += "- PDD's `main_editor` module was a significant contributor to its total time and cost. Optimizing such modules could yield substantial improvements.\n\n"
|
|
461
|
+
|
|
462
|
+
report_md += "**Nature of Data:**\n"
|
|
463
|
+
report_md += "- It's crucial to remember that Claude's data represents multiple independent runs, potentially for different specific creation tasks. PDD's data is a breakdown of a single, possibly more complex, integrated process.\n"
|
|
464
|
+
report_md += "- This difference means that 'total' figures for Claude are aggregates of separate events, while for PDD, they represent components of one event.\n\n"
|
|
465
|
+
|
|
466
|
+
report_md += "**Potential Insights & Recommendations:**\n"
|
|
467
|
+
report_md += "- **PDD Efficiency:** For PDD, analyzing modules with high `total_time` and `total_cost` (e.g., `main_editor`, `file_handler`, `edit_tool_impl`) can identify bottlenecks for optimization.\n"
|
|
468
|
+
report_md += "- **Claude Variability:** The distribution of costs and times for Claude runs (if tasks were similar) can indicate variability in performance. If tasks were diverse, it reflects the cost/time for different types of creation jobs.\n"
|
|
469
|
+
report_md += "- **Cost vs. Time Trade-off:** The data can be used to explore cost vs. time trade-offs. For example, PDD's `anthropic_service` has a relatively high average cost but its total time contribution might be justified if it performs critical, complex tasks efficiently.\n"
|
|
470
|
+
report_md += "- **Further Analysis:** If the Claude runs correspond to specific types of files or tasks, segmenting the Claude data by these types could provide more granular insights into its performance characteristics.\n\n"
|
|
471
|
+
|
|
472
|
+
# Save Markdown report
|
|
473
|
+
report_file_path = OUTPUT_DIR / 'creation_analysis_report.md'
|
|
474
|
+
with open(report_file_path, 'w') as f:
|
|
475
|
+
f.write(report_md)
|
|
476
|
+
print(f"\nAnalysis complete. Report saved to: {report_file_path}")
|
|
477
|
+
|
|
478
|
+
|
|
479
|
+
if __name__ == "__main__":
    # Configure global plot styling once, before any figures are created.
    sns.set_style("whitegrid")
    plt.rcParams['figure.dpi'] = 100  # Raise for higher-quality plot output if needed.

    # Ensure the analysis directory exists so the loaders can look for the
    # CSV inputs there.  Path.mkdir(parents=True, exist_ok=True) is
    # idempotent, so no exists() pre-check is needed.  The two paths
    # normally share a parent, but creating both keeps this robust if the
    # configured locations ever diverge.
    CLAUDE_CSV_PATH.parent.mkdir(parents=True, exist_ok=True)
    PDD_CSV_PATH.parent.mkdir(parents=True, exist_ok=True)

    # The load_and_preprocess functions fall back to built-in example data
    # when the CSV files are absent, so the script runs standalone without
    # any fixtures.  To analyze real data instead, supply:
    #   - CLAUDE_CSV_PATH with columns: Total Cost, API Duration,
    #     Wall Duration, Lines Added, Lines Removed
    #     (costs like "$9.97", durations like "1h 35m 33.0s")
    #   - PDD_CSV_PATH with columns: module, avg_time, total_time,
    #     avg_cost, total_cost (plain numeric values)

    perform_analysis()
|