pyphyschemtools 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. pyphyschemtools/Chem3D.py +831 -0
  2. pyphyschemtools/ML.py +42 -0
  3. pyphyschemtools/PeriodicTable.py +289 -0
  4. pyphyschemtools/__init__.py +43 -0
  5. pyphyschemtools/aithermo.py +350 -0
  6. pyphyschemtools/cheminformatics.py +230 -0
  7. pyphyschemtools/core.py +119 -0
  8. pyphyschemtools/icons-logos-banner/Logo_pyPhysChem_border.svg +1109 -0
  9. pyphyschemtools/icons-logos-banner/__init__.py +0 -0
  10. pyphyschemtools/icons-logos-banner/logo.png +0 -0
  11. pyphyschemtools/icons-logos-banner/tools4pyPC_banner.png +0 -0
  12. pyphyschemtools/icons-logos-banner/tools4pyPC_banner.svg +193 -0
  13. pyphyschemtools/kinetics.py +193 -0
  14. pyphyschemtools/resources/css/BrainHalfHalf-120x139.base64 +1 -0
  15. pyphyschemtools/resources/css/BrainHalfHalf-120x139.png +0 -0
  16. pyphyschemtools/resources/css/BrainHalfHalf.base64 +8231 -0
  17. pyphyschemtools/resources/css/BrainHalfHalf.png +0 -0
  18. pyphyschemtools/resources/css/BrainHalfHalf.svg +289 -0
  19. pyphyschemtools/resources/css/visualID.css +325 -0
  20. pyphyschemtools/resources/img/Tranformative_3.webp +0 -0
  21. pyphyschemtools/resources/img/Tranformative_3_banner.png +0 -0
  22. pyphyschemtools/resources/img/pyPhysChem_1.png +0 -0
  23. pyphyschemtools/resources/svg/BrainHalfHalf.png +0 -0
  24. pyphyschemtools/resources/svg/BrainHalfHalf.svg +289 -0
  25. pyphyschemtools/resources/svg/GitHub-Logo-C.png +0 -0
  26. pyphyschemtools/resources/svg/GitHub-Logo.png +0 -0
  27. pyphyschemtools/resources/svg/Logo-Universite-Toulouse-n-2023.png +0 -0
  28. pyphyschemtools/resources/svg/Logo_pyPhysChem_1-translucentBgd-woName.png +0 -0
  29. pyphyschemtools/resources/svg/Logo_pyPhysChem_1-translucentBgd.png +0 -0
  30. pyphyschemtools/resources/svg/Logo_pyPhysChem_1.png +0 -0
  31. pyphyschemtools/resources/svg/Logo_pyPhysChem_1.svg +622 -0
  32. pyphyschemtools/resources/svg/Logo_pyPhysChem_5.png +0 -0
  33. pyphyschemtools/resources/svg/Logo_pyPhysChem_5.svg +48 -0
  34. pyphyschemtools/resources/svg/Logo_pyPhysChem_border.svg +1109 -0
  35. pyphyschemtools/resources/svg/Python-logo-notext.svg +265 -0
  36. pyphyschemtools/resources/svg/Python_logo_and_wordmark.svg.png +0 -0
  37. pyphyschemtools/resources/svg/UT3_logoQ.jpg +0 -0
  38. pyphyschemtools/resources/svg/UT3_logoQ.png +0 -0
  39. pyphyschemtools/resources/svg/Universite-Toulouse-n-2023.svg +141 -0
  40. pyphyschemtools/resources/svg/X.png +0 -0
  41. pyphyschemtools/resources/svg/logoAnaconda.png +0 -0
  42. pyphyschemtools/resources/svg/logoAnaconda.webp +0 -0
  43. pyphyschemtools/resources/svg/logoCNRS.png +0 -0
  44. pyphyschemtools/resources/svg/logoDebut.svg +316 -0
  45. pyphyschemtools/resources/svg/logoEnd.svg +172 -0
  46. pyphyschemtools/resources/svg/logoFin.svg +172 -0
  47. pyphyschemtools/resources/svg/logoPPCL.svg +359 -0
  48. pyphyschemtools/resources/svg/logoPytChem.png +0 -0
  49. pyphyschemtools/resources/svg/logo_lpcno_300_dpi_notexttransparent.png +0 -0
  50. pyphyschemtools/resources/svg/logo_pyPhysChem.png +0 -0
  51. pyphyschemtools/resources/svg/logo_pyPhysChem_0.png +0 -0
  52. pyphyschemtools/resources/svg/logo_pyPhysChem_0.svg +390 -0
  53. pyphyschemtools/resources/svg/logopyPhyschem.png +0 -0
  54. pyphyschemtools/resources/svg/logopyPhyschem_2.webp +0 -0
  55. pyphyschemtools/resources/svg/logopyPhyschem_3.webp +0 -0
  56. pyphyschemtools/resources/svg/logopyPhyschem_4.webp +0 -0
  57. pyphyschemtools/resources/svg/logopyPhyschem_5.png +0 -0
  58. pyphyschemtools/resources/svg/logopyPhyschem_5.webp +0 -0
  59. pyphyschemtools/resources/svg/logopyPhyschem_6.webp +0 -0
  60. pyphyschemtools/resources/svg/logopyPhyschem_7.webp +0 -0
  61. pyphyschemtools/resources/svg/logos-Anaconda-pyPhysChem.png +0 -0
  62. pyphyschemtools/resources/svg/logos-Anaconda-pyPhysChem.svg +58 -0
  63. pyphyschemtools/resources/svg/pyPCBanner.svg +309 -0
  64. pyphyschemtools/resources/svg/pyPhysChem-GitHubSocialMediaTemplate.png +0 -0
  65. pyphyschemtools/resources/svg/pyPhysChem-GitHubSocialMediaTemplate.svg +295 -0
  66. pyphyschemtools/resources/svg/pyPhysChemBanner.png +0 -0
  67. pyphyschemtools/resources/svg/pyPhysChemBanner.svg +639 -0
  68. pyphyschemtools/resources/svg/qrcode-pyPhysChem.png +0 -0
  69. pyphyschemtools/resources/svg/repository-open-graph-template.png +0 -0
  70. pyphyschemtools/spectra.py +451 -0
  71. pyphyschemtools/survey.py +1048 -0
  72. pyphyschemtools/sympyUtilities.py +51 -0
  73. pyphyschemtools/tools4AS.py +960 -0
  74. pyphyschemtools/visualID.py +101 -0
  75. pyphyschemtools/visualID_Eng.py +175 -0
  76. pyphyschemtools-0.1.0.dist-info/METADATA +38 -0
  77. pyphyschemtools-0.1.0.dist-info/RECORD +80 -0
  78. pyphyschemtools-0.1.0.dist-info/WHEEL +5 -0
  79. pyphyschemtools-0.1.0.dist-info/licenses/LICENSE +674 -0
  80. pyphyschemtools-0.1.0.dist-info/top_level.txt +1 -0
pyphyschemtools/survey.py
@@ -0,0 +1,1048 @@
1
+ ######################################################################
2
+ # Survey
3
+ ######################################################################
4
+ from .visualID_Eng import fg, bg, hl
5
+ from .core import centerTitle, centertxt
6
+
7
+ import os, json, yaml, pandas as pd
8
+ from datetime import datetime
9
+ from IPython.display import display
10
+ from ipywidgets import VBox, HTML, Button, IntSlider, Text, Textarea, Layout, HBox, Dropdown
11
+ from textwrap import wrap
12
+ import numpy as np
13
+ import matplotlib.pyplot as plt
14
+
15
+ class SurveyApp:
16
+ def __init__(self, mode="participant", base_dir="ML-survey"):
17
+ self.mode = mode
18
+ self.base_dir = base_dir
19
+ self.responses_dir = os.path.join(base_dir, "responses")
20
+ self.summary_dir = os.path.join(base_dir, "summary")
21
+ os.makedirs(self.responses_dir, exist_ok=True)
22
+ os.makedirs(self.summary_dir, exist_ok=True)
23
+ self.questions, self.blocks = self.load_questions()
24
+
25
+ def enable_slider_css(self):
26
+ """Inject CSS for hover/active color effects on sliders."""
27
+ from IPython.display import HTML, display
28
+ display(HTML("""
29
+ <style>
30
+ /* Hover: track + rail */
31
+ .jp-InputSlider:hover .MuiSlider-track,
32
+ .jp-InputSlider:hover .MuiSlider-rail {
33
+ background-color: #1E90FF55 !important;
34
+ }
35
+
36
+ /* Hover: thumb */
37
+ .jp-InputSlider:hover .MuiSlider-thumb {
38
+ background-color: #1E90FF !important;
39
+ box-shadow: 0px 0px 4px #1E90FF !important;
40
+ }
41
+
42
+ /* Active: thumb when clicked or dragged */
43
+ .jp-InputSlider .MuiSlider-thumb.Mui-active {
44
+ background-color: #FF4500 !important;
45
+ box-shadow: 0px 0px 6px #FF4500 !important;
46
+ }
47
+ </style>
48
+ """))
49
+
50
+ def get_or_create_user_id(self):
51
+ """Return a persistent anonymous ID (stored in .survey_id)."""
52
+ id_path = os.path.join(self.base_dir, ".survey_id")
53
+
54
+ # If ID file already exists, read it
55
+ if os.path.exists(id_path):
56
+ with open(id_path, "r") as f:
57
+ user_id = f.read().strip()
58
+ if user_id:
59
+ return user_id
60
+
61
+ # Otherwise, create a new one
62
+ import secrets
63
+ user_id = f"UID_{datetime.now().strftime('%Y%m%d')}_{secrets.token_hex(3).upper()}"
64
+ with open(id_path, "w") as f:
65
+ f.write(user_id)
66
+ return user_id
67
+
68
+ def load_questions(self):
69
+ yaml_path = os.path.join(self.base_dir, "survey_questions.yaml")
70
+ with open(yaml_path, "r") as f:
71
+ data = yaml.safe_load(f)
72
+
73
+ questions, blocks = {}, {}
74
+
75
+ for b, v in data["blocks"].items():
76
+ blocks[b] = (v["title"], v["subtitle"])
77
+
78
+ for qid, qinfo in v["questions"].items():
79
+ questions[qid] = {
80
+ "text": qinfo["text"],
81
+ "required": qinfo.get("required", True) # default = required
82
+ }
83
+
84
+ return questions, blocks
85
+
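For orientation, load_questions() expects ML-survey/survey_questions.yaml to deserialize into a nested mapping of blocks and questions. A minimal sketch of that structure (editor illustration only; all titles and question texts are hypothetical), written as the Python dict yaml.safe_load would return:

    # Editor sketch, not part of the released file. data["blocks"][block_id] must
    # provide "title", "subtitle" and a "questions" mapping whose entries carry
    # "text" and an optional "required" flag (default True).
    data = {
        "blocks": {
            "A": {
                "title": "Block A. Background",
                "subtitle": "Tell us about yourself",
                "questions": {
                    "A1": {"text": "Rate your Python level (1 = beginner, 5 = expert)"},
                    "A2": {"text": "What do you expect from the school?", "required": False},
                },
            },
        },
    }

Note that questions whose text contains the literal substring "(1 =" are later rendered as 0-5 sliders; all other questions become free-text areas.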
86
+ # === Helper: Print Summary ===
87
+ def print_questions_summary(self):
88
+ """Affiche la liste des questions par bloc et leur type (Numérique/Texte)."""
89
+ print("\n#####################################################")
90
+ print("# RÉPARTITION DES QUESTIONS PAR BLOC #")
91
+ print("#####################################################")
92
+
93
+ num_total, text_total = 0, 0
94
+
95
+ for block_id, (title, subtitle) in self.blocks.items():
96
+ print(f"\n--- {block_id}. {title} ---")
97
+
98
+ num_in_block, text_in_block = 0, 0
99
+
100
+ # Filter the questions belonging to this block
101
+ block_questions = {
102
+ qid: qinfo for qid, qinfo in self.questions.items()
103
+ if qid.startswith(block_id)
104
+ }
105
+
106
+ for qid, qinfo in block_questions.items():
107
+ text = qinfo["text"]
108
+
109
+ # Reproduce the type-detection logic used in the form builder
110
+ if "(1 =" in text:
111
+ q_type = "NUMÉ(Slider)"
112
+ num_in_block += 1
113
+ else:
114
+ q_type = "TEXTE(Libre)"
115
+ text_in_block += 1
116
+
117
+ print(f" [{qid:4}] {q_type:12} : {text.split('(1 =')[0].strip()}")
118
+
119
+ num_total += num_in_block
120
+ text_total += text_in_block
121
+
122
+ print("\n-----------------------------------------------------")
123
+ print(f"TOTAL : {num_total} questions numériques, {text_total} questions à champ libre.")
124
+ print("-----------------------------------------------------")
125
+
126
+ # === UI Builder ===
127
+ def run(self):
128
+ if self.mode == "participant":
129
+ self.build_participant_form()
130
+ elif self.mode == "admin":
131
+ self.build_admin_dashboard()
132
+
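A minimal usage sketch for both modes (editor illustration, not part of the package; the import path is assumed from this file's location):

    # Editor sketch. Assumes an ML-survey/ directory containing survey_questions.yaml.
    from pyphyschemtools.survey import SurveyApp

    app = SurveyApp(mode="participant", base_dir="ML-survey")
    app.run()      # builds the interactive form (sliders / text areas, save/load/submit buttons)

    admin = SurveyApp(mode="admin", base_dir="ML-survey")
    admin.run()    # prints the question summary and builds the analysis dashboard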
133
+ # === Participant Mode ===
134
+ def build_participant_form(self):
135
+ self.enable_slider_css() # ← inject CSS automatically
136
+ colors = ["#f7f9fc", "#f0f0f0"]
137
+ base_styles = {
138
+ "title": "font-size:18px;font-weight:bold;margin-top:5px;",
139
+ "subtitle": "color:#444;font-style:italic;font-size:13px;margin-bottom:8px;",
140
+ "warn": "color:#CC0000;font-size:12px;font-style:italic;",
141
+ }
142
+
143
+ self.user_id = self.get_or_create_user_id()
144
+ self.full_form = [
145
+ HTML(f"<b>🆔 Your anonymous ID:</b> <code>{self.user_id}</code><br>"
146
+ f"<span style='color:#777;font-size:12px'>(This ID is stored locally in a hidden file .survey_id)</span>")
147
+ ]
148
+ self.input_controls, self.warn_labels = [], []
149
+
150
+ block_index = 0
151
+ for block in self.blocks.keys():
152
+ color = colors[block_index % len(colors)]
153
+ title, subtitle = self.blocks[block]
154
+ header_html = f"""
155
+ <div style='background-color:{color};border:1px solid #ccc;border-radius:8px;padding:15px 20px;margin:12px 0'>
156
+ <div style='{base_styles['title']}color:#1E90FF'>{title}</div>
157
+ <div style='{base_styles['subtitle']}'>{subtitle}</div><div style='margin-left:15px;'>
158
+ """
159
+ footer_html = "</div></div>"
160
+ block_widgets = [HTML(header_html)]
161
+ for q, qinfo in self.questions.items():
162
+ if q.startswith(block): # ← IMPORTANT: keep this filter
163
+ txt = qinfo["text"]
164
+ required = qinfo["required"]
165
+
166
+ # Display the question text with a required-field asterisk
167
+ star = "<span style='color:#a00'>*</span>" if required else ""
168
+ block_widgets.append(HTML(f"<b>{txt}</b> {star}"))
169
+
170
+ # Slider vs. textarea detection (unchanged)
171
+ if "(1 =" in txt:
172
+ w = IntSlider(
173
+ value=0, min=0, max=5, step=1,
174
+ description='', layout=Layout(width="35%")
175
+ )
176
+ w.slider_behavior = "drag-tap"
177
+ else:
178
+ w = Textarea(
179
+ placeholder="Write your answer here...",
180
+ layout=Layout(width="85%", height="60px")
181
+ )
182
+
183
+ warn = HTML("")
184
+
185
+ # Store the widget together with its required flag
186
+ self.input_controls.append((w, required))
187
+ self.warn_labels.append(warn)
188
+
189
+ # Add to the layout
190
+ block_widgets.extend([w, warn])
191
+ block_widgets.append(HTML(footer_html))
192
+ self.full_form.extend(block_widgets)
193
+ block_index += 1
194
+
195
+ # === Buttons ===
196
+ btn_layout = Layout(width="220px", height="40px", margin="3px 6px 3px 0")
197
+ self.save_button = Button(description="💾 Save draft", button_style="info", layout=btn_layout)
198
+ self.load_button = Button(description="📂 Load selected draft", button_style="warning", layout=btn_layout)
199
+ self.submit_button = Button(description="✅ Submit", button_style="success", layout=btn_layout)
200
+ self.status_label = HTML(value="", layout=Layout(margin="10px 0px"))
201
+ self.draft_status_label = HTML(value="", layout=Layout(margin="5px 0px"))
202
+
203
+ # === Dropdown to select which draft to load ===
204
+ self.draft_dropdown = Dropdown(
205
+ options=self.list_drafts(),
206
+ description="Drafts:",
207
+ layout=Layout(width="70%")
208
+ )
209
+
210
+ self.save_button.on_click(self.save_draft)
211
+ self.load_button.on_click(self.load_draft)
212
+ self.submit_button.on_click(self.submit_form)
213
+
214
+ self.full_form.append(
215
+ VBox([
216
+ self.save_button,
217
+ HBox([self.load_button, self.draft_dropdown]), # ✅ here instead of self.load_button alone
218
+ self.draft_status_label,
219
+ self.submit_button,
220
+ self.status_label
221
+ ])
222
+ )
223
+ display(VBox(self.full_form))
224
+
225
+
226
+ # === Helper: list available drafts ===
227
+ def list_drafts(self):
228
+ if not os.path.exists(self.responses_dir):
229
+ return ["No drafts found"]
230
+ drafts = sorted([f for f in os.listdir(self.responses_dir) if f.endswith(".json")])
231
+ return ["Select a draft to load and then click on the Load Selected Draft button"] + drafts if drafts else ["No drafts found"]
232
+
233
+ # === Actions ===
234
+ def save_draft(self, b):
235
+ data = self._collect_data()
236
+ base_name = f"FallSchool_Draft_{self.user_id}"
237
+ existing = [f for f in os.listdir(self.responses_dir) if f.startswith(base_name)]
238
+ filename = os.path.join(self.responses_dir, f"{base_name}_v{len(existing)+1}.json")
239
+ with open(filename, "w") as f: json.dump(data, f, indent=2)
240
+ self.status_label.value = f"<div style='background:#fff4e5;color:#b35900;padding:6px;border:1px solid #b35900;border-radius:6px'>💾 Draft saved as <code>{os.path.basename(filename)}</code></div>"
241
+ self.draft_dropdown.options = self.list_drafts()
242
+
243
+ def load_draft(self, b):
244
+ selected = self.draft_dropdown.value
245
+ # --- Guard: nothing selected, or a placeholder entry ---
246
+ if not selected or selected.startswith("Select") or selected.startswith("No drafts"):
247
+ self.status_label.value = (
248
+ "<div style='color:#a00'>⚠ Please select a valid draft from the dropdown.</div>"
249
+ )
250
+ return
251
+ filename = os.path.join(self.responses_dir, selected)
252
+
253
+ with open(filename, "r") as f:
254
+ data = json.load(f)
255
+
256
+ if "id" in data:
257
+ self.user_id = data["id"]
258
+
259
+ for i, (q, _) in enumerate(self.questions.items()):
260
+ if q in data:
261
+ w, required = self.input_controls[i]
262
+ val = data[q]
263
+ if isinstance(w, IntSlider): w.value = int(val)
264
+ else: w.value = str(val)
265
+ self.status_label.value = (f"<div style='background:#fff4e5;color:#b35900;padding:6px;"
266
+ f"border:1px solid #b35900;border-radius:6px'>📂 Loaded "
267
+ f"{os.path.basename(filename)}</div>")
268
+
269
+ def submit_form(self, b):
270
+ incomplete = False
271
+ data = {}
272
+
273
+ for i, (q, _) in enumerate(self.questions.items()):
274
+ w, required = self.input_controls[i]
275
+ val = w.value
276
+ warn_label = self.warn_labels[i] # 🔴 warning label shown below each question
277
+
278
+ # --- Slider validation ---
279
+ if isinstance(w, IntSlider):
280
+ if required and val == 0:
281
+ warn_label.value = (
282
+ "<span style='color:#a00;font-size:12px;font-style:italic;'>⚠ Please answer this question.</span>"
283
+ )
284
+ w.style.handle_color = "red"
285
+ incomplete = True
286
+ else:
287
+ warn_label.value = ""
288
+ w.style.handle_color = None
289
+ data[q] = int(val)
290
+
291
+ # --- Text-field validation ---
292
+ else:
293
+ if required and not str(val).strip():
294
+ warn_label.value = (
295
+ "<span style='color:#a00;font-size:12px;font-style:italic;'>⚠ Please provide an answer.</span>"
296
+ )
297
+ incomplete = True
298
+ else:
299
+ warn_label.value = ""
300
+ data[q] = val
301
+
302
+ data["id"] = getattr(self, "user_id", "Anonymous")
303
+
304
+ # === If some answers are missing ===
305
+ if incomplete:
306
+ self.status_label.value = (
307
+ "<div style='background:#ffe6e6;color:#a00;border:1px solid #a00;"
308
+ "padding:8px;border-radius:6px;'>❌ Some questions are missing. "
309
+ "Please check the red warnings above.</div>"
310
+ )
311
+ return
312
+
313
+ # === If everything has been answered ===
314
+ filename = os.path.join(
315
+ self.responses_dir,
316
+ f"Response_{data['id']}_{datetime.now().strftime('%Y%m%d_%H%M')}.csv"
317
+ )
318
+ pd.DataFrame([data]).to_csv(filename, index=False)
319
+ self.status_label.value = (
320
+ f"<div style='background:#e6ffe6;color:#060;border:1px solid #060;"
321
+ f"padding:8px;border-radius:6px;'>✅ Response saved to "
322
+ f"<code>{os.path.basename(filename)}</code></div>"
323
+ )
324
+
325
+
326
+ def _collect_data(self):
327
+ data = {}
328
+ for q, (w, required) in zip(self.questions.keys(), self.input_controls):
329
+ data[q] = w.value
330
+ data["id"] = self.user_id
331
+ return data
332
+
333
+ # === Admin mode ===================================================================================
334
+ # === Helpers
335
+
336
+
337
+ def plot_spider_multi(self, df, title="Participant and Mean Scores per Block", savepath=None, figsize=(12,8)):
338
+ """
339
+ Draw radar (spider) chart with per-participant transparency
340
+ and block names instead of A–F.
341
+ """
342
+
343
+ # --- Compute averages ---
344
+ avg = df.mean(axis=0)
345
+ labels = avg.index.tolist()
346
+ N = len(labels)
347
+ angles = np.linspace(0, 2 * np.pi, N, endpoint=False).tolist()
348
+ angles += [angles[0]]
349
+
350
+ # === Replace A–F with block titles ===
351
+ # → only use the first sentence (shortened title)
352
+ label_map = {b: self.blocks[b][0].replace(f"Block {b}. ", "") for b in self.blocks.keys()}
353
+ display_labels = [label_map.get(lbl, lbl) for lbl in labels]
354
+
355
+ # === Auto linebreak: split labels into two roughly equal parts ===
356
+ def split_label(text):
357
+ words = text.split()
358
+ if len(words) <= 2:
359
+ return text
360
+ mid = len(words) // 2
361
+ return " ".join(words[:mid]) + "\n" + " ".join(words[mid:])
362
+
363
+ display_labels = [split_label(lbl) for lbl in display_labels]
364
+
365
+ # --- Create figure ---
366
+ fig, ax = plt.subplots(figsize=figsize, subplot_kw=dict(polar=True))
367
+
368
+ # --- Plot all participants ---
369
+ for i in range(len(df)):
370
+ values = df.iloc[i].values.tolist()
371
+ values += [values[0]]
372
+ ax.plot(angles, values, linewidth=1, alpha=0.25, color="gray")
373
+ ax.fill(angles, values, alpha=0.05, color="gray")
374
+
375
+ # --- Mean polygon ---
376
+ mean_values = avg.values.tolist() + [avg.values[0]]
377
+ ax.plot(angles, mean_values, color='navy', linewidth=2.5)
378
+ ax.fill(angles, mean_values, color='navy', alpha=0.25)
379
+
380
+ # --- Axis style ---
381
+ ax.set_xticks(angles[:-1])
382
+ ax.set_xticklabels(display_labels, fontsize=11, fontweight='bold', wrap=True)
383
+ ax.set_yticks([1,2,3,4,5])
384
+ ax.set_yticklabels(["1","2","3","4","5"], fontsize=10, fontweight='bold', color="gray")
385
+ ax.set_ylim(0,5)
386
+ ax.set_title(title, size=14, weight='bold', pad=25)
387
+
388
+ # --- Grid and outer circle ---
389
+ ax.grid(True, linestyle='--', color='gray', alpha=0.4, linewidth=0.8)
390
+ ax.spines['polar'].set_visible(False) # remove the black frame
391
+ outer_circle = plt.Circle((0,0), 5, transform=ax.transData._b, fill=False, lw=5, color="red", alpha=0.4)
392
+ ax.add_artist(outer_circle)
393
+
394
+ plt.tight_layout()
395
+
396
+ # --- Save plot if requested ---
397
+ if savepath:
398
+ plt.savefig(savepath, dpi=300, bbox_inches='tight')
399
+ print(f"💾 Saved radar plot to {savepath}")
400
+
401
+ plt.show()
402
+
403
+ def summarize_by_block(self, df):
404
+ """Compute average score per block (A–F) for numeric questions."""
405
+ import re
406
+ num_df = df.select_dtypes(include=["number"])
407
+ block_means = {}
408
+ for col in num_df.columns:
409
+ match = re.match(r"([A-F])\d+", col)
410
+ if match:
411
+ block = match.group(1)
412
+ block_means.setdefault(block, []).append(num_df[col])
413
+ # Mean per block (ignores missing NaN)
414
+ block_avg = {b: pd.concat(cols, axis=1).mean(axis=1) for b, cols in block_means.items()}
415
+ return pd.DataFrame(block_avg)
416
+
417
+
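A small sketch (editor illustration; column names and scores are hypothetical) of how numeric answers flow from summarize_by_block() into plot_spider_multi():

    # Editor sketch. Columns named like "A1", "B2" are grouped by their leading
    # block letter; each participant gets one mean per block.
    import pandas as pd

    responses = pd.DataFrame({"A1": [4, 5], "A2": [3, 4], "B1": [2, 5], "B2": [3, 3]})
    # app = SurveyApp(mode="admin")                  # needs survey_questions.yaml on disk
    # block_avg = app.summarize_by_block(responses)  # -> one "A" and one "B" column of per-participant means
    # app.plot_spider_multi(block_avg, title="Scores per block")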
418
+ ############################################################
419
+ # 🔍 TEXTUAL & SEMANTIC ANALYSIS METHODS
420
+ ############################################################
421
+
422
+ def load_all_responses(self):
423
+ """Load and merge all .csv survey responses into a DataFrame."""
424
+ import pandas as pd, os
425
+ files = [f for f in os.listdir(self.responses_dir) if f.endswith(".csv")]
426
+ if not files:
427
+ print("⚠ No responses found.")
428
+ return None
429
+ df = pd.concat([pd.read_csv(os.path.join(self.responses_dir, f)) for f in files], ignore_index=True)
430
+ df.reset_index(drop=True, inplace=True)
431
+ print(f"✅ Loaded {len(df)} responses ({len(df.columns)} columns)")
432
+ return df
433
+
434
+
435
+ def analyze_text_columns(self, df=None, columns=None, top_n=20):
436
+ """
437
+ Basic textual analysis: show frequent words, word clouds, and per-question summary.
438
+ """
439
+ import matplotlib.pyplot as plt
440
+ from sklearn.feature_extraction.text import CountVectorizer
441
+ from wordcloud import WordCloud
442
+ import pandas as pd
443
+ import os
444
+
445
+ if df is None:
446
+ df = self.load_all_responses()
447
+ if df is None:
448
+ return
449
+
450
+ # auto-detect textual columns if not provided
451
+ if columns is None:
452
+ columns = [c for c in df.columns if df[c].dtype == 'object']
453
+ if not columns:
454
+ print("⚠ No text columns found.")
455
+ return
456
+
457
+ os.makedirs(self.summary_dir, exist_ok=True)
458
+ print(f"🧩 Textual questions detected: {columns}")
459
+
460
+ for col in columns:
461
+ texts = df[col].dropna().astype(str)
462
+ if len(texts) == 0:
463
+ continue
464
+
465
+ # vectorize text
466
+ vectorizer = CountVectorizer(stop_words='english')
467
+ X = vectorizer.fit_transform(texts)
468
+ word_freq = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names_out()).sum().sort_values(ascending=False)
469
+
470
+ # show top words
471
+ print(f"\n📝 Top {top_n} words for '{col}':")
472
+ display(word_freq.head(top_n))
473
+
474
+ # wordcloud
475
+ wc = WordCloud(width=800, height=400, background_color='white').generate(" ".join(texts))
476
+ plt.figure(figsize=(8, 4))
477
+ plt.imshow(wc, interpolation="bilinear")
478
+ plt.axis("off")
479
+ plt.title(f"Word Cloud: {col}")
480
+ savepath = os.path.join(self.summary_dir, f"WordCloud_{col}.png")
481
+ plt.savefig(savepath, dpi=300, bbox_inches="tight")
482
+ print(f"💾 Saved {savepath}")
483
+ plt.show()
484
+
485
+
486
+ def semantic_analysis(self, df=None, columns=None, n_clusters=3):
487
+ """
488
+ Perform semantic clustering on open-ended responses using sentence-transformers.
489
+ """
490
+ from sentence_transformers import SentenceTransformer
491
+ from sklearn.cluster import KMeans
492
+ import matplotlib.pyplot as plt
493
+ import umap
494
+ import numpy as np
495
+ import os
496
+
497
+ if df is None:
498
+ df = self.load_all_responses()
499
+ if df is None:
500
+ return
501
+
502
+ if columns is None:
503
+ columns = [c for c in df.columns if df[c].dtype == 'object']
504
+ texts = []
505
+ meta = []
506
+ for col in columns:
507
+ for t in df[col].dropna():
508
+ texts.append(str(t))
509
+ meta.append(col)
510
+
511
+ if len(texts) < 2:
512
+ print("⚠ Not enough text to perform semantic analysis.")
513
+ return
514
+
515
+ print(f"🧠 Encoding {len(texts)} responses from {len(columns)} text questions...")
516
+ model = SentenceTransformer('all-MiniLM-L6-v2')
517
+ embeddings = model.encode(texts)
518
+
519
+ reducer = umap.UMAP(random_state=0)
520
+ emb_2d = reducer.fit_transform(embeddings)
521
+
522
+ kmeans = KMeans(n_clusters=n_clusters, random_state=0)
523
+ labels = kmeans.fit_predict(embeddings)
524
+
525
+ plt.figure(figsize=(8, 6))
526
+ plt.scatter(emb_2d[:, 0], emb_2d[:, 1], c=labels, cmap='tab10', alpha=0.7)
527
+ plt.title("Semantic Clusters of Open Responses", fontsize=14, weight='bold')
528
+ for i, (x, y) in enumerate(emb_2d):
529
+ plt.text(x, y, meta[i], fontsize=8, alpha=0.6)
530
+ plt.tight_layout()
531
+
532
+ savepath = os.path.join(self.summary_dir, "SemanticClusters.png")
533
+ plt.savefig(savepath, dpi=300, bbox_inches="tight")
534
+ print(f"💾 Saved semantic clustering plot to {savepath}")
535
+ plt.show()
536
+
537
+
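The text-analysis helpers pull in several optional libraries (wordcloud, scikit-learn, sentence-transformers, umap). A hedged usage sketch for the admin side, assuming those packages are installed (the PyPI names below are the usual ones, e.g. umap-learn; they are not pinned by this wheel):

    # Editor sketch. Optional dependencies, e.g.:
    #   pip install wordcloud scikit-learn sentence-transformers umap-learn
    from pyphyschemtools.survey import SurveyApp   # import path assumed

    app = SurveyApp(mode="admin", base_dir="ML-survey")
    df = app.load_all_responses()                   # merges all Response_*.csv files
    if df is not None:
        app.analyze_text_columns(df=df, top_n=15)   # word frequencies + word clouds
        app.semantic_analysis(df=df, n_clusters=4)  # MiniLM embeddings + UMAP + KMeans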
538
+ def build_admin_dashboard(self):
539
+
540
+ # === Print the per-block question summary first ===
541
+ self.print_questions_summary()
542
+
543
+ # === Load all responses ===
544
+ df = self.load_all_responses()
545
+ if df is None:
546
+ return
547
+
548
+ # --- Save all responses to an Excel file ---
549
+ excel_path = os.path.join(self.summary_dir, f"All_Responses_{datetime.now().strftime('%Y%m%d_%H%M')}.xlsx")
550
+ df.to_excel(excel_path, index=False)
551
+ print(f"✅ Saved all responses to Excel: {excel_path}")
552
+ # ---------------------------------------------
553
+
554
+ display(HTML("<h4>📊 All collected responses</h3>"))
555
+ display(df)
556
+
557
+ # === Summary statistics ===
558
+ display(HTML("<h4>📈 Summary statistics</h4>"))
559
+ display(df.describe())
560
+
561
+ # === Missing values report ===
562
+ html_summary = "<h4>🕳 Missing values per column:</h4><div style='font-family:monospace;font-size:14px;'>"
563
+ missing = df.isna().sum()
564
+ for col, val in missing.items():
565
+ if val > 0:
566
+ html_summary += f"<span style='color:red;font-weight:bold;'>{col}={val}</span> | "
567
+ else:
568
+ html_summary += f"{col}=0 | "
569
+ html_summary = html_summary.rstrip(" | ") + "</div>"
570
+ display(HTML(html_summary))
571
+
572
+ # === 🧩 Textual analysis ===
573
+ text_cols = [c for c in df.columns if df[c].dtype == 'object' and c not in ['id']]
574
+ if text_cols:
575
+ display(HTML("<h4>🧠 Textual Analysis</h4>"))
576
+ self.analyze_text_columns(df=df, columns=text_cols, top_n=15)
577
+ else:
578
+ print("ℹ️ No open-ended text columns found for analysis.")
579
+
580
+ # 🕸 Radar plot
581
+ block_avg_df = self.summarize_by_block(df)
582
+ self.plot_spider_multi(
583
+ block_avg_df,
584
+ title="",
585
+ savepath=os.path.join(self.summary_dir, "Radar_BlockScores.png")
586
+ )
587
+
588
+ # === 🧭 Semantic map of text answers ===
589
+ display(HTML("<h4>🧭 Semantic Clustering Map</h4>"))
590
+ try:
591
+ self.semantic_analysis(df=df, columns=text_cols, n_clusters=4)
592
+ except Exception as e:
593
+ print(f"⚠️ Skipped semantic clustering (reason: {e})")
594
+
595
+ display(HTML(
596
+ "<h4>✅ Dashboard summary saved in:</h4>"
597
+ f"<code>{os.path.abspath(self.summary_dir)}</code>"
598
+ ))
599
+
600
+ ############################################################
601
+ # Absorption spectra
602
+ ############################################################
603
+
604
+ import numpy as np
605
+ import matplotlib.pyplot as plt
606
+ import scipy.constants as sc
607
+
608
+ class SpectrumSimulator:
609
+
610
+ def __init__(self, sigma_ev=0.3, plotWH=(12,8), \
611
+ fontSize_axisText=14, fontSize_axisLabels=14, fontSize_legends=12,
612
+ fontsize_peaks=12,
613
+ colorS='#3e89be',colorVT='#469cd6'
614
+ ):
615
+ """
616
+ Initializes the spectrum simulator
617
+
618
+ Args:
619
+ - sigma_ev (float): Gaussian half-width at half-maximum in electron-volts (eV).
620
+ Default is 0.3 eV (GaussView default is 0.4 eV).
621
+ - plotWH (tuple(int,int)): Width and Height of the matplotlib figures in inches. Default is (12,8).
622
+ - colorS: color of the simulated spectrum (default ='#3e89be')
623
+ - colorVT: color of the vertical transition line (default = '#469cd6')
624
+
625
+ Returns:
626
+ None: This method initializes the instance attributes.
627
+ Calculates:
628
+ - sigmanm = half-width of the Gaussian band, in nm
629
+ """
630
+ self.sigma_ev = sigma_ev
631
+ # Conversion constant for sigma: eV -> nm
632
+ self.ev2nm_const = (sc.h * sc.c) * 1e9 / sc.e
633
+ self.sigmanm = self.ev2nm_const / self.sigma_ev
634
+ self.plotW = plotWH[0]
635
+ self.plotH = plotWH[1]
636
+ self.colorS = colorS
637
+ self.colorVT = colorVT
638
+ self.fig = None
639
+ self.graph = None
640
+ self.fontSize_axisText = fontSize_axisText
641
+ self.fontSize_axisLabels = fontSize_axisLabels
642
+ self.fontSize_legends = fontSize_legends
643
+ self.fontsize_peaks = fontsize_peaks
644
+
645
+ print(f"sigma = {sigma_ev} eV -> sigmanm = {self.sigmanm:.1f} nm")
646
+
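As a worked example of the conversion printed above (hc/e ≈ 1239.84 nm eV): with the default sigma_ev = 0.3 eV, sigmanm = 1239.84 / 0.3 ≈ 4132.8 nm, and the wavenumber width later used in _calc_epsiG is sigmabar = 1e7 / sigmanm ≈ 2419.7 cm-1, i.e. 0.3 eV × 8065.5 cm-1/eV.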
647
+ def _initializePlot(self):
648
+ fig, graph = plt.subplots(figsize=(self.plotW,self.plotH))
649
+ plt.subplots_adjust(wspace=0)
650
+ plt.xticks(fontsize=self.fontSize_axisText,fontweight='bold')
651
+ plt.yticks(fontsize=self.fontSize_axisText,fontweight='bold')
652
+ return fig, graph
653
+
654
+ def _calc_epsiG(self,lambdaX,lambdai,fi):
655
+ '''
656
+ calculates a Gaussian band shape around a vertical transition
657
+ input:
658
+ - lambdaX = wavelength variable, in nm
659
+ - lambdai = vertical excitation wavelength for i_th state, in nm
660
+ - fi = oscillator strength for state i (dimensionless)
661
+ output :
662
+ molar absorption coefficient, in L mol-1 cm-1
663
+ '''
664
+ import scipy.constants as sc
665
+ import numpy as np
666
+ c = sc.c*1e2 #cm-1
667
+ NA = sc.N_A #mol-1
668
+ me = sc.m_e*1000 #g
669
+ e = sc.e*sc.c*10 #elementary charge in esu
670
+ pf = np.sqrt(np.pi)*e**2*NA/(1000*np.log(10)*c**2*me)
671
+ nubarX = 1e7 / lambdaX # nm to cm-1
672
+ nubari = 1e7 / lambdai
673
+ sigmabar = 1e7 / self.sigmanm
674
+ epsi = pf * (fi / sigmabar) * np.exp(-((nubarX - nubari)/sigmabar)**2)
675
+ return epsi
676
+
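For reference, _calc_epsiG implements the usual Gaussian convolution of oscillator strengths; written out in the editor's notation (a sketch, with wavenumbers ν̃ in cm⁻¹, ε in L mol⁻¹ cm⁻¹, and the prefactor evaluated from the same CGS constants as in the code above):

    \varepsilon_i(\tilde{\nu}) = \frac{\sqrt{\pi}\, e^{2} N_A}{10^{3}\,\ln(10)\, c^{2} m_e}\;
    \frac{f_i}{\tilde{\sigma}}\,
    \exp\!\left[-\left(\frac{\tilde{\nu}-\tilde{\nu}_i}{\tilde{\sigma}}\right)^{2}\right]
    \;\approx\; 1.306\times10^{8}\;\frac{f_i}{\tilde{\sigma}}\,
    \exp\!\left[-\left(\frac{\tilde{\nu}-\tilde{\nu}_i}{\tilde{\sigma}}\right)^{2}\right]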
677
+ def _Absorbance(self,eps,opl,cc):
678
+ '''
679
+ Calculates the Absorbance with the Beer-Lambert law
680
+ input:
681
+ - eps = molar absorption coefficient, in L mol-1 cm-1
682
+ - opl = optical path length, in cm
683
+ - cc = concentration of the attenuating species, in mol.L-1
684
+ output :
685
+ Absorbance, A (dimensionless)
686
+ '''
687
+ return eps*opl*cc
688
+
689
+ def _sumStatesWithGf(self,wavel,wavelTAB,feTAB):
690
+ '''Sums the Gaussian band contributions of all transitions (wavelTAB, feTAB) at each wavelength in wavel.'''
691
+
692
+ import numpy as np
693
+ sumInt = np.zeros(len(wavel))
694
+ for l in wavel:
695
+ for i in range(len(wavelTAB)):
696
+ sumInt[np.argwhere(l==wavel)[0][0]] += self._calc_epsiG(l,wavelTAB[i],feTAB[i])
697
+ return sumInt
698
+
699
+ def _FindPeaks(self,sumInt,height,prom=1):
700
+ '''
701
+ Finds local maxima within the spectrum based on height and prominence.
702
+
703
+ Prominence is crucial when switching between linear and logarithmic scales:
704
+ - In Linear mode: A large prominence (e.g., 1 to 1000) filters out noise.
705
+ - In Log mode: Data is compressed into a range of ~0 to 5. A large
706
+ prominence will 'hide' real peaks. A smaller value (0.01 to 0.1)
707
+ is required to detect shoulders and overlapping bands.
708
+
709
+ Input:
710
+ - sumInt: Array of intensities (Epsilon or Absorbance).
711
+ - height: Minimum height a peak must reach to be considered.
712
+ - prom: Required vertical distance between the peak and its lowest contour line.
713
+
714
+ Returns:
715
+ - PeakIndex: Indices of the detected peaks in the wavelength array.
716
+ - PeakHeight: The intensity values at these peak positions.
717
+ '''
718
+ from scipy.signal import find_peaks
719
+ peaks = find_peaks(sumInt, height = height, threshold = None, distance = 1, prominence=prom)
720
+ PeakIndex = peaks[0]
721
+ # Check if 'peak_heights' exists in the properties dictionary
722
+ if 'peak_heights' in peaks[1]:
723
+ PeakHeight = peaks[1]['peak_heights']
724
+ else:
725
+ # If height=None, we extract values manually from the input data
726
+ PeakHeight = sumInt[PeakIndex]
727
+ return PeakIndex,PeakHeight
728
+
729
+ def _FindShoulders(self, data, tP):
730
+ '''
731
+ ### NOTE: shoulder detection not working yet
732
+ Detects shoulders using the second derivative.
733
+ A shoulder appears as a peak in the negative second derivative.
734
+
735
+ Note on scales:
736
+ - If ylog is True: data should be log10(sumInt) and tP should be log10(tP).
737
+ The second derivative on log data is much more sensitive to subtle
738
+ inflection points in weak transitions (like n -> pi*).
739
+ - If ylog is False: data is linear sumInt and tP is linear.
740
+
741
+ Returns:
742
+ - shoulder_idx (ndarray): Array of indices where shoulders were found.
743
+ - shoulder_heights (ndarray): The intensity values at these positions
744
+ extracted from the input data.
745
+ '''
746
+ import numpy as np
747
+ # Calculate the second derivative (rate of change of the slope)
748
+ d2 = np.gradient(np.gradient(data))
749
+
750
+ # We search for peaks in the opposite of the second derivative (-d2).
751
+ # A local maximum in -d2 corresponds to a point of maximum curvature
752
+ # (inflection), which identifies a shoulder.
753
+ # We use a very low prominence threshold to capture subtle inflections.
754
+ shoulder_idx, _ = self._FindPeaks(-d2, height=None, prom=0.0001)
755
+ shoulder_heights = data[shoulder_idx]
756
+ print(shoulder_idx, shoulder_heights )
757
+
758
+ return shoulder_idx, shoulder_heights
759
+
760
+ def _pickPeak(self,wavel,peaksIndex,peaksH,color,\
761
+ shift=500,height=500,posAnnotation=200, ylog=False):
762
+ '''
763
+ Annotates peaks with a small vertical tick and the wavelength value.
764
+ Adjusts offsets based on whether the plot is in log10 scale or linear.
765
+ In log mode, peaksH must already be log10 values.
766
+ '''
767
+ s=shift
768
+ h=height
769
+ a=posAnnotation
770
+
771
+
772
+ for i in range(len(peaksIndex)):
773
+ x = wavel[peaksIndex[i]]
774
+ y = peaksH[i]
775
+ if ylog:
776
+ # In log scale, we use multipliers to keep the same visual distance
777
+ # 1.1 means "10% above the peak"
778
+ # Adjust these factors based on your preference
779
+ y_s = y * 1.1
780
+ y_h = y * 1.3
781
+ y_a = y * 1.5
782
+ self.graph.vlines(x, y_s, y_h, colors=color, linestyles='solid')
783
+ self.graph.annotate(f"{x:.1f}",xy=(x,y),xytext=(x,y_a),rotation=90,size=self.fontsize_peaks,ha='center',va='bottom', color=color)
784
+ else:
785
+ # Classic linear offsets
786
+ self.graph.vlines(x, y+s, y+s+h, colors=color, linestyles='solid')
787
+ self.graph.annotate(f"{x:.1f}",xy=(x,y),xytext=(x,y+s+h+a),rotation=90,size=self.fontsize_peaks,ha='center',va='bottom',color=color)
788
+ return
789
+
790
+ def _setup_axes(self, lambdamin, lambdamax, ymax, ylabel="Absorbance"):
791
+ self.graph.set_xlabel('wavelength / nm', size=self.fontSize_axisLabels, fontweight='bold', color='#2f6b91')
792
+ self.graph.set_ylabel(ylabel, size=self.fontSize_axisLabels, fontweight='bold', color='#2f6b91')
793
+ self.graph.set_xlim(lambdamin, lambdamax)
794
+ self.graph.set_ylim(0, ymax)
795
+ self.graph.tick_params(axis='both', labelsize=self.fontSize_axisText,labelcolor='black')
796
+ for tick in self.graph.xaxis.get_majorticklabels(): tick.set_fontweight('bold') #it is both powerful
797
+ # (you can specify the type of a specific tick) and annoying
798
+ for tick in self.graph.yaxis.get_majorticklabels(): tick.set_fontweight('bold')
799
+
800
+ def plotTDDFTSpectrum(self,wavel,sumInt,wavelTAB,feTAB,tP,ylog,labelSpectrum,colorS='#0000ff',colorT='#0000cf'):
801
+
802
+ '''
803
+ Called by plotEps_lambda_TDDFT. Plots a single simulated UV-Vis spectrum, i.e. after
804
+ gaussian broadening, together with the TDDFT vertical transitions (i.e. plotted as lines)
805
+
806
+ input:
807
+ - wavel = array of gaussian-broadened wavelengths, in nm
808
+ - sumInt = corresponding molar absorption coefficients, in L mol-1 cm-1
809
+ - wavelTAB = wavelengths of the TDDFT (i.e. discrete) transitions
810
+ - ylog = log plot of epsilon
811
+ - tP: threshold for finding the peaks
812
+ - feTAB = TDDFT oscillator strength for each transition of wavelTAB
813
+ - labelSpectrum = title for the spectrum
814
+ '''
815
+
816
+ # # --- DEBUG START ---
817
+ # if ylog:
818
+ # print(f"\n--- DEBUG LOG MODE ---")
819
+ # print(f"Max sumInt (linear): {np.max(sumInt):.2f}")
820
+ # print(f"Max sumInt (log10): {np.log10(max(np.max(sumInt), 1e-5)):.2f}")
821
+ # # --- DEBUG END ---
822
+ if ylog:
823
+ # Apply safety floor to the entire array
824
+ self.graph.set_yscale('log')
825
+ ymin_val = 1.0 # Epsilon = 1
826
+ else:
827
+ self.graph.set_yscale('linear')
828
+ ymin_val = 0
829
+
830
+ # vertical lines
831
+ for i in range(len(wavelTAB)):
832
+ val_eps = self._calc_epsiG(wavelTAB[i],wavelTAB[i],feTAB[i])
833
+ self.graph.vlines(x=wavelTAB[i], ymin=ymin_val, ymax=max(val_eps, ymin_val), colors=colorT)
834
+
835
+ self.graph.plot(wavel,sumInt,linewidth=3,linestyle='-',color=colorS,label=labelSpectrum)
836
+
837
+ self.graph.legend(fontsize=self.fontSize_legends)
838
+ if ylog:
839
+ # Use log-transformed data and log-transformed threshold
840
+ # Clipping tP to 1e-5 ensures we don't take log of 0 or negative
841
+ tPlog = np.log10(max(tP, 1e-5))
842
+ # prom=0.05 allows detection of peaks that are close in log-magnitude
843
+ peaks, peaksH_log = self._FindPeaks(np.log10(np.clip(sumInt, 1e-5, None)), tPlog, prom=0.05)
844
+ peaksH = 10**peaksH_log
845
+ # shoulders, shouldersH_log = self._FindShoulders(np.log10(np.clip(sumInt, 1e-5, None)), tPlog)
846
+ # all_idx = np.concatenate((peaks, shoulders))
847
+ # allH_log = np.concatenate((peaksH_log, shouldersH_log))
848
+ # allH = 10**allH_log
849
+ else:
850
+ peaks, peaksH = self._FindPeaks(sumInt,tP)
851
+ # shoulders, shouldersH = self._FindShoulders(wavel, sumInt, tP)
852
+ # all_idx = np.concatenate((peaks, shoulders))
853
+ # allH = np.concatenate((peaksH, shouldersH))
854
+ self._pickPeak(wavel,peaks,peaksH,colorS,500,500,200,ylog)
855
+
856
+
857
+ def plotEps_lambda_TDDFT(self,datFile,lambdamin=200,lambdamax=800,\
858
+ epsMax=None, titles=None, tP = 10, \
859
+ ylog=False,\
860
+ filename=None):
861
+ '''
862
+ Plots a TDDFT UV-Vis simulated spectrum (vertical transitions and transitions summed with Gaussian functions)
863
+ between lambdamin and lambdamax (sum of states done in the range [lambdamin-50, lambdamax+50] nm)
864
+ input:
865
+ - datFile: path/name of an "XXX_ExcStab.dat" file generated by 'GParser Gaussian.log -S'
866
+ - lambdamin, lambdamax: plot range
867
+ - epsMax: y axis graph limit
868
+ - titles: list of titles (1 per spectrum plot)
869
+ - tP: threshold for finding the peaks (default = 10 L. mol-1 cm-1)
870
+ - ylog: y logarithmic axis (default: False).
871
+ - filename: if not None (default None), saves the figure as a 300 dpi png file at the given path
872
+
873
+ '''
874
+ if self.fig is not None:
875
+ graph = self.graph
876
+ fig = self.fig
877
+ lambdamin = self.lambdamin
878
+ lambdamax = self.lambdamax
879
+ epsMax = self.epsMax
880
+ else:
881
+ fig, graph = self._initializePlot()
882
+
883
+ graph.set_prop_cycle(None)
884
+
885
+ if self.fig is None:
886
+ self.fig = fig
887
+ self.graph = graph
888
+ self.lambdamin = lambdamin
889
+ self.lambdamax = lambdamax
890
+ self.epsMax = epsMax
891
+
892
+ graph.set_xlabel('wavelength / nm',size=self.fontSize_axisLabels,fontweight='bold',color='#2f6b91')
893
+
894
+ graph.set_xlim(lambdamin,lambdamax)
895
+
896
+ import matplotlib.ticker as ticker
897
+ graph.xaxis.set_major_locator(ticker.MultipleLocator(50)) # sets a tick at every integer multiple of the base (here 50) within the view interval
898
+
899
+ istate,state,wavel,fe,SSq = np.genfromtxt(datFile,skip_header=1,dtype="<U20,<U20,float,float,<U20",unpack=True)
900
+ wavel = np.array(wavel)
901
+ fe = np.array(fe)
902
+ if wavel.size == 1:
903
+ wavel = np.array([wavel])
904
+ fe = np.array([fe])
905
+ wvl = np.arange(lambdamin-50,lambdamax+50,1)
906
+ sumInt = self._sumStatesWithGf(wvl,wavel,fe)
907
+ self.plotTDDFTSpectrum(wvl,sumInt,wavel,fe,tP,ylog,titles,self.colorS,self.colorVT)
908
+ if ylog:
909
+ graph.set_ylabel('log(molar absorption coefficient / L mol$^{-1}$ cm$^{-1})$',size=self.fontSize_axisLabels,fontweight='bold',color='#2f6b91')
910
+ graph.set_ylim(1, epsMax * 5 if epsMax else None)
911
+ else:
912
+ graph.set_yscale('linear')
913
+ graph.set_ylabel('molar absorption coefficient / L mol$^{-1}$ cm$^{-1}$',size=self.fontSize_axisLabels,fontweight='bold',color='#2f6b91')
914
+ graph.set_ylim(0, epsMax if epsMax else np.max(sumInt)*1.18)
915
+ if filename is not None: self.fig.savefig(filename, dpi=300, bbox_inches='tight')
916
+ plt.show()
917
+
918
+ peaksI, peaksH = self._FindPeaks(sumInt,tP)
919
+ print(f"{bg.LIGHTREDB}{titles}{bg.OFF}")
920
+ for i in range(len(peaksI)):
921
+ print(f"peak {i:3}. {wvl[peaksI[i]]:4} nm. epsilon_max = {peaksH[i]:.1f} L mol-1 cm-1")
922
+ if ylog:
923
+ print()
924
+ # prom=0.05 allows detection of peaks that are close in log-magnitude
925
+ peaksI, peaksH = self._FindPeaks(np.log10(np.clip(sumInt, 1e-5, None)), np.log10(max(tP, 1e-5)), prom=0.05)
926
+ for i in range(len(peaksI)):
927
+ print(f"peak {i:3}. {wvl[peaksI[i]]:4} nm. log10(epsilon_max) = {peaksH[i]:.1f}")
928
+
929
+ def plotAbs_lambda_TDDFT(self, datFiles=None, C0=1e-5, lambdamin=200, lambdamax=800, Amax=2.0,\
930
+ titles=None, linestyles=[], annotateP=[], tP = 0.1,\
931
+ resetColors=False,\
932
+ filename=None):
933
+ '''
934
+ Plots a simulated TDDFT UV-Vis absorbance spectrum (transitions summed with Gaussian functions)
935
+ between lambdamin and lambdamax (sum of states done in the range [lambdamin-50, lambdamax+50] nm)
936
+ input:
937
+ - datFiles: list of paths/names of files generated by 'GParser Gaussian.log -S'
938
+ - C0: list of concentrations needed to calculate A = epsilon x l x c (in mol.L-1)
939
+ - lambdamin, lambdamax: plot range (x axis)
940
+ - Amax: y axis graph limit
941
+ - titles: list of titles (1 per spectrum plot)
942
+ - linestyles: list of line styles(default = "-", i.e. a continuous line)
943
+ - annotateP: list of Boolean (annotate lambda max True or False. Default = True)
944
+ - tP: threshold for finding the peaks (default = 0.1)
945
+ - resetColors (bool): If True, resets the matplotlib color cycle
946
+ to the first color. This allows different series
947
+ (e.g., gas phase vs. solvent) to share the same
948
+ color coding for each molecule across multiple calls. Default: False
949
+ - filename: if not None (default None), saves the figure as a 300 dpi png file at the given path
950
+
951
+ '''
952
+
953
+ if self.fig is None:
954
+ fig, graph = self._initializePlot()
955
+ self.fig = fig
956
+ self.graph = graph
957
+ self.lambdamin = lambdamin
958
+ self.lambdamax = lambdamax
959
+ self.Amax = Amax
960
+ else:
961
+ graph = self.graph
962
+ fig = self.fig
963
+ lambdamin = self.lambdamin
964
+ lambdamax = self.lambdamax
965
+ Amax = self.Amax
966
+ if resetColors: graph.set_prop_cycle(None)
967
+
968
+ if linestyles == []: linestyles = len(datFiles)*['-']
969
+ if annotateP == []: annotateP = len(datFiles)*[True]
970
+
971
+ self._setup_axes(lambdamin, lambdamax, self.Amax, ylabel="Absorbance")
972
+
973
+ wvl = np.arange(lambdamin-50,lambdamax+50,1)
974
+ for f in range(len(datFiles)):
975
+ istate,state,wavel,fe,SSq = np.genfromtxt(datFiles[f],skip_header=1,dtype="<U20,<U20,float,float,<U20",unpack=True)
976
+ sumInt = self._sumStatesWithGf(wvl,wavel,fe)
977
+ Abs = self._Absorbance(sumInt,1,C0[f])
978
+ plot=self.graph.plot(wvl,Abs,linewidth=3,linestyle=linestyles[f],label=f"{titles[f]}. TDDFT ($C_0$={C0[f]} mol/L)")
979
+ peaksI, peaksH = self._FindPeaks(Abs,tP,0.01)
980
+ if (annotateP[f]): self._pickPeak(wvl,peaksI,peaksH,plot[0].get_color(),0.01,0.04,0.02)
981
+ print(f"{bg.LIGHTREDB}TDDFT. {titles[f]}{bg.OFF}")
982
+ for i in range(len(peaksI)):
983
+ print(f"peak {i:3}. {wvl[peaksI[i]]:4} nm. A = {peaksH[i]:.2f}")
984
+
985
+ self.graph.legend(fontsize=self.fontSize_legends)
986
+
987
+ if filename is not None: self.fig.savefig(filename, dpi=300, bbox_inches='tight')
988
+
989
+ return
990
+
991
+ def plotAbs_lambda_exp(self, csvFiles, C0, lambdamin=200, lambdamax=800,\
992
+ Amax=2.0, titles=None, linestyles=[], annotateP=[], tP = 0.1,\
993
+ filename=None):
994
+ '''
995
+ Plots an experimental UV-Vis absorbance spectrum read from a csv file between lambdamin and lambdamax
996
+ input:
997
+ - note: if a figure already exists (typically created by plotAbs_lambda_TDDFT()), the curves are superposed on it;
998
+ otherwise a new figure is created
999
+ - csvFiles: list of paths/names of experimental csv files (see examples for the format)
1000
+ - C0: list of experimental concentrations, i.e. for each sample
1001
+ - lambdamin, lambdamax: plot range (x axis)
1002
+ - Amax: graph limit (y axis)
1003
+ - titles: list of titles (1 per spectrum plot)
1004
+ - linestyles: list of line styles(default = "--", i.e. a dashed line)
1005
+ - annotateP: list of Boolean (annotate lambda max True or False. Default = True)
1006
+ - tP: threshold for finding the peaks (default = 0.1)
1007
+ - filename: if not None (default None), saves the figure as a 300 dpi png file at the given path
1008
+
1009
+ '''
1010
+ if linestyles == []: linestyles = len(csvFiles)*['--']
1011
+ if annotateP == []: annotateP = len(csvFiles)*[True]
1012
+
1013
+ if self.fig is not None:
1014
+ graph = self.graph
1015
+ fig = self.fig
1016
+ lambdamin = self.lambdamin
1017
+ lambdamax = self.lambdamax
1018
+ Amax = self.Amax
1019
+ else:
1020
+ fig, graph = self._initializePlot()
1021
+
1022
+ graph.set_prop_cycle(None)
1023
+
1024
+ if self.fig is None:
1025
+ self.graph = graph
1026
+ self.fig = fig
1027
+ self.lambdamin = lambdamin
1028
+ self.lambdamax = lambdamax
1029
+ self.Amax = Amax
1030
+
1031
+ self._setup_axes(lambdamin, lambdamax, self.Amax, ylabel="Absorbance")
1032
+
1033
+ for f in range(len(csvFiles)):
1034
+ wavel,Abs = np.genfromtxt(csvFiles[f],skip_header=1,unpack=True,delimiter=";")
1035
+ wavel *= 1e9
1036
+ plot=graph.plot(wavel,Abs,linewidth=3,linestyle=linestyles[f],label=f"{titles[f]}. exp ($C_0$={C0[f]} mol/L)")
1037
+ peaksI, peaksH = self._FindPeaks(Abs,tP,0.01)
1038
+ if (annotateP[f]): self._pickPeak(wavel,peaksI,peaksH,plot[0].get_color(),0.01,0.04,0.02)
1039
+ print(f"{bg.LIGHTREDB}exp. {titles[f]}{bg.OFF}")
1040
+ for i in range(len(peaksI)):
1041
+ print(f"peak {i:3}. {wavel[peaksI[i]]:4} nm. A = {peaksH[i]:.2f}")
1042
+
1043
+ graph.legend(fontsize=self.fontSize_legends)
1044
+
1045
+ if filename is not None: self.fig.savefig(filename, dpi=300, bbox_inches='tight')
1046
+
1047
+ return
1048
+
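Finally, a usage sketch for the spectrum simulator (editor illustration; file names, paths and concentrations are hypothetical, and the import path is assumed from this file). The two plotting entry points share the figure stored in self.fig, which is how TDDFT and experimental curves end up superposed; the experimental CSV is read as semicolon-separated with wavelengths in metres (they are multiplied by 1e9 before plotting).

    # Editor sketch, not part of the released file.
    import matplotlib.pyplot as plt
    from pyphyschemtools.survey import SpectrumSimulator   # import path assumed (class defined at the end of this file)

    sim = SpectrumSimulator(sigma_ev=0.3, plotWH=(12, 8))
    sim.plotAbs_lambda_TDDFT(datFiles=["mol_ExcStab.dat"],   # hypothetical GParser output
                             C0=[1e-5], lambdamin=200, lambdamax=800, Amax=1.5,
                             titles=["molecule"])
    sim.plotAbs_lambda_exp(csvFiles=["mol_exp.csv"],         # hypothetical experimental data
                           C0=[1e-5], titles=["molecule"])
    plt.show()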