celltype-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
  Files changed (89)
  1. celltype_cli-0.1.0.dist-info/METADATA +267 -0
  2. celltype_cli-0.1.0.dist-info/RECORD +89 -0
  3. celltype_cli-0.1.0.dist-info/WHEEL +4 -0
  4. celltype_cli-0.1.0.dist-info/entry_points.txt +2 -0
  5. celltype_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
  6. ct/__init__.py +3 -0
  7. ct/agent/__init__.py +0 -0
  8. ct/agent/case_studies.py +426 -0
  9. ct/agent/config.py +523 -0
  10. ct/agent/doctor.py +544 -0
  11. ct/agent/knowledge.py +523 -0
  12. ct/agent/loop.py +99 -0
  13. ct/agent/mcp_server.py +478 -0
  14. ct/agent/orchestrator.py +733 -0
  15. ct/agent/runner.py +656 -0
  16. ct/agent/sandbox.py +481 -0
  17. ct/agent/session.py +145 -0
  18. ct/agent/system_prompt.py +186 -0
  19. ct/agent/trace_store.py +228 -0
  20. ct/agent/trajectory.py +169 -0
  21. ct/agent/types.py +182 -0
  22. ct/agent/workflows.py +462 -0
  23. ct/api/__init__.py +1 -0
  24. ct/api/app.py +211 -0
  25. ct/api/config.py +120 -0
  26. ct/api/engine.py +124 -0
  27. ct/cli.py +1448 -0
  28. ct/data/__init__.py +0 -0
  29. ct/data/compute_providers.json +59 -0
  30. ct/data/cro_database.json +395 -0
  31. ct/data/downloader.py +238 -0
  32. ct/data/loaders.py +252 -0
  33. ct/kb/__init__.py +5 -0
  34. ct/kb/benchmarks.py +147 -0
  35. ct/kb/governance.py +106 -0
  36. ct/kb/ingest.py +415 -0
  37. ct/kb/reasoning.py +129 -0
  38. ct/kb/schema_monitor.py +162 -0
  39. ct/kb/substrate.py +387 -0
  40. ct/models/__init__.py +0 -0
  41. ct/models/llm.py +370 -0
  42. ct/tools/__init__.py +195 -0
  43. ct/tools/_compound_resolver.py +297 -0
  44. ct/tools/biomarker.py +368 -0
  45. ct/tools/cellxgene.py +282 -0
  46. ct/tools/chemistry.py +1371 -0
  47. ct/tools/claude.py +390 -0
  48. ct/tools/clinical.py +1153 -0
  49. ct/tools/clue.py +249 -0
  50. ct/tools/code.py +1069 -0
  51. ct/tools/combination.py +397 -0
  52. ct/tools/compute.py +402 -0
  53. ct/tools/cro.py +413 -0
  54. ct/tools/data_api.py +2114 -0
  55. ct/tools/design.py +295 -0
  56. ct/tools/dna.py +575 -0
  57. ct/tools/experiment.py +604 -0
  58. ct/tools/expression.py +655 -0
  59. ct/tools/files.py +957 -0
  60. ct/tools/genomics.py +1387 -0
  61. ct/tools/http_client.py +146 -0
  62. ct/tools/imaging.py +319 -0
  63. ct/tools/intel.py +223 -0
  64. ct/tools/literature.py +743 -0
  65. ct/tools/network.py +422 -0
  66. ct/tools/notification.py +111 -0
  67. ct/tools/omics.py +3330 -0
  68. ct/tools/ops.py +1230 -0
  69. ct/tools/parity.py +649 -0
  70. ct/tools/pk.py +245 -0
  71. ct/tools/protein.py +678 -0
  72. ct/tools/regulatory.py +643 -0
  73. ct/tools/remote_data.py +179 -0
  74. ct/tools/report.py +181 -0
  75. ct/tools/repurposing.py +376 -0
  76. ct/tools/safety.py +1280 -0
  77. ct/tools/shell.py +178 -0
  78. ct/tools/singlecell.py +533 -0
  79. ct/tools/statistics.py +552 -0
  80. ct/tools/structure.py +882 -0
  81. ct/tools/target.py +901 -0
  82. ct/tools/translational.py +123 -0
  83. ct/tools/viability.py +218 -0
  84. ct/ui/__init__.py +0 -0
  85. ct/ui/markdown.py +31 -0
  86. ct/ui/status.py +258 -0
  87. ct/ui/suggestions.py +567 -0
  88. ct/ui/terminal.py +1456 -0
  89. ct/ui/traces.py +112 -0
ct/tools/network.py ADDED
@@ -0,0 +1,422 @@
1
+ """
2
+ Network biology tools: protein-protein interaction analysis (STRING) and pathway crosstalk (Reactome).
3
+
4
+ These are REST API wrappers -- no local data required.
5
+ """
6
+
7
+ from ct.tools import registry
8
+ from ct.tools.http_client import request, request_json
9
+
10
+
11
+ def _coerce_gene_list(value) -> list[str]:
12
+ """Normalize gene input from str/list/tuple/set into a clean symbol list."""
13
+ if value is None:
14
+ return []
15
+
16
+ items = []
17
+ if isinstance(value, str):
18
+ # Accept comma, semicolon, newline, or pipe separated strings.
19
+ for chunk in value.replace("\n", ",").replace(";", ",").replace("|", ",").split(","):
20
+ token = str(chunk).strip()
21
+ if token:
22
+ items.append(token)
23
+ elif isinstance(value, (list, tuple, set)):
24
+ for entry in value:
25
+ if entry is None:
26
+ continue
27
+ token = str(entry).strip()
28
+ if token:
29
+ items.append(token)
30
+ else:
31
+ token = str(value).strip()
32
+ if token:
33
+ items.append(token)
34
+
35
+ # De-duplicate while preserving order.
36
+ seen = set()
37
+ genes = []
38
+ for gene in items:
39
+ if gene in seen:
40
+ continue
41
+ seen.add(gene)
42
+ genes.append(gene)
43
+ return genes
44
+
45
+
46
@registry.register(
    name="network.ppi_analysis",
    description="Analyze protein-protein interaction network for a gene using STRING database",
    category="network",
    parameters={
        "gene": "Gene symbol or comma-separated list (e.g. 'CRBN' or 'CRBN,DDB1,CUL4A')",
        "min_score": "Minimum interaction confidence score 0-1 (default 0.4 = medium)",
        "network_depth": "1=direct partners only, 2=partners of partners (default 1)",
    },
    usage_guide="You want to understand what proteins interact with a target — maps the interaction neighborhood using STRING. Use for target validation, mechanism exploration, and finding co-complex members.",
)
def ppi_analysis(gene: str, min_score: float = 0.4, network_depth: int = 1, **kwargs) -> dict:
    """Analyze protein-protein interaction network via STRING API.

    Retrieves direct interaction partners and, when ``network_depth >= 2``,
    expands the strongest first-shell partners by one more hop. Computes
    degree/clustering statistics and runs STRING functional enrichment on
    the full interactor set.

    Args:
        gene: Gene symbol or comma/semicolon/pipe/newline separated list.
        min_score: Minimum STRING combined confidence on a 0-1 scale.
        network_depth: 1 = direct partners only; 2 = also partners of partners.

    Returns:
        dict with ``summary``, ``query_genes``, ``interactions`` (capped at 50),
        ``network_stats``, ``enrichment``, and optionally ``depth2_interactions``
        (capped at 30). On failure returns ``{"error": ..., "summary": ...}``.
    """
    genes = _coerce_gene_list(gene)
    if not genes:
        return {"error": "No gene symbols provided", "summary": "No gene symbols provided"}
    string_score = int(min_score * 1000)  # STRING uses a 0-1000 confidence scale
    base = "https://string-db.org/api/json"

    # Step 1: Get the direct interaction network for the query genes.
    interactions, error = request_json(
        "GET",
        f"{base}/network",
        params={
            # STRING's multi-identifier separator is a carriage return.
            "identifiers": "\r".join(genes),
            "species": 9606,
            "required_score": string_score,
            "caller_identity": "ct-celltype",
        },
        timeout=15,
        retries=2,
    )
    if error:
        return {"error": f"STRING network query failed: {error}", "summary": f"STRING network query failed: {error}"}
    if not interactions:
        return {
            "summary": f"No interactions found for {', '.join(genes)} at score >= {min_score}",
            "query_genes": genes,
            "interactions": [],
            "network_stats": {"node_count": len(genes), "edge_count": 0},
        }

    # Parse edges; STRING reports per-channel sub-scores alongside the combined score.
    edges = []
    all_nodes = set(genes)
    for ix in interactions:
        a = ix.get("preferredName_A", ix.get("stringId_A", ""))
        b = ix.get("preferredName_B", ix.get("stringId_B", ""))
        score = round(ix.get("score", 0), 3)
        edges.append({
            "gene_a": a,
            "gene_b": b,
            "score": score,
            "nscore": round(ix.get("nscore", 0), 3),
            "fscore": round(ix.get("fscore", 0), 3),
            "pscore": round(ix.get("pscore", 0), 3),
            "ascore": round(ix.get("ascore", 0), 3),
            "escore": round(ix.get("escore", 0), 3),
            "dscore": round(ix.get("dscore", 0), 3),
            "tscore": round(ix.get("tscore", 0), 3),
        })
        all_nodes.add(a)
        all_nodes.add(b)

    # Sort by combined score descending.
    edges.sort(key=lambda x: x["score"], reverse=True)

    # Step 2: Optional depth-2 expansion (partners of partners).
    depth2_edges = []
    if network_depth >= 2:
        # First-shell partners are everything discovered beyond the query genes.
        first_shell = all_nodes - set(genes)
        if first_shell:
            # Expand only the 10 strongest first-shell partners to bound API load.
            expand_genes = sorted(first_shell, key=lambda g: max(
                (e["score"] for e in edges if g in (e["gene_a"], e["gene_b"])),
                default=0,
            ), reverse=True)[:10]

            depth2_data, depth2_error = request_json(
                "GET",
                f"{base}/network",
                params={
                    "identifiers": "\r".join(expand_genes),
                    "species": 9606,
                    "required_score": string_score,
                    "caller_identity": "ct-celltype",
                },
                timeout=15,
                retries=2,
            )
            # Best-effort: skip expansion on error or an empty/None payload.
            # (Previously a None payload with no error would raise on iteration.)
            if not depth2_error and depth2_data:
                existing_keys = {tuple(sorted([e["gene_a"], e["gene_b"]])) for e in edges}
                for ix in depth2_data:
                    a = ix.get("preferredName_A", ix.get("stringId_A", ""))
                    b = ix.get("preferredName_B", ix.get("stringId_B", ""))
                    score = round(ix.get("score", 0), 3)
                    # Only include edges not already seen (undirected key).
                    edge_key = tuple(sorted([a, b]))
                    if edge_key not in existing_keys:
                        # Record the key so duplicates within this batch are dropped too.
                        existing_keys.add(edge_key)
                        depth2_edges.append({
                            "gene_a": a,
                            "gene_b": b,
                            "score": score,
                        })
                        all_nodes.add(a)
                        all_nodes.add(b)
                depth2_edges.sort(key=lambda x: x["score"], reverse=True)

    # Step 3: Compute network statistics over all collected edges.
    node_count = len(all_nodes)
    edge_count = len(edges) + len(depth2_edges)

    # Degree per node (each undirected edge contributes to both endpoints).
    degree = {}
    for e in edges + depth2_edges:
        degree[e["gene_a"]] = degree.get(e["gene_a"], 0) + 1
        degree[e["gene_b"]] = degree.get(e["gene_b"], 0) + 1

    avg_degree = sum(degree.values()) / max(len(degree), 1)

    # Approximate clustering coefficient: for each node, the fraction of
    # neighbor pairs that are themselves connected (closed triangles).
    adjacency = {}
    for e in edges + depth2_edges:
        adjacency.setdefault(e["gene_a"], set()).add(e["gene_b"])
        adjacency.setdefault(e["gene_b"], set()).add(e["gene_a"])

    clustering_coefficients = []
    for node, neighbors in adjacency.items():
        n = len(neighbors)
        if n < 2:
            # Fewer than two neighbors: no triangle is possible.
            clustering_coefficients.append(0.0)
            continue
        neighbor_list = list(neighbors)
        triangles = 0
        for i in range(len(neighbor_list)):
            for j in range(i + 1, len(neighbor_list)):
                if neighbor_list[j] in adjacency.get(neighbor_list[i], set()):
                    triangles += 1
        possible = n * (n - 1) / 2
        clustering_coefficients.append(triangles / possible if possible > 0 else 0)

    avg_clustering = sum(clustering_coefficients) / max(len(clustering_coefficients), 1)

    # Hub genes: top 10 by degree.
    hub_genes = sorted(degree.items(), key=lambda x: x[1], reverse=True)[:10]

    network_stats = {
        "node_count": node_count,
        "edge_count": edge_count,
        "avg_degree": round(avg_degree, 2),
        "clustering_coefficient": round(avg_clustering, 3),
        "hub_genes": [{"gene": g, "degree": d} for g, d in hub_genes],
    }

    # Step 4: Functional enrichment of the interactor set (best-effort).
    enrichment = []
    interactor_genes = list(all_nodes)
    if len(interactor_genes) >= 2:
        enrich_data, enrich_error = request_json(
            "GET",
            f"{base}/enrichment",
            params={
                "identifiers": "\r".join(interactor_genes),
                "species": 9606,
                "caller_identity": "ct-celltype",
            },
            timeout=15,
            retries=2,
        )
        # Skip silently on error or empty/None payload (previously None would raise).
        if not enrich_error and enrich_data:
            for entry in enrich_data:
                enrichment.append({
                    "category": entry.get("category", ""),
                    "term": entry.get("term", ""),
                    "description": entry.get("description", ""),
                    "p_value": entry.get("p_value", 1.0),
                    "fdr": entry.get("fdr", 1.0),
                    "gene_count": entry.get("number_of_genes", 0),
                    "genes": entry.get("preferredNames", ""),
                })
            # Sort by FDR, keep top 20.
            enrichment.sort(key=lambda x: x["fdr"])
            enrichment = enrichment[:20]

    # Build a human-readable summary: top 5 non-query partners, key stats.
    query_set = set(genes)
    seen_partners = set()
    top_partners = []
    for e in edges:
        partner = e["gene_b"] if e["gene_a"] in query_set else e["gene_a"]
        if partner not in seen_partners and partner not in query_set:
            seen_partners.add(partner)
            top_partners.append(partner)
            if len(top_partners) >= 5:
                break
    top_str = ", ".join(top_partners) if top_partners else "none"
    top_pathway = enrichment[0]["description"] if enrichment else "N/A"

    summary = (
        f"PPI network for {', '.join(genes)}: "
        f"{node_count} nodes, {edge_count} edges (score >= {min_score})\n"
        f"Top interactors: {top_str}\n"
        f"Avg clustering coefficient: {avg_clustering:.3f}\n"
        f"Top enriched pathway: {top_pathway}"
    )

    result = {
        "summary": summary,
        "query_genes": genes,
        "interactions": edges[:50],  # Cap to keep response manageable
        "network_stats": network_stats,
        "enrichment": enrichment,
    }
    if depth2_edges:
        result["depth2_interactions"] = depth2_edges[:30]

    return result
270
+
271
+
272
@registry.register(
    name="network.pathway_crosstalk",
    description="Analyze pathway membership and crosstalk for a gene set using Reactome",
    category="network",
    parameters={
        "genes": "Comma-separated gene symbols (e.g. 'CRBN,DDB1,CUL4A,RBX1')",
    },
    usage_guide="You want to understand which biological pathways a set of genes participate in and how those pathways overlap. Use for mechanism-of-action analysis and understanding pathway-level effects of perturbations.",
)
def pathway_crosstalk(genes: str, **kwargs) -> dict:
    """Analyze pathway membership and crosstalk via Reactome Content Service.

    Submits the gene list for pathway over-representation analysis, maps each
    significant pathway's participants back to the input genes, then reports
    genes appearing in multiple pathways (crosstalk nodes) and pathway pairs
    with shared input genes.

    Args:
        genes: Comma/semicolon/pipe/newline separated gene symbols.

    Returns:
        dict with ``summary``, ``query_genes``, ``genes_not_found``,
        ``pathways``, ``crosstalk_nodes``, and ``pathway_overlaps``.
        On failure returns ``{"error": ..., "summary": ...}``.
    """
    gene_list = _coerce_gene_list(genes)
    if not gene_list:
        return {"error": "No gene symbols provided", "summary": "No gene symbols provided"}
    # Reactome analysis endpoint: POST a plain-text gene list for over-representation.
    reactome_url = "https://reactome.org/AnalysisService/identifiers/projection"
    body = "\n".join(gene_list)

    data, error = request_json(
        "POST",
        reactome_url,
        data=body,
        headers={"Content-Type": "text/plain"},
        params={"pageSize": 20, "page": 1},
        timeout=15,
        retries=2,
    )
    if error:
        return {"error": f"Reactome analysis failed: {error}", "summary": f"Reactome analysis failed: {error}"}
    # Parse pathway results (guard against an empty/None payload with no error).
    pathways_raw = (data or {}).get("pathways", [])
    pathways = []
    gene_pathway_map = {}  # gene -> list of pathway names it participates in

    for pw in pathways_raw:
        entities = pw.get("entities", {})
        ratio = entities.get("ratio", 0)
        pathways.append({
            "pathway_id": pw.get("stId", ""),
            "name": pw.get("name", ""),
            "p_value": entities.get("pValue", 1.0),
            "fdr": entities.get("fdr", 1.0),
            "genes_found": entities.get("found", 0),
            "genes_total": entities.get("total", 0),
            "ratio": round(ratio, 4) if ratio else 0,
        })

    # Count of submitted identifiers Reactome could not map.
    not_found = (data or {}).get("identifiersNotFound", 0)

    # Step 2: For each significant pathway (FDR < 0.05, top 10), fetch the
    # participant molecules and match them back to the input genes.
    gene_set = set(gene_list)  # O(1) membership tests in the participant loop
    significant_pathways = [p for p in pathways if p["fdr"] < 0.05][:10]

    for pw in significant_pathways:
        part_resp, part_error = request(
            "GET",
            f"https://reactome.org/ContentService/data/participants/{pw['pathway_id']}",
            headers={"Accept": "application/json"},
            timeout=10,
            raise_for_status=False,
        )
        if part_error or part_resp.status_code != 200:
            pw["matched_input_genes"] = []
            continue
        try:
            participants = part_resp.json()
        except Exception:
            pw["matched_input_genes"] = []
            continue
        if not isinstance(participants, list):
            # Unexpected payload shape; treat as no participants.
            participants = []

        pw_genes = set()
        for participant in participants:
            # Each participant carries refEntities whose displayName ends with
            # the gene symbol ("UniProt:XXXXX GENE_NAME").
            for ref in participant.get("refEntities", []):
                gene_name = ref.get("displayName", "")
                if " " in gene_name:
                    gene_name = gene_name.split(" ")[-1]
                # Record each pathway at most once per gene; previously a gene
                # appearing in several participants of the same pathway was
                # appended repeatedly, inflating crosstalk pathway_count.
                if gene_name in gene_set and gene_name not in pw_genes:
                    pw_genes.add(gene_name)
                    gene_pathway_map.setdefault(gene_name, []).append(pw["name"])

        pw["matched_input_genes"] = sorted(pw_genes)

    # Crosstalk analysis: input genes appearing in more than one pathway.
    crosstalk_nodes = []
    for g, pws in gene_pathway_map.items():
        if len(pws) > 1:
            crosstalk_nodes.append({
                "gene": g,
                "pathway_count": len(pws),
                "pathways": pws,
            })
    crosstalk_nodes.sort(key=lambda x: x["pathway_count"], reverse=True)

    # Pathway overlap: shared input genes between every pair of significant pathways.
    pathway_overlaps = []
    pathway_gene_sets = {}
    for pw in significant_pathways:
        matched = pw.get("matched_input_genes", [])
        if matched:
            pathway_gene_sets[pw["name"]] = set(matched)

    pw_names = list(pathway_gene_sets.keys())
    for i in range(len(pw_names)):
        for j in range(i + 1, len(pw_names)):
            shared = pathway_gene_sets[pw_names[i]] & pathway_gene_sets[pw_names[j]]
            if shared:
                pathway_overlaps.append({
                    "pathway_a": pw_names[i],
                    "pathway_b": pw_names[j],
                    "shared_genes": sorted(shared),
                    "shared_count": len(shared),
                })
    pathway_overlaps.sort(key=lambda x: x["shared_count"], reverse=True)

    # Build a human-readable summary.
    sig_count = len([p for p in pathways if p["fdr"] < 0.05])
    top_pathway = pathways[0]["name"] if pathways else "N/A"
    top_fdr = pathways[0]["fdr"] if pathways else "N/A"

    summary = (
        f"Reactome pathway analysis for {len(gene_list)} genes: "
        f"{len(pathways)} pathways enriched, {sig_count} significant (FDR < 0.05)\n"
        f"Top pathway: {top_pathway} (FDR={top_fdr})\n"
        f"Crosstalk nodes (multi-pathway genes): {len(crosstalk_nodes)}\n"
        f"Pathway pairs with shared genes: {len(pathway_overlaps)}"
    )

    return {
        "summary": summary,
        "query_genes": gene_list,
        "genes_not_found": not_found,
        "pathways": pathways,
        "crosstalk_nodes": crosstalk_nodes,
        "pathway_overlaps": pathway_overlaps,
    }
@@ -0,0 +1,111 @@
1
+ """
2
+ Notification tools: email sending via SendGrid with dry-run support.
3
+ """
4
+
5
+ from ct.tools import registry
6
+ from ct.tools.http_client import request
7
+
8
+
9
@registry.register(
    name="notification.send_email",
    description="Send an email notification (dry_run=True by default logs without sending)",
    category="notification",
    parameters={
        "to": "Recipient email address",
        "subject": "Email subject line",
        "body": "Email body text",
        "from_email": "Sender email (default: from config or ct@celltype.bio)",
        "dry_run": "If True (default), only log the email without sending",
    },
    usage_guide=(
        "You need to send an email notification, typically a CRO inquiry or "
        "results summary. Always dry_run=True unless user explicitly requests sending."
    ),
)
def send_email(
    to: str,
    subject: str,
    body: str,
    from_email: str = None,
    dry_run: bool = True,
    **kwargs,
) -> dict:
    """Send an email via SendGrid, or log it in dry-run mode.

    Every attempt (dry-run, sent, or failed) is appended to
    ``~/.ct/sent_emails.log`` for auditability.

    Args:
        to: Recipient email address.
        subject: Email subject line.
        body: Plain-text email body.
        from_email: Sender address; falls back to the ``notification.from_email``
            config value, then ``ct@celltype.bio``.
        dry_run: When True (default), log the email without sending.

    Returns:
        dict with ``summary``, the echoed message fields, ``dry_run``,
        ``sent``, and ``error`` (None on success).
    """
    from datetime import datetime, timezone
    from pathlib import Path

    from ct.agent.config import Config

    config = Config.load()

    if from_email is None:
        from_email = config.get("notification.from_email", "ct@celltype.bio")

    # Ensure the audit-log directory exists.
    log_dir = Path.home() / ".ct"
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / "sent_emails.log"

    timestamp = datetime.now(timezone.utc).isoformat()
    sent = False
    error = None

    if not dry_run:
        api_key = config.get("notification.sendgrid_api_key")
        if not api_key:
            return {
                "summary": "SendGrid API key not configured. Set it with: ct config set notification.sendgrid_api_key <key>",
                "to": to,
                "subject": subject,
                "body": body,
                "dry_run": dry_run,
                "sent": False,
                "error": "missing_api_key",
            }

        # SendGrid v3 mail/send payload (plain text only).
        payload = {
            "personalizations": [{"to": [{"email": to}]}],
            "from": {"email": from_email},
            "subject": subject,
            "content": [{"type": "text/plain", "value": body}],
        }

        resp, req_error = request(
            "POST",
            "https://api.sendgrid.com/v3/mail/send",
            json=payload,
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            },
            timeout=30,
            retries=2,
        )
        if req_error:
            error = req_error
        else:
            sent = True

    # Log the attempt. Strip CR/LF from the subject so each audit entry stays
    # on one line (a multi-line subject previously corrupted the log format);
    # pin the encoding instead of relying on the platform default.
    status = "DRY_RUN" if dry_run else ("SENT" if sent else f"FAILED: {error}")
    safe_subject = subject.replace("\r", " ").replace("\n", " ")
    log_line = f"[{timestamp}] {status} | to={to} | subject={safe_subject}\n"
    with open(log_file, "a", encoding="utf-8") as f:
        f.write(log_line)

    if dry_run:
        summary = f"[DRY RUN] Would send email to {to}: '{subject}'"
    elif sent:
        summary = f"Email sent to {to}: '{subject}'"
    else:
        summary = f"Failed to send email to {to}: {error}"

    return {
        "summary": summary,
        "to": to,
        "subject": subject,
        "body": body,
        "from_email": from_email,
        "dry_run": dry_run,
        "sent": sent,
        "error": error,
    }