pylocuszoom 1.1.2__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pylocuszoom/__init__.py CHANGED
@@ -89,6 +89,7 @@ from .finemapping import (
  filter_finemapping_by_region,
  get_credible_sets,
  get_top_pip_variants,
+ plot_finemapping,
  prepare_finemapping_for_plotting,
  validate_finemapping_df,
  )
@@ -144,6 +145,7 @@ from .plotter import LocusZoomPlotter
  from .recombination import (
  add_recombination_overlay,
  download_canine_recombination_maps,
+ ensure_recomb_maps,
  get_recombination_rate_for_region,
  load_recombination_map,
  )
@@ -182,6 +184,7 @@ __all__ = [
  "add_snp_labels",
  # Recombination
  "add_recombination_overlay",
+ "ensure_recomb_maps",
  "get_recombination_rate_for_region",
  "load_recombination_map",
  # eQTL
@@ -198,6 +201,7 @@ __all__ = [
  "filter_by_credible_set",
  "get_credible_sets",
  "get_top_pip_variants",
+ "plot_finemapping",
  "prepare_finemapping_for_plotting",
  "FinemappingValidationError",
  # Logging
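Both newly exported names are re-exported from the package root, so downstream code can import them directly once 1.2.0 is installed. A minimal sketch (illustration only, not taken from the package docs):

```python
from pylocuszoom import ensure_recomb_maps, plot_finemapping

# Both symbols are listed in __all__ as of 1.2.0; their definitions appear in
# the finemapping and recombination hunks later in this diff.
print(ensure_recomb_maps.__module__, plot_finemapping.__module__)
```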
pylocuszoom/finemapping.py CHANGED
@@ -4,18 +4,20 @@ Provides utilities for loading, validating, and preparing statistical
  fine-mapping results (SuSiE, FINEMAP, etc.) for visualization.
  """

- from typing import List, Optional
+ from typing import Any, List, Optional

  import pandas as pd

+ from .backends.base import PlotBackend
+ from .backends.hover import HoverConfig, HoverDataBuilder
+ from .colors import PIP_LINE_COLOR, get_credible_set_color
  from .exceptions import FinemappingValidationError, ValidationError
  from .logging import logger
  from .utils import filter_by_region
  from .validation import DataFrameValidator

- # Required columns for fine-mapping data
+ # Required columns for fine-mapping data (default column names)
  REQUIRED_FINEMAPPING_COLS = ["pos", "pip"]
- OPTIONAL_FINEMAPPING_COLS = ["rs", "cs", "cs_id", "effect", "se"]


  def validate_finemapping_df(
@@ -207,3 +209,109 @@ def calculate_credible_set_coverage(
  coverage[cs_id] = cs_data[pip_col].sum()

  return coverage
+
+
+ def plot_finemapping(
+ backend: PlotBackend,
+ ax: Any,
+ df: pd.DataFrame,
+ pos_col: str = "pos",
+ pip_col: str = "pip",
+ cs_col: Optional[str] = "cs",
+ show_credible_sets: bool = True,
+ pip_threshold: float = 0.0,
+ ) -> None:
+ """Plot fine-mapping results (PIP line with credible set coloring).
+
+ Renders posterior inclusion probabilities as a line plot, with optional
+ scatter points colored by credible set membership.
+
+ Args:
+ backend: Plotting backend implementing PlotBackend protocol.
+ ax: Axes or panel to plot on.
+ df: Fine-mapping DataFrame with pos and pip columns.
+ pos_col: Column name for position.
+ pip_col: Column name for posterior inclusion probability.
+ cs_col: Column name for credible set assignment (optional).
+ show_credible_sets: Whether to color points by credible set.
+ pip_threshold: Minimum PIP to display as scatter point.
+ """
+ # Build hover data using HoverDataBuilder
+ extra_cols = {pip_col: "PIP"}
+ if cs_col and cs_col in df.columns:
+ extra_cols[cs_col] = "Credible Set"
+ hover_config = HoverConfig(
+ pos_col=pos_col if pos_col in df.columns else None,
+ extra_cols=extra_cols,
+ )
+ hover_builder = HoverDataBuilder(hover_config)
+
+ # Sort by position for line plotting
+ df = df.sort_values(pos_col)
+
+ # Plot PIP as line
+ backend.line(
+ ax,
+ df[pos_col],
+ df[pip_col],
+ color=PIP_LINE_COLOR,
+ linewidth=1.5,
+ alpha=0.8,
+ zorder=1,
+ )
+
+ # Check if credible sets are available
+ has_cs = cs_col is not None and cs_col in df.columns and show_credible_sets
+ credible_sets = get_credible_sets(df, cs_col) if has_cs else []
+
+ if credible_sets:
+ # Plot points colored by credible set
+ for cs_id in credible_sets:
+ cs_data = df[df[cs_col] == cs_id]
+ color = get_credible_set_color(cs_id)
+ backend.scatter(
+ ax,
+ cs_data[pos_col],
+ cs_data[pip_col],
+ colors=color,
+ sizes=50,
+ marker="o",
+ edgecolor="black",
+ linewidth=0.5,
+ zorder=3,
+ hover_data=hover_builder.build_dataframe(cs_data),
+ )
+ # Plot variants not in any credible set (only if threshold is set)
+ if pip_threshold > 0:
+ non_cs_data = df[(df[cs_col].isna()) | (df[cs_col] == 0)]
+ non_cs_data = non_cs_data[non_cs_data[pip_col] >= pip_threshold]
+ if not non_cs_data.empty:
+ backend.scatter(
+ ax,
+ non_cs_data[pos_col],
+ non_cs_data[pip_col],
+ colors="#BEBEBE",
+ sizes=30,
+ marker="o",
+ edgecolor="black",
+ linewidth=0.3,
+ zorder=2,
+ hover_data=hover_builder.build_dataframe(non_cs_data),
+ )
+ else:
+ # No credible sets - show all points above threshold
+ if pip_threshold > 0:
+ high_pip = df[df[pip_col] >= pip_threshold]
+ if not high_pip.empty:
+ backend.scatter(
+ ax,
+ high_pip[pos_col],
+ high_pip[pip_col],
+ colors=PIP_LINE_COLOR,
+ sizes=50,
+ marker="o",
+ edgecolor="black",
+ linewidth=0.5,
+ zorder=3,
+ hover_data=hover_builder.build_dataframe(high_pip),
+ )
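Because plot_finemapping is now a module-level function that takes any object satisfying the PlotBackend protocol, it can be driven without a LocusZoomPlotter instance. The sketch below uses a hypothetical stand-in backend that implements only the line and scatter calls made in the hunk above (hover_data is accepted but ignored); it is an illustration under those assumptions, not the package's own backend.

```python
import matplotlib.pyplot as plt
import pandas as pd

from pylocuszoom import plot_finemapping


class StandInBackend:
    """Hypothetical minimal PlotBackend: only the methods plot_finemapping calls."""

    def line(self, ax, x, y, color=None, linewidth=1.0, alpha=1.0, zorder=1):
        ax.plot(x, y, color=color, linewidth=linewidth, alpha=alpha, zorder=zorder)

    def scatter(self, ax, x, y, colors=None, sizes=20, marker="o",
                edgecolor="black", linewidth=0.5, zorder=2, hover_data=None):
        # hover_data only matters for interactive backends; ignored here.
        ax.scatter(x, y, c=colors, s=sizes, marker=marker,
                   edgecolors=edgecolor, linewidths=linewidth, zorder=zorder)


finemap_df = pd.DataFrame({
    "pos": [1_000_000, 1_000_400, 1_000_900, 1_001_500],
    "pip": [0.02, 0.85, 0.10, 0.01],
    "cs": [0, 1, 1, 0],  # 0 = not in a credible set
})

fig, ax = plt.subplots()
plot_finemapping(StandInBackend(), ax, finemap_df, pos_col="pos", pip_col="pip", cs_col="cs")
ax.set_ylabel("PIP")
```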
pylocuszoom/plotter.py CHANGED
@@ -15,7 +15,6 @@ from typing import Any, List, Optional, Tuple
  import matplotlib.pyplot as plt
  import numpy as np
  import pandas as pd
- import requests

  from ._plotter_utils import DEFAULT_GENOMEWIDE_THRESHOLD
  from .backends import BackendType, get_backend
@@ -25,7 +24,6 @@ from .colors import (
  EQTL_POSITIVE_BINS,
  LD_BINS,
  LEAD_SNP_COLOR,
- PIP_LINE_COLOR,
  get_credible_set_color,
  get_eqtl_color,
  get_ld_bin,
@@ -36,6 +34,7 @@ from .ensembl import get_genes_for_region
  from .eqtl import validate_eqtl_df
  from .finemapping import (
  get_credible_sets,
+ plot_finemapping,
  prepare_finemapping_for_plotting,
  )
  from .gene_track import (
@@ -44,14 +43,11 @@ from .gene_track import (
  )
  from .ld import calculate_ld, find_plink
  from .logging import enable_logging, logger
- from .manhattan_plotter import ManhattanPlotter
  from .recombination import (
  RECOMB_COLOR,
- download_canine_recombination_maps,
- get_default_data_dir,
+ ensure_recomb_maps,
  get_recombination_rate_for_region,
  )
- from .stats_plotter import StatsPlotter
  from .utils import normalize_chrom, validate_genes_df, validate_gwas_df

  # Precomputed significance line value (used for plotting)
@@ -149,27 +145,6 @@ class LocusZoomPlotter:
  # Cache for loaded data
  self._recomb_cache = {}

- @property
- def _manhattan_plotter(self) -> ManhattanPlotter:
- """Lazy-load ManhattanPlotter with shared configuration."""
- if not hasattr(self, "_manhattan_plotter_instance"):
- self._manhattan_plotter_instance = ManhattanPlotter(
- species=self.species,
- backend=self._backend_name,
- genomewide_threshold=self.genomewide_threshold,
- )
- return self._manhattan_plotter_instance
-
- @property
- def _stats_plotter(self) -> StatsPlotter:
- """Lazy-load StatsPlotter with shared configuration."""
- if not hasattr(self, "_stats_plotter_instance"):
- self._stats_plotter_instance = StatsPlotter(
- backend=self._backend_name,
- genomewide_threshold=self.genomewide_threshold,
- )
- return self._stats_plotter_instance
-
  @staticmethod
  def _default_build(species: str) -> Optional[str]:
  """Get default genome build for species."""
@@ -177,37 +152,14 @@
  return builds.get(species)

  def _ensure_recomb_maps(self) -> Optional[Path]:
- """Ensure recombination maps are downloaded.
+ """Ensure recombination maps are available.

- Returns path to recombination map directory, or None if not available.
+ Delegates to the recombination module's ensure_recomb_maps function.
+
+ Returns:
+ Path to recombination map directory, or None if not available.
  """
- if self.species == "canine":
- if self.recomb_data_dir:
- return Path(self.recomb_data_dir)
- # Check if already downloaded
- default_dir = get_default_data_dir()
- if (
- default_dir.exists()
- and len(list(default_dir.glob("chr*_recomb.tsv"))) >= 39
- ): # 38 autosomes + X
- return default_dir
- # Download
- try:
- return download_canine_recombination_maps()
- except (requests.RequestException, OSError, IOError) as e:
- # Expected network/file errors - graceful fallback
- logger.warning(f"Could not download recombination maps: {e}")
- return None
- except Exception as e:
- # JUSTIFICATION: Download failure should not prevent plotting.
- # We catch broadly here because graceful degradation is acceptable
- # for optional recombination map downloads. Error-level logging
- # ensures the issue is visible.
- logger.error(f"Unexpected error downloading recombination maps: {e}")
- return None
- elif self.recomb_data_dir:
- return Path(self.recomb_data_dir)
- return None
+ return ensure_recomb_maps(species=self.species, data_dir=self.recomb_data_dir)

  def _get_recomb_for_region(
  self, chrom: int, start: int, end: int
@@ -238,7 +190,7 @@
  def _transform_pvalues(self, df: pd.DataFrame, p_col: str) -> pd.DataFrame:
  """Add neglog10p column with -log10 transformed p-values.

- Delegates to shared utility function. Assumes df is already a copy.
+ Modifies df in place. Callers should pass a copy to avoid side effects.

  Args:
  df: DataFrame with p-value column (should be a copy).
@@ -670,107 +622,6 @@
  if isinstance(twin_result, Axes):
  secondary_ax.spines["top"].set_visible(False)

- def _plot_finemapping(
- self,
- ax: Any,
- df: pd.DataFrame,
- pos_col: str = "pos",
- pip_col: str = "pip",
- cs_col: Optional[str] = "cs",
- show_credible_sets: bool = True,
- pip_threshold: float = 0.0,
- ) -> None:
- """Plot fine-mapping results (PIP line with credible set coloring).
-
- Args:
- ax: Matplotlib axes object.
- df: Fine-mapping DataFrame with pos and pip columns.
- pos_col: Column name for position.
- pip_col: Column name for posterior inclusion probability.
- cs_col: Column name for credible set assignment (optional).
- show_credible_sets: Whether to color points by credible set.
- pip_threshold: Minimum PIP to display as scatter point.
- """
- # Build hover data using HoverDataBuilder
- extra_cols = {pip_col: "PIP"}
- if cs_col and cs_col in df.columns:
- extra_cols[cs_col] = "Credible Set"
- hover_config = HoverConfig(
- pos_col=pos_col if pos_col in df.columns else None,
- extra_cols=extra_cols,
- )
- hover_builder = HoverDataBuilder(hover_config)
-
- # Sort by position for line plotting
- df = df.sort_values(pos_col)
-
- # Plot PIP as line
- self._backend.line(
- ax,
- df[pos_col],
- df[pip_col],
- color=PIP_LINE_COLOR,
- linewidth=1.5,
- alpha=0.8,
- zorder=1,
- )
-
- # Check if credible sets are available
- has_cs = cs_col is not None and cs_col in df.columns and show_credible_sets
- credible_sets = get_credible_sets(df, cs_col) if has_cs else []
-
- if credible_sets:
- # Plot points colored by credible set
- for cs_id in credible_sets:
- cs_data = df[df[cs_col] == cs_id]
- color = get_credible_set_color(cs_id)
- self._backend.scatter(
- ax,
- cs_data[pos_col],
- cs_data[pip_col],
- colors=color,
- sizes=50,
- marker="o",
- edgecolor="black",
- linewidth=0.5,
- zorder=3,
- hover_data=hover_builder.build_dataframe(cs_data),
- )
- # Plot variants not in any credible set
- non_cs_data = df[(df[cs_col].isna()) | (df[cs_col] == 0)]
- if not non_cs_data.empty and pip_threshold > 0:
- non_cs_data = non_cs_data[non_cs_data[pip_col] >= pip_threshold]
- if not non_cs_data.empty:
- self._backend.scatter(
- ax,
- non_cs_data[pos_col],
- non_cs_data[pip_col],
- colors="#BEBEBE",
- sizes=30,
- marker="o",
- edgecolor="black",
- linewidth=0.3,
- zorder=2,
- hover_data=hover_builder.build_dataframe(non_cs_data),
- )
- else:
- # No credible sets - show all points above threshold
- if pip_threshold > 0:
- high_pip = df[df[pip_col] >= pip_threshold]
- if not high_pip.empty:
- self._backend.scatter(
- ax,
- high_pip[pos_col],
- high_pip[pip_col],
- colors=PIP_LINE_COLOR,
- sizes=50,
- marker="o",
- edgecolor="black",
- linewidth=0.5,
- zorder=3,
- hover_data=hover_builder.build_dataframe(high_pip),
- )
-
  def plot_stacked(
  self,
  gwas_dfs: List[pd.DataFrame],
@@ -1070,7 +921,8 @@
  )

  if not fm_data.empty:
- self._plot_finemapping(
+ plot_finemapping(
+ self._backend,
  ax,
  fm_data,
  pos_col="pos",
@@ -1232,180 +1084,3 @@
  self._backend.finalize_layout(fig, hspace=0.1)

  return fig
-
- def plot_phewas(
- self,
- phewas_df: pd.DataFrame,
- variant_id: str,
- phenotype_col: str = "phenotype",
- p_col: str = "p_value",
- category_col: str = "category",
- effect_col: Optional[str] = None,
- significance_threshold: float = 5e-8,
- figsize: Tuple[float, float] = (10, 8),
- ) -> Any:
- """Create a PheWAS plot. See StatsPlotter.plot_phewas for docs."""
- return self._stats_plotter.plot_phewas(
- phewas_df=phewas_df,
- variant_id=variant_id,
- phenotype_col=phenotype_col,
- p_col=p_col,
- category_col=category_col,
- effect_col=effect_col,
- significance_threshold=significance_threshold,
- figsize=figsize,
- )
-
- def plot_forest(
- self,
- forest_df: pd.DataFrame,
- variant_id: str,
- study_col: str = "study",
- effect_col: str = "effect",
- ci_lower_col: str = "ci_lower",
- ci_upper_col: str = "ci_upper",
- weight_col: Optional[str] = None,
- null_value: float = 0.0,
- effect_label: str = "Effect Size",
- figsize: Tuple[float, float] = (8, 6),
- ) -> Any:
- """Create a forest plot. See StatsPlotter.plot_forest for docs."""
- return self._stats_plotter.plot_forest(
- forest_df=forest_df,
- variant_id=variant_id,
- study_col=study_col,
- effect_col=effect_col,
- ci_lower_col=ci_lower_col,
- ci_upper_col=ci_upper_col,
- weight_col=weight_col,
- null_value=null_value,
- effect_label=effect_label,
- figsize=figsize,
- )
-
- def plot_manhattan(
- self,
- df: pd.DataFrame,
- chrom_col: str = "chrom",
- pos_col: str = "pos",
- p_col: str = "p",
- custom_chrom_order: Optional[List[str]] = None,
- category_col: Optional[str] = None,
- category_order: Optional[List[str]] = None,
- significance_threshold: Optional[float] = DEFAULT_GENOMEWIDE_THRESHOLD,
- figsize: Tuple[float, float] = (12, 5),
- title: Optional[str] = None,
- ) -> Any:
- """Create a Manhattan plot. See ManhattanPlotter.plot_manhattan for docs."""
- return self._manhattan_plotter.plot_manhattan(
- df=df,
- chrom_col=chrom_col,
- pos_col=pos_col,
- p_col=p_col,
- custom_chrom_order=custom_chrom_order,
- category_col=category_col,
- category_order=category_order,
- significance_threshold=significance_threshold,
- figsize=figsize,
- title=title,
- )
-
- def plot_qq(
- self,
- df: pd.DataFrame,
- p_col: str = "p",
- show_confidence_band: bool = True,
- show_lambda: bool = True,
- figsize: Tuple[float, float] = (6, 6),
- title: Optional[str] = None,
- ) -> Any:
- """Create a QQ plot. See ManhattanPlotter.plot_qq for docs."""
- return self._manhattan_plotter.plot_qq(
- df=df,
- p_col=p_col,
- show_confidence_band=show_confidence_band,
- show_lambda=show_lambda,
- figsize=figsize,
- title=title,
- )
-
- def plot_manhattan_stacked(
- self,
- gwas_dfs: List[pd.DataFrame],
- chrom_col: str = "chrom",
- pos_col: str = "pos",
- p_col: str = "p",
- custom_chrom_order: Optional[List[str]] = None,
- significance_threshold: Optional[float] = DEFAULT_GENOMEWIDE_THRESHOLD,
- panel_labels: Optional[List[str]] = None,
- figsize: Tuple[float, float] = (12, 8),
- title: Optional[str] = None,
- ) -> Any:
- """Create stacked Manhattan plots. See ManhattanPlotter.plot_manhattan_stacked for docs."""
- return self._manhattan_plotter.plot_manhattan_stacked(
- gwas_dfs=gwas_dfs,
- chrom_col=chrom_col,
- pos_col=pos_col,
- p_col=p_col,
- custom_chrom_order=custom_chrom_order,
- significance_threshold=significance_threshold,
- panel_labels=panel_labels,
- figsize=figsize,
- title=title,
- )
-
- def plot_manhattan_qq(
- self,
- df: pd.DataFrame,
- chrom_col: str = "chrom",
- pos_col: str = "pos",
- p_col: str = "p",
- custom_chrom_order: Optional[List[str]] = None,
- significance_threshold: Optional[float] = DEFAULT_GENOMEWIDE_THRESHOLD,
- show_confidence_band: bool = True,
- show_lambda: bool = True,
- figsize: Tuple[float, float] = (14, 5),
- title: Optional[str] = None,
- ) -> Any:
- """Create side-by-side Manhattan and QQ plots. See ManhattanPlotter.plot_manhattan_qq for docs."""
- return self._manhattan_plotter.plot_manhattan_qq(
- df=df,
- chrom_col=chrom_col,
- pos_col=pos_col,
- p_col=p_col,
- custom_chrom_order=custom_chrom_order,
- significance_threshold=significance_threshold,
- show_confidence_band=show_confidence_band,
- show_lambda=show_lambda,
- figsize=figsize,
- title=title,
- )
-
- def plot_manhattan_qq_stacked(
- self,
- gwas_dfs: List[pd.DataFrame],
- chrom_col: str = "chrom",
- pos_col: str = "pos",
- p_col: str = "p",
- custom_chrom_order: Optional[List[str]] = None,
- significance_threshold: Optional[float] = DEFAULT_GENOMEWIDE_THRESHOLD,
- show_confidence_band: bool = True,
- show_lambda: bool = True,
- panel_labels: Optional[List[str]] = None,
- figsize: Tuple[float, float] = (14, 8),
- title: Optional[str] = None,
- ) -> Any:
- """Create stacked Manhattan+QQ plots. See ManhattanPlotter.plot_manhattan_qq_stacked for docs."""
- return self._manhattan_plotter.plot_manhattan_qq_stacked(
- gwas_dfs=gwas_dfs,
- chrom_col=chrom_col,
- pos_col=pos_col,
- p_col=p_col,
- custom_chrom_order=custom_chrom_order,
- significance_threshold=significance_threshold,
- show_confidence_band=show_confidence_band,
- show_lambda=show_lambda,
- panel_labels=panel_labels,
- figsize=figsize,
- title=title,
- )
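The convenience wrappers removed above are not dropped from the package; per the README updates further down in this diff, the same plots are produced by calling ManhattanPlotter and StatsPlotter directly in 1.2.0. A minimal migration sketch with toy data (the DataFrames below are illustrative, not from the source):

```python
import pandas as pd

from pylocuszoom import ManhattanPlotter, StatsPlotter

# Genome-wide views: previously plotter.plot_manhattan(...) on LocusZoomPlotter.
gwas_df = pd.DataFrame({
    "chrom": [1, 1, 2, 2],
    "pos": [150_000, 900_000, 250_000, 700_000],
    "p": [1e-3, 5e-9, 2e-4, 3e-10],
})
manhattan = ManhattanPlotter()
fig_manhattan = manhattan.plot_manhattan(gwas_df, chrom_col="chrom", pos_col="pos", p_col="p")

# Per-variant statistics: previously plotter.plot_forest(...) / plot_phewas(...).
forest_df = pd.DataFrame({
    "study": ["Study A", "Study B"],
    "effect": [0.45, 0.52],
    "ci_lower": [0.30, 0.35],
    "ci_upper": [0.60, 0.69],
})
fig_forest = StatsPlotter().plot_forest(forest_df, variant_id="rs12345")
```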
pylocuszoom/recombination.py CHANGED
@@ -395,6 +395,45 @@ def get_recombination_rate_for_region(
  return region_df[["pos", "rate"]]


+ def ensure_recomb_maps(
+ species: str = "canine",
+ data_dir: Optional[str] = None,
+ ) -> Optional[Path]:
+ """Ensure recombination maps are available, downloading if needed.
+
+ Args:
+ species: Species name ('canine', 'feline', etc.).
+ data_dir: Directory for recombination maps. Uses default if None.
+
+ Returns:
+ Path to recombination maps directory, or None if species not supported
+ or download fails.
+ """
+ if species != "canine":
+ logger.debug(f"No built-in recombination maps for species: {species}")
+ return None
+
+ if data_dir is not None:
+ output_path = Path(data_dir)
+ else:
+ output_path = get_default_data_dir()
+
+ # Check if maps already exist
+ if output_path.exists():
+ existing_files = list(output_path.glob("chr*_recomb.tsv"))
+ if len(existing_files) >= 39: # 38 autosomes + X
+ logger.debug(f"Recombination maps already exist at {output_path}")
+ return output_path
+
+ # Download maps with error handling
+ logger.info("Downloading canine recombination maps...")
+ try:
+ return download_canine_recombination_maps(output_dir=str(output_path))
+ except Exception as e:
+ logger.warning(f"Could not download recombination maps: {e}")
+ return None
+
+
  def add_recombination_overlay(
  ax: Axes,
  recomb_df: pd.DataFrame,
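A short usage sketch for the new helper, mirroring the signature in the hunk above (the output handling is illustrative only):

```python
from pylocuszoom import ensure_recomb_maps

# Resolve (and, if necessary, download) the bundled canine recombination maps.
# Returns None for unsupported species or when the download fails, so callers
# should be prepared to skip the recombination overlay.
maps_dir = ensure_recomb_maps(species="canine")
if maps_dir is None:
    print("Recombination maps unavailable; overlay will be skipped")
else:
    print(f"Recombination maps in {maps_dir}")
```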
pylocuszoom-1.1.2.dist-info/METADATA → pylocuszoom-1.2.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pylocuszoom
- Version: 1.1.2
+ Version: 1.2.0
  Summary: Publication-ready regional association plots with LD coloring, gene tracks, and recombination overlays
  Project-URL: Homepage, https://github.com/michael-denyer/pylocuszoom
  Project-URL: Documentation, https://github.com/michael-denyer/pylocuszoom#readme
@@ -35,6 +35,7 @@ Requires-Dist: tqdm>=4.60.0
  Provides-Extra: all
  Requires-Dist: pyspark>=3.0.0; extra == 'all'
  Provides-Extra: dev
+ Requires-Dist: hypothesis>=6.0.0; extra == 'dev'
  Requires-Dist: pytest-cov>=4.0.0; extra == 'dev'
  Requires-Dist: pytest-randomly>=3.0.0; extra == 'dev'
  Requires-Dist: pytest-xdist>=3.0.0; extra == 'dev'
@@ -320,13 +321,16 @@ fig = plotter.plot_stacked(
  Visualize associations of a single variant across multiple phenotypes:

  ```python
+ from pylocuszoom import StatsPlotter
+
  phewas_df = pd.DataFrame({
  "phenotype": ["Height", "BMI", "T2D", "CAD", "HDL"],
  "p_value": [1e-15, 0.05, 1e-8, 1e-3, 1e-10],
  "category": ["Anthropometric", "Anthropometric", "Metabolic", "Cardiovascular", "Lipids"],
  })

- fig = plotter.plot_phewas(
+ stats_plotter = StatsPlotter()
+ fig = stats_plotter.plot_phewas(
  phewas_df,
  variant_id="rs12345",
  category_col="category",
@@ -341,6 +345,8 @@ fig = plotter.plot_phewas(
  Create forest plots for meta-analysis visualization:

  ```python
+ from pylocuszoom import StatsPlotter
+
  forest_df = pd.DataFrame({
  "study": ["Study A", "Study B", "Study C", "Meta-analysis"],
  "effect": [0.45, 0.52, 0.38, 0.46],
@@ -349,7 +355,8 @@ forest_df = pd.DataFrame({
  "weight": [25, 35, 20, 100],
  })

- fig = plotter.plot_forest(
+ stats_plotter = StatsPlotter()
+ fig = stats_plotter.plot_forest(
  forest_df,
  variant_id="rs12345",
  weight_col="weight",
@@ -364,9 +371,9 @@ fig = plotter.plot_forest(
  Create genome-wide Manhattan plots showing associations across all chromosomes:

  ```python
- from pylocuszoom import LocusZoomPlotter
+ from pylocuszoom import ManhattanPlotter

- plotter = LocusZoomPlotter(species="human")
+ plotter = ManhattanPlotter(species="human")

  fig = plotter.plot_manhattan(
  gwas_df,
@@ -397,9 +404,9 @@ fig = plotter.plot_manhattan(
  Create quantile-quantile plots to assess p-value distribution:

  ```python
- from pylocuszoom import LocusZoomPlotter
+ from pylocuszoom import ManhattanPlotter

- plotter = LocusZoomPlotter()
+ plotter = ManhattanPlotter()

  fig = plotter.plot_qq(
  gwas_df,
@@ -419,9 +426,9 @@ fig.savefig("qq_plot.png", dpi=150)
  Compare multiple GWAS results in vertically stacked Manhattan plots:

  ```python
- from pylocuszoom import LocusZoomPlotter
+ from pylocuszoom import ManhattanPlotter

- plotter = LocusZoomPlotter()
+ plotter = ManhattanPlotter()

  fig = plotter.plot_manhattan_stacked(
  [gwas_study1, gwas_study2, gwas_study3],
@@ -444,9 +451,9 @@ fig.savefig("manhattan_stacked.png", dpi=150)
  Create combined Manhattan and QQ plots in a single figure:

  ```python
- from pylocuszoom import LocusZoomPlotter
+ from pylocuszoom import ManhattanPlotter

- plotter = LocusZoomPlotter()
+ plotter = ManhattanPlotter()

  fig = plotter.plot_manhattan_qq(
  gwas_df,
pylocuszoom-1.1.2.dist-info/RECORD → pylocuszoom-1.2.0.dist-info/RECORD CHANGED
@@ -1,11 +1,11 @@
- pylocuszoom/__init__.py,sha256=WWoPLRzx-ptSs6WU5ABz1-HzK5o06RV-KXqtDeQKPgQ,6108
+ pylocuszoom/__init__.py,sha256=l_P-moa2FZkrqvpyRj_PT97Q70X7l29bCjzNB5NI_mM,6204
  pylocuszoom/_plotter_utils.py,sha256=ELdSOcKk2KvOo_AxEWHeutmmUS4zZMaDMmQfpQUWaF0,1541
  pylocuszoom/colors.py,sha256=B28rfhWwGZ-e6Q-F43iXxC6NZpjUo0yWk4S_-vp9ZvU,7686
  pylocuszoom/config.py,sha256=qjIEodI-RY71RVyQ5QmE6WXcPXU4Re_xEWiDlkEww3g,13266
  pylocuszoom/ensembl.py,sha256=w2msgBoIrY79iHI3hURSbevvdFHxHyWF9Z78hXtAaBc,14296
  pylocuszoom/eqtl.py,sha256=9hGcFARABQRCMN3rco0pVlFJdmlh4SLBBKSgOvdIH_U,5924
  pylocuszoom/exceptions.py,sha256=nd-rWMUodW62WVV4TfcYVPQcb66xV6v9FA-_4xHb5VY,926
- pylocuszoom/finemapping.py,sha256=VYQs4o4dVREXicueT1anzuENiFZk6YXb6HpbwyF0FD0,5828
+ pylocuszoom/finemapping.py,sha256=S3ulQj3fkaDM3n4I8EBymbWym_kTD5NEqfIEj93Mdjk,9630
  pylocuszoom/forest.py,sha256=K-wBinxBOqIzsNMtZJ587e_oMhUXIXEqmEzVTUbmHSY,1161
  pylocuszoom/gene_track.py,sha256=Sh0JCSdLNAAH0NQEiDVMvyXjm63PiCMq3gLvewcagvo,17277
  pylocuszoom/labels.py,sha256=l4PHAR_err75Z9kTmb3a2h0eunkFj6UjzhKBUgmZTDc,3623
@@ -15,10 +15,10 @@ pylocuszoom/logging.py,sha256=nZHEkbnjp8zoyWj_S-Hy9UQvUYLoMoxyiOWRozBT2dg,4987
  pylocuszoom/manhattan.py,sha256=sNhPnsfsIqe1ls74D-kKMFyF_ZmaYB9Ul8qf4UMWnF0,8022
  pylocuszoom/manhattan_plotter.py,sha256=1QQxaXEh5YG4x6ZIxpdhdfQPI2KuO_525qYKI7c32n4,27584
  pylocuszoom/phewas.py,sha256=6g2LmwA5kmxYlHgPxJvuXIMerEqfqgsrth110Y3CgVU,968
- pylocuszoom/plotter.py,sha256=mMOQxyLU3d1XJGpDJUuy71fAFm6IAnQfMZQXHgN6Mzk,54689
+ pylocuszoom/plotter.py,sha256=Z24UWRRdp9E-nJx2urKmM_jz-i0OZ9BOwSmJJestdS4,42362
  pylocuszoom/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  pylocuszoom/qq.py,sha256=GPIFHXYCLvhP4IUgjcU3QELLREH8r1AEYXMord8gtEo,3650
- pylocuszoom/recombination.py,sha256=e11IlFRXKILEAP-vgMcbFK28zbAQ5jY-fESsisogq0o,14570
+ pylocuszoom/recombination.py,sha256=M-wDBdGbC5qGDHPFoGzBTPmTdiRD7bpRskyxBAKxTUY,15878
  pylocuszoom/schemas.py,sha256=XxeivyRm5LGDwJw4GToxzOSdyx1yXvFYk3xgeFJ6VW0,11858
  pylocuszoom/stats_plotter.py,sha256=67bgU-TXGnmVTxfTRWT3-PFemVVy6lTu4-ZlxUnwHS4,11171
  pylocuszoom/utils.py,sha256=Z2P__Eau3ilF2ftuAZBm11EZ1NqCFQzfr4br9jCiJmg,6887
@@ -30,7 +30,7 @@ pylocuszoom/backends/hover.py,sha256=Hjm_jcxJL8dDxO_Ye7jeWAUcHKlbH6oO8ZfGJ2MzIFM
  pylocuszoom/backends/matplotlib_backend.py,sha256=9WAFLWcclj2-4WKi6bE6IPJfQ_HNoIekOE45ibBGPa0,22824
  pylocuszoom/backends/plotly_backend.py,sha256=VDEZMdP7nOeFYLli-YOc_2DG00ZA6VVRNUcvT5PU0HM,39084
  pylocuszoom/reference_data/__init__.py,sha256=qqHqAUt1jebGlCN3CjqW3Z-_coHVNo5K3a3bb9o83hA,109
- pylocuszoom-1.1.2.dist-info/METADATA,sha256=dkrVAdEcwo5yQku8yUfX7Dlpiv3u01FzdHrZ23c3WyM,22289
- pylocuszoom-1.1.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- pylocuszoom-1.1.2.dist-info/licenses/LICENSE.md,sha256=U2y_hv8RcN5lECA3uK88irU3ODUE1TDAPictcmnP0Q4,698
- pylocuszoom-1.1.2.dist-info/RECORD,,
+ pylocuszoom-1.2.0.dist-info/METADATA,sha256=O0SkzFPHNYqOrjh0zUfL_73UvE2z2NzzH1nKYECchw4,22488
+ pylocuszoom-1.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ pylocuszoom-1.2.0.dist-info/licenses/LICENSE.md,sha256=U2y_hv8RcN5lECA3uK88irU3ODUE1TDAPictcmnP0Q4,698
+ pylocuszoom-1.2.0.dist-info/RECORD,,