offtracker 2.11.5__zip → 2.12.0__zip

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {offtracker-2.11.5/offtracker.egg-info → offtracker-2.12.0}/PKG-INFO +1 -1
  2. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/X_offtracker.py +58 -18
  3. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/_version.py +2 -1
  4. {offtracker-2.11.5 → offtracker-2.12.0/offtracker.egg-info}/PKG-INFO +1 -1
  5. {offtracker-2.11.5 → offtracker-2.12.0}/scripts/offtracker_analysis.py +7 -4
  6. {offtracker-2.11.5 → offtracker-2.12.0}/scripts/offtracker_candidates.py +12 -1
  7. {offtracker-2.11.5 → offtracker-2.12.0}/LICENSE.txt +0 -0
  8. {offtracker-2.11.5 → offtracker-2.12.0}/MANIFEST.in +0 -0
  9. {offtracker-2.11.5 → offtracker-2.12.0}/README.md +0 -0
  10. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/X_offplot.py +0 -0
  11. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/X_sequence.py +0 -0
  12. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/__init__.py +0 -0
  13. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/snakefile/Snakefile_QC.smk +0 -0
  14. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/snakefile/Snakefile_offtracker.smk +0 -0
  15. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/utility/1.1_bed2fr.py +0 -0
  16. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/utility/1.3_bdg_normalize_v4.0.py +0 -0
  17. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/utility/bedGraphToBigWig +0 -0
  18. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/utility/hg38.chrom.sizes +0 -0
  19. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/utility/mm10.chrom.sizes +0 -0
  20. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/utility/offtracker_blacklist_hg38.merged.bed +0 -0
  21. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker/utility/offtracker_blacklist_mm10.merged.bed +0 -0
  22. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker.egg-info/SOURCES.txt +0 -0
  23. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker.egg-info/dependency_links.txt +0 -0
  24. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker.egg-info/requires.txt +0 -0
  25. {offtracker-2.11.5 → offtracker-2.12.0}/offtracker.egg-info/top_level.txt +0 -0
  26. {offtracker-2.11.5 → offtracker-2.12.0}/scripts/offtracker_config.py +0 -0
  27. {offtracker-2.11.5 → offtracker-2.12.0}/scripts/offtracker_init.py +0 -0
  28. {offtracker-2.11.5 → offtracker-2.12.0}/scripts/offtracker_plot.py +0 -0
  29. {offtracker-2.11.5 → offtracker-2.12.0}/scripts/offtracker_qc.py +0 -0
  30. {offtracker-2.11.5 → offtracker-2.12.0}/setup.cfg +0 -0
  31. {offtracker-2.11.5 → offtracker-2.12.0}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: offtracker
3
- Version: 2.11.5
3
+ Version: 2.12.0
4
4
  Summary: Tracking-seq data analysis
5
5
  Home-page: https://github.com/Lan-lab/offtracker
6
6
  Author: Runda Xu
@@ -1,5 +1,6 @@
1
1
 
2
2
  import pandas as pd
3
+ import polars as pl
3
4
  import numpy as np
4
5
  import os, sys
5
6
  sys.path.append( os.path.abspath(os.path.dirname(__file__)) )
@@ -8,26 +9,65 @@ def fdr(p_vals):
8
9
  # Benjamini-Hochberg
9
10
  from scipy.stats import rankdata
10
11
  ranked_p_values = rankdata(p_vals)
11
- fdr = p_vals * len(p_vals) / ranked_p_values
12
- fdr[fdr > 1] = 1
13
- return fdr
12
+ fdr_value = p_vals * len(p_vals) / ranked_p_values
13
+ fdr_value[fdr_value > 1] = 1
14
+ return fdr_value
14
15
 
15
- def dedup_two( df_loc, col_ID_1='ID_1', col_ID_2='ID_2'):
16
- # 会根据 df_loc 的排序保留第一个 location
17
- # dedup 结束后,剩下的 ID_1 + ID_2 并集可能会小于 dedup 前的并集
18
- list_nondup = []
19
- set_IDs = set()
20
- df_IDs = df_loc[[col_ID_1,col_ID_2]]
21
- for a_row in df_IDs.iterrows():
22
- temp = a_row[1]
23
- if (temp[col_ID_1] in set_IDs) or (temp[col_ID_2] in set_IDs):
24
- # 只要有一ID出现过,即便另一ID没出现过,也不更新 set_IDs
25
- list_nondup.append(False)
16
+
17
+ def mark_regions_single_chr(dp, min_distance=1000):
18
+ unique_chr = dp['chr'].unique()
19
+ assert len(unique_chr) == 1
20
+ unique_chr = unique_chr[0]
21
+
22
+ # Initialize variables for marking regions
23
+ region_id = 1
24
+ current_start = None
25
+ current_end = None
26
+ marked_regions = []
27
+
28
+ for row in dp.iter_rows(named=True):
29
+ start, end = row['st'], row['ed']
30
+
31
+ if current_start is None:
32
+ # First region
33
+ current_start = start
34
+ current_end = end
35
+ marked_regions.append(f'{unique_chr}_region_{region_id}')
26
36
  else:
27
- set_IDs.add(temp[col_ID_1])
28
- set_IDs.add(temp[col_ID_2])
29
- list_nondup.append(True)
30
- return list_nondup
37
+ if start <= current_end + min_distance:
38
+ # Mark as the same region
39
+ marked_regions.append(f'{unique_chr}_region_{region_id}')
40
+ else:
41
+ # New region
42
+ region_id += 1
43
+ marked_regions.append(f'{unique_chr}_region_{region_id}')
44
+ current_start = start
45
+ current_end = end
46
+
47
+ current_end = max(current_end, end)
48
+
49
+ return dp.with_columns(region_index=pl.Series(marked_regions))
50
+
51
+
52
+
53
+
54
+
55
+ # def dedup_two( df_loc, col_ID_1='ID_1', col_ID_2='ID_2'):
56
+ # # 会根据 df_loc 的排序保留第一个 location
57
+ # # dedup 结束后,剩下的 ID_1 + ID_2 并集可能会小于 dedup 前的并集
58
+ # list_nondup = []
59
+ # set_IDs = set()
60
+ # df_IDs = df_loc[[col_ID_1,col_ID_2]]
61
+ # for a_row in df_IDs.iterrows():
62
+ # temp = a_row[1]
63
+ # if (temp[col_ID_1] in set_IDs) or (temp[col_ID_2] in set_IDs):
64
+ # # 只要有一ID出现过,即便另一ID没出现过,也不更新 set_IDs
65
+ # list_nondup.append(False)
66
+ # else:
67
+ # set_IDs.add(temp[col_ID_1])
68
+ # set_IDs.add(temp[col_ID_2])
69
+ # list_nondup.append(True)
70
+ # return list_nondup
31
71
 
32
72
  def window_smooth(sr_smooth, window_size=3, times=1):
33
73
  window = np.ones(window_size) / window_size
@@ -1,4 +1,4 @@
1
- __version__ = "2.11.5"
1
+ __version__ = "2.12.0"
2
2
  # 2023.08.11. v1.1.0 adding a option for not normalizing the bw file
3
3
  # 2023.10.26. v1.9.0 prerelease for v2.0
4
4
  # 2023.10.27. v2.0.0 大更新,还没微调
@@ -40,3 +40,4 @@ __version__ = "2.11.5"
40
40
  # 2025.06.28. v2.10.11 回滚到2.10.9外加修正
41
41
  # 2025.07.02. v2.11.4 基于 blast 的缺陷更新 candidates,去除 quick mode
42
42
  # 2025.07.04. v2.11.5 offtracker_analysis 提前 skip 已有结果的样本
43
+ # 2025.07.04. v2.12.0 新增 region_index 标记区域,用于更好的去重
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: offtracker
3
- Version: 2.11.5
3
+ Version: 2.12.0
4
4
  Summary: Tracking-seq data analysis
5
5
  Home-page: https://github.com/Lan-lab/offtracker
6
6
  Author: Runda Xu
@@ -82,7 +82,7 @@ def main():
82
82
  df_candidate.index = df_candidate['target_location']
83
83
  df_candidate_brief = df_candidate[['chr','st','ed','best_strand','best_target','best_seq_score',
84
84
  'deletion', 'insertion','mismatch', 'GG',
85
- 'target_location', 'cleavage_site', 'ID_1','ID_2']]
85
+ 'target_location', 'cleavage_site', 'ID_1','ID_2', 'region_index']] # 2025.07.06 添加 region_index
86
86
  df_candidate_sub = df_candidate[['chr','cleavage_site']]
87
87
  except FileNotFoundError:
88
88
  return 'Please run offtracker_candidates.py first and provide the correct directory with --seqfolder'
@@ -300,9 +300,12 @@ def main():
300
300
  df_score['raw_score'] = df_score['final_score_1'] + df_score['final_score_2']
301
301
  df_score = df_score.sort_values('raw_score', ascending=False)
302
302
 
303
- # local dedup
304
- list_nondup = offtracker.dedup_two(df_score,'ID_1','ID_2')
305
- df_result = df_score[list_nondup].copy()
303
+ # # local dedup
304
+ # list_nondup = offtracker.dedup_two(df_score,'ID_1','ID_2')
305
+ # df_result = df_score[list_nondup].copy()
306
+
307
+ # 2025.07.06 更新去重方式
308
+ df_result = df_score.drop_duplicates(subset=['region_index'], keep='first')
306
309
 
307
310
  # 标准化分布
308
311
  target_std=0.15
@@ -310,7 +310,18 @@ def main():
310
310
  df_candidate['mis_all'] = df_candidate[['mismatch','deletion','insertion']].sum(axis=1)
311
311
  df_candidate = df_candidate[df_candidate['mis_all']<8]
312
312
 
313
- df_candidate.to_csv(dir_df_candidate)
313
+ # 2025.07.06 增加 region 标记用于去重
314
+ # 将 df_candidate 按照染色体分组
315
+ candidate_groups = df_candidate.groupby('chr')
316
+ # 定义一个空的列表,用于存储每个染色体的数据
317
+ list_dp = []
318
+ for chr_name, chr_candidate in candidate_groups:
319
+ dp_marked = offtracker.mark_regions_single_chr(pl.DataFrame(chr_candidate))
320
+ list_dp.append(dp_marked)
321
+ df_candidate = pl.concat(list_dp)
322
+
323
+ # 改成 pl 输出
324
+ df_candidate.write_csv(dir_df_candidate)
314
325
  print(f'Output df_candidate_{sgRNA_name}.csv')
315
326
  os.remove(temp_bed)
316
327
 
File without changes
File without changes
File without changes
File without changes
File without changes