spacr 0.3.52__py3-none-any.whl → 0.3.55__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- spacr/gui_elements.py +1 -1
- spacr/gui_utils.py +0 -111
- spacr/io.py +114 -140
- spacr/measure.py +10 -11
- spacr/ml.py +41 -32
- spacr/plot.py +24 -293
- spacr/sequencing.py +13 -9
- spacr/settings.py +15 -9
- spacr/submodules.py +19 -19
- spacr/timelapse.py +16 -16
- spacr/toxo.py +15 -15
- spacr/utils.py +72 -164
- {spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/METADATA +1 -1
- {spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/RECORD +18 -18
- {spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/LICENSE +0 -0
- {spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/WHEEL +0 -0
- {spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/entry_points.txt +0 -0
- {spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/top_level.txt +0 -0
spacr/utils.py
CHANGED
@@ -78,7 +78,7 @@ def filepaths_to_database(img_paths, settings, source_folder, crop_mode):
 
     parts = png_df['file_name'].apply(lambda x: pd.Series(_map_wells_png(x, timelapse=settings['timelapse'])))
 
-    columns = ['plate', 'row', 'col', 'field']
+    columns = ['plate', 'row_name', 'column_name', 'field']
 
     if settings['timelapse']:
         columns = columns + ['time_id']
@@ -113,7 +113,7 @@ def activation_maps_to_database(img_paths, source_folder, settings):
     png_df = pd.DataFrame(img_paths, columns=['png_path'])
     png_df['file_name'] = png_df['png_path'].apply(lambda x: os.path.basename(x))
     parts = png_df['file_name'].apply(lambda x: pd.Series(_map_wells_png(x, timelapse=False)))
-    columns = ['plate', 'row', 'col', 'field', 'prcfo', 'object']
+    columns = ['plate', 'row_name', 'column_name', 'field', 'prcfo', 'object']
     png_df[columns] = parts
 
     dataset_name = os.path.splitext(os.path.basename(settings['dataset']))[0]
@@ -136,7 +136,7 @@ def activation_correlations_to_database(df, img_paths, source_folder, settings):
     png_df = pd.DataFrame(img_paths, columns=['png_path'])
     png_df['file_name'] = png_df['png_path'].apply(lambda x: os.path.basename(x))
    parts = png_df['file_name'].apply(lambda x: pd.Series(_map_wells_png(x, timelapse=False)))
-    columns = ['plate', 'row', 'col', 'field', 'prcfo', 'object']
+    columns = ['plate', 'row_name', 'column_name', 'field', 'prcfo', 'object']
     png_df[columns] = parts
 
     # Align both DataFrames by file_name
@@ -547,56 +547,6 @@ def _get_cellpose_batch_size():
     except Exception as e:
         return 8
 
-def _extract_filename_metadata_v1(filenames, src, regular_expression, metadata_type='cellvoyager', pick_slice=False, skip_mode='01'):
-
-    images_by_key = defaultdict(list)
-
-    for filename in filenames:
-        match = regular_expression.match(filename)
-        if match:
-            try:
-                try:
-                    plate = match.group('plateID')
-                except:
-                    plate = os.path.basename(src)
-
-                well = match.group('wellID')
-                field = match.group('fieldID')
-                channel = match.group('chanID')
-                mode = None
-
-                if well[0].isdigit():
-                    well = str(_safe_int_convert(well))
-                if field[0].isdigit():
-                    field = str(_safe_int_convert(field))
-                if channel[0].isdigit():
-                    channel = str(_safe_int_convert(channel))
-
-                if metadata_type =='cq1':
-                    orig_wellID = wellID
-                    wellID = _convert_cq1_well_id(wellID)
-                    print(f'Converted Well ID: {orig_wellID} to {wellID}', end='\r', flush=True)
-
-                if pick_slice:
-                    try:
-                        mode = match.group('AID')
-                    except IndexError:
-                        sliceid = '00'
-
-                    if mode == skip_mode:
-                        continue
-
-                key = (plate, well, field, channel, mode)
-                with Image.open(os.path.join(src, filename)) as img:
-                    images_by_key[key].append(np.array(img))
-            except IndexError:
-                print(f"Could not extract information from filename {filename} using provided regex")
-        else:
-            print(f"Filename {filename} did not match provided regex")
-            continue
-
-    return images_by_key
-
 def _extract_filename_metadata(filenames, src, regular_expression, metadata_type='cellvoyager', pick_slice=False, skip_mode='01'):
 
     images_by_key = defaultdict(list)
@@ -685,11 +635,11 @@ def _update_database_with_merged_info(db_path, df, table='png_list', columns=['p
     if 'prcfo' not in df.columns:
         print(f'generating prcfo columns')
         try:
-            df['prcfo'] = df['plate'].astype(str) + '_' + df['row'].astype(str) + '_' + df['col'].astype(str) + '_' + df['field'].astype(str) + '_o' + df['object_label'].astype(int).astype(str)
+            df['prcfo'] = df['plate'].astype(str) + '_' + df['row_name'].astype(str) + '_' + df['column_name'].astype(str) + '_' + df['field'].astype(str) + '_o' + df['object_label'].astype(int).astype(str)
         except Exception as e:
             print('Merging on cell failed, trying with cell_id')
             try:
-                df['prcfo'] = df['plate'].astype(str) + '_' + df['row'].astype(str) + '_' + df['col'].astype(str) + '_' + df['field'].astype(str) + '_o' + df['cell_id'].astype(int).astype(str)
+                df['prcfo'] = df['plate'].astype(str) + '_' + df['row_name'].astype(str) + '_' + df['column_name'].astype(str) + '_' + df['field'].astype(str) + '_o' + df['cell_id'].astype(int).astype(str)
             except Exception as e:
                 print(e)
 
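An aside for readers tracking the identifier scheme: the `prcfo` key built in this hunk concatenates plate, row, column, field, and object label into one per-object identifier. A minimal sketch with hypothetical values, mirroring the `+` lines above:

import pandas as pd

# Hypothetical metadata row using the renamed columns from this release
df = pd.DataFrame([{'plate': 'plate1', 'row_name': 'E', 'column_name': '01',
                    'field': '1', 'object_label': 7}])

df['prcfo'] = (df['plate'].astype(str) + '_' + df['row_name'].astype(str) + '_'
               + df['column_name'].astype(str) + '_' + df['field'].astype(str)
               + '_o' + df['object_label'].astype(int).astype(str))

print(df['prcfo'].iloc[0])  # plate1_E_01_1_o7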
@@ -781,7 +731,7 @@ def _map_values(row, values, locs):
     if locs:
         value_dict = {loc: value for value, loc_list in zip(values, locs) for loc in loc_list}
         # Determine if we're dealing with row or column based on first location identifier
-        type_ = 'row' if locs[0][0][0] == 'r' else 'col'
+        type_ = 'row_name' if locs[0][0][0] == 'r' else 'column_name'
         return value_dict.get(row[type_], None)
     return values[0] if values else None
 
@@ -966,21 +916,21 @@ def _merge_and_save_to_database(morph_df, intensity_df, table_type, source_folde
     merged_df['file_name'] = file_name
     merged_df['path_name'] = os.path.join(source_folder, file_name + '.npy')
     if timelapse:
-        merged_df[['plate', 'row', 'col', 'field', 'timeid', 'prcf']] = merged_df['file_name'].apply(lambda x: pd.Series(_map_wells(x, timelapse)))
+        merged_df[['plate', 'row_name', 'column_name', 'field', 'timeid', 'prcf']] = merged_df['file_name'].apply(lambda x: pd.Series(_map_wells(x, timelapse)))
     else:
-        merged_df[['plate', 'row', 'col', 'field', 'prcf']] = merged_df['file_name'].apply(lambda x: pd.Series(_map_wells(x, timelapse)))
+        merged_df[['plate', 'row_name', 'column_name', 'field', 'prcf']] = merged_df['file_name'].apply(lambda x: pd.Series(_map_wells(x, timelapse)))
     cols = merged_df.columns.tolist() # get the list of all columns
     if table_type == 'cell' or table_type == 'cytoplasm':
-        column_list = ['object_label', 'plate', 'row', 'col', 'field', 'prcf', 'file_name', 'path_name']
+        column_list = ['object_label', 'plate', 'row_name', 'column_name', 'field', 'prcf', 'file_name', 'path_name']
     elif table_type == 'nucleus' or table_type == 'pathogen':
-        column_list = ['object_label', 'cell_id', 'plate', 'row', 'col', 'field', 'prcf', 'file_name', 'path_name']
+        column_list = ['object_label', 'cell_id', 'plate', 'row_name', 'column_name', 'field', 'prcf', 'file_name', 'path_name']
     else:
         raise ValueError(f"Invalid table_type: {table_type}")
     # Check if all columns in column_list are in cols
     missing_columns = [col for col in column_list if col not in cols]
     if len(missing_columns) == 1 and missing_columns[0] == 'cell_id':
         missing_columns = False
-        column_list = ['object_label', 'plate', 'row', 'col', 'field', 'prcf', 'file_name', 'path_name']
+        column_list = ['object_label', 'plate', 'row_name', 'column_name', 'field', 'prcf', 'file_name', 'path_name']
     if missing_columns:
         raise ValueError(f"Columns missing in DataFrame: {missing_columns}")
     for i, col in enumerate(column_list):
@@ -1373,11 +1323,11 @@ def annotate_conditions(df, cells=None, cell_loc=None, pathogens=None, pathogen_
     """
 
     def _get_type(val):
-        """Determine if a value maps to 'row' or 'col'."""
+        """Determine if a value maps to 'row_name' or 'column_name'."""
         if isinstance(val, str) and val.startswith('c'):
-            return 'col'
+            return 'column_name'
         elif isinstance(val, str) and val.startswith('r'):
-            return 'row'
+            return 'row_name'
         return None
 
     def _map_or_default(column_name, values, loc, df):
@@ -1514,7 +1464,7 @@ def _group_by_well(df):
     non_numeric_cols = df.select_dtypes(include=['object']).columns
 
     # Apply mean function to numeric columns and first to non-numeric
-    df_grouped = df.groupby(['plate', 'row', 'col']).agg({**{col: np.mean for col in numeric_cols}, **{col: 'first' for col in non_numeric_cols}})
+    df_grouped = df.groupby(['plate', 'row_name', 'column_name']).agg({**{col: np.mean for col in numeric_cols}, **{col: 'first' for col in non_numeric_cols}})
     return df_grouped
 
 ###################################################
@@ -2348,7 +2298,7 @@ def check_multicollinearity(x):
 
 def lasso_reg(merged_df, alpha_value=0.01, reg_type='lasso'):
     # Separate predictors and response
-    X = merged_df[['gene', 'grna', 'plate', 'row', 'column']]
+    X = merged_df[['gene', 'grna', 'plate', 'row_name', 'column']]
     y = merged_df['pred']
 
     # One-hot encode the categorical predictors
@@ -3999,36 +3949,6 @@ def plot_grid(cluster_images, colors, figuresize, black_background, verbose):
     plt.show()
     return grid_fig
 
-def generate_path_list_from_db_v1(db_path, file_metadata):
-
-    all_paths = []
-
-    # Connect to the database and retrieve the image paths
-    print(f"Reading DataBase: {db_path}")
-    try:
-        with sqlite3.connect(db_path) as conn:
-            cursor = conn.cursor()
-            if file_metadata:
-                if isinstance(file_metadata, str):
-                    cursor.execute("SELECT png_path FROM png_list WHERE png_path LIKE ?", (f"%{file_metadata}%",))
-            else:
-                cursor.execute("SELECT png_path FROM png_list")
-
-            while True:
-                rows = cursor.fetchmany(1000)
-                if not rows:
-                    break
-                all_paths.extend([row[0] for row in rows])
-
-    except sqlite3.Error as e:
-        print(f"Database error: {e}")
-        return
-    except Exception as e:
-        print(f"Error: {e}")
-        return
-
-    return all_paths
-
 def generate_path_list_from_db(db_path, file_metadata):
     all_paths = []
 
@@ -4738,11 +4658,11 @@ def process_vision_results(df, threshold=0.5):
     mapped_values = df['path'].apply(lambda x: _map_wells(x))
 
     df['plate'] = mapped_values.apply(lambda x: x[0])
-    df['row'] = mapped_values.apply(lambda x: x[1])
+    df['row_name'] = mapped_values.apply(lambda x: x[1])
     df['column'] = mapped_values.apply(lambda x: x[2])
     df['field'] = mapped_values.apply(lambda x: x[3])
     df['object'] = df['path'].str.split('_').str[3].str.split('.').str[0]
-    df['prc'] = df['plate'].astype(str) + '_' + df['row'].astype(str) + '_' + df['column'].astype(str)
+    df['prc'] = df['plate'].astype(str) + '_' + df['row_name'].astype(str) + '_' + df['column'].astype(str)
     df['cv_predictions'] = (df['pred'] >= threshold).astype(int)
 
     return df
@@ -5031,66 +4951,6 @@ def download_models(repo_id="einarolafsson/models", retries=5, delay=5):
 
     raise Exception("Failed to download model files after multiple attempts.")
 
-def download_models_v1(repo_id="einarolafsson/models", local_dir=None, retries=5, delay=5):
-    """
-    Downloads all model files from Hugging Face and stores them in the specified local directory.
-
-    Args:
-        repo_id (str): The repository ID on Hugging Face (default is 'einarolafsson/models').
-        local_dir (str): The local directory where models will be saved. Defaults to '/home/carruthers/Desktop/test'.
-        retries (int): Number of retry attempts in case of failure.
-        delay (int): Delay in seconds between retries.
-
-    Returns:
-        str: The local path to the downloaded models.
-    """
-    # Create the local directory if it doesn't exist
-    if not os.path.exists(local_dir):
-        os.makedirs(local_dir)
-    elif len(os.listdir(local_dir)) > 0:
-        print(f"Models already downloaded to: {local_dir}")
-        return local_dir
-
-    attempt = 0
-    while attempt < retries:
-        try:
-            # List all files in the repo
-            files = list_repo_files(repo_id, repo_type="dataset")
-            print(f"Files in repository: {files}") # Debugging print to check file list
-
-            # Download each file
-            for file_name in files:
-                for download_attempt in range(retries):
-                    try:
-                        url = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{file_name}?download=true"
-                        print(f"Downloading file from: {url}") # Debugging
-
-                        response = requests.get(url, stream=True)
-                        print(f"HTTP response status: {response.status_code}") # Debugging
-                        response.raise_for_status()
-
-                        # Save the file locally
-                        local_file_path = os.path.join(local_dir, os.path.basename(file_name))
-                        with open(local_file_path, 'wb') as file:
-                            for chunk in response.iter_content(chunk_size=8192):
-                                file.write(chunk)
-                        print(f"Downloaded model file: {file_name} to {local_file_path}")
-                        break # Exit the retry loop if successful
-
-                    except (requests.HTTPError, requests.Timeout) as e:
-                        print(f"Error downloading {file_name}: {e}. Retrying in {delay} seconds...")
-                        time.sleep(delay)
-                else:
-                    raise Exception(f"Failed to download {file_name} after multiple attempts.")
-
-            return local_dir # Return the directory where models are saved
-
-        except (requests.HTTPError, requests.Timeout) as e:
-            print(f"Error downloading files: {e}. Retrying in {delay} seconds...")
-            attempt += 1
-            time.sleep(delay)
-
-    raise Exception("Failed to download model files after multiple attempts.")
-
 def generate_cytoplasm_mask(nucleus_mask, cell_mask):
 
     """
@@ -5216,21 +5076,21 @@ def correct_metadata_column_names(df):
         df = df.rename(columns={'plate_name': 'plate'})
     if 'column_name' in df.columns:
         df = df.rename(columns={'column_name': 'column'})
-    if 'col' in df.columns:
-        df = df.rename(columns={'col': 'column'})
+    if 'column_name' in df.columns:
+        df = df.rename(columns={'column_name': 'column'})
     if 'row_name' in df.columns:
-        df = df.rename(columns={'row_name': 'row'})
+        df = df.rename(columns={'row_name': 'row_name'})
     if 'grna_name' in df.columns:
         df = df.rename(columns={'grna_name': 'grna'})
     if 'plate_row' in df.columns:
-        df[['plate', 'row']] = df['plate_row'].str.split('_', expand=True)
+        df[['plate', 'row_name']] = df['plate_row'].str.split('_', expand=True)
     return df
 
 def control_filelist(folder, mode='column', values=['01','02']):
     files = os.listdir(folder)
     if mode is 'column':
         filtered_files = [file for file in files if file.split('_')[1][1:] in values]
-    if mode is 'row':
+    if mode is 'row_name':
         filtered_files = [file for file in files if file.split('_')[1][:1] in values]
     return filtered_files
 
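An editorial note on the hunk above: `control_filelist` compares `mode` with `is` ('column', 'row_name'), which tests object identity rather than string equality and only happens to work because CPython interns these literals. A minimal equality-based sketch of the same filtering logic (illustrative only, not the shipped code):

import os

def control_filelist_sketch(folder, mode='column', values=('01', '02')):
    # Same selection logic as control_filelist, but with '==' string comparison
    files = os.listdir(folder)
    if mode == 'column':
        # keep files whose column digits (after the row letter in the well token) are in values
        return [f for f in files if f.split('_')[1][1:] in values]
    if mode == 'row_name':
        # keep files whose row letter (first character of the well token) is in values
        return [f for f in files if f.split('_')[1][:1] in values]
    return files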
@@ -5255,4 +5115,52 @@ def choose_p_adjust_method(num_groups, num_data_points):
     elif num_comparisons <= 10:
         return 'sidak' # Less conservative than Bonferroni, good for independent comparisons
     else:
-        return 'bonferroni' # Very conservative, use for strict control of Type I errors
\ No newline at end of file
+        return 'bonferroni' # Very conservative, use for strict control of Type I errors
+
+def rename_columns_in_db(db_path):
+    with sqlite3.connect(db_path) as conn:
+        cursor = conn.cursor()
+
+        # Retrieve all table names in the database
+        cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
+        tables = [table[0] for table in cursor.fetchall()]
+
+        for table in tables:
+            # Retrieve column names for each table
+            cursor.execute(f"PRAGMA table_info({table});")
+            columns_info = cursor.fetchall()
+            column_names = [col[1] for col in columns_info]
+
+            # Check if columns 'row' or 'col' exist
+            columns_to_rename = {}
+            if 'row' in column_names:
+                columns_to_rename['row'] = 'row_name'
+            if 'col' in column_names:
+                columns_to_rename['col'] = 'column_name'
+
+            # Rename columns if necessary
+            if columns_to_rename:
+                # Rename existing table to a temporary name
+                temp_table = f"{table}_old"
+                cursor.execute(f"ALTER TABLE `{table}` RENAME TO `{temp_table}`")
+
+                # Define new columns with updated names
+                column_definitions = ", ".join(
+                    [f"`{columns_to_rename.get(col[1], col[1])}` {col[2]}" for col in columns_info]
+                )
+                cursor.execute(f"CREATE TABLE `{table}` ({column_definitions})")
+
+                # Copy data to the new table
+                old_columns = ", ".join([f"`{col}`" for col in column_names])
+                new_columns = ", ".join(
+                    [f"`{columns_to_rename.get(col, col)}`" for col in column_names]
+                )
+                cursor.execute(f"INSERT INTO `{table}` ({new_columns}) SELECT {old_columns} FROM `{temp_table}`")
+                try:
+                    cursor.execute(f"DROP TABLE `{temp_table}`")
+                except sqlite3.Error as e:
+                    print(f"Error while dropping temporary table '{temp_table}': {e}")
+
+    # After closing the 'with' block, run VACUUM outside of any transaction
+    with sqlite3.connect(db_path) as conn:
+        conn.execute("VACUUM;")
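The newly added `rename_columns_in_db` is a one-shot migration for databases written by earlier spacr versions: it rebuilds every table that still has legacy 'row'/'col' columns under the new 'row_name'/'column_name' names, then compacts the file with VACUUM. A minimal usage sketch (the database path is hypothetical, and the check assumes the database contains a png_list table, as the other hunks in this diff do):

import sqlite3
from spacr.utils import rename_columns_in_db  # added in 0.3.55 per this diff

db_path = 'screen1/measurements/measurements.db'  # hypothetical path

rename_columns_in_db(db_path)

# Read-only check that a table picked up the new column names
with sqlite3.connect(db_path) as conn:
    cols = [row[1] for row in conn.execute("PRAGMA table_info(png_list);")]
    assert 'row_name' in cols and 'row' not in cols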
{spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/RECORD
CHANGED
@@ -13,22 +13,22 @@ spacr/core.py,sha256=dW9RrAKFLfVsFhX0-kaVMc2T7b47Ky0pTXK-CEVOeWQ,48235
 spacr/deep_spacr.py,sha256=HdOcNU8cHcE_19nP7_5uTz-ih3E169ffr2Hm--NvMvA,43255
 spacr/gui.py,sha256=ARyn9Q_g8HoP-cXh1nzMLVFCKqthY4v2u9yORyaQqQE,8230
 spacr/gui_core.py,sha256=N7R7yvfK_dJhOReM_kW3Ci8Bokhi1OzsxeKqvSGdvV4,41460
-spacr/gui_elements.py,sha256=
-spacr/gui_utils.py,sha256=
-spacr/io.py,sha256=
+spacr/gui_elements.py,sha256=EKlvEg_4_je7jciEdR3NTgPrcTraowa2e2RUt-xqd6M,138254
+spacr/gui_utils.py,sha256=Ud6hRRPhombKjeGUhlleEr9I75SNnFj8UD11yKfp9Wo,40860
+spacr/io.py,sha256=VHs6h8o0gBEyKxfdNqEhpzjQXPrj7UGG47DwHeUyUDw,143390
 spacr/logger.py,sha256=lJhTqt-_wfAunCPl93xE65Wr9Y1oIHJWaZMjunHUeIw,1538
-spacr/measure.py,sha256=
+spacr/measure.py,sha256=2lK-ZcTxLM-MpXV1oZnucRD9iz5aprwahRKw9IEqshg,55085
 spacr/mediar.py,sha256=FwLvbLQW5LQzPgvJZG8Lw7GniA2vbZx6Jv6vIKu7I5c,14743
-spacr/ml.py,sha256=
+spacr/ml.py,sha256=MpoHjziUM35GVuoYQCXN5DI77u1r7m7rtv7j3xqMNUc,68044
 spacr/openai.py,sha256=5vBZ3Jl2llYcW3oaTEXgdyCB2aJujMUIO5K038z7w_A,1246
-spacr/plot.py,sha256=
-spacr/sequencing.py,sha256=
-spacr/settings.py,sha256=
+spacr/plot.py,sha256=Y5_VuRHNsIH7iezK8kWXHg9fwh5sW3S34ncIFshbBco,157893
+spacr/sequencing.py,sha256=ClUfwPPK6rNUbUuiEkzcwakzVyDKKUMv9ricrxT8qQY,25227
+spacr/settings.py,sha256=6_GB1QQw_w_4yq8dH-Ypc4rJw__Cgs6g_BnR9bIjdZI,77669
 spacr/sim.py,sha256=1xKhXimNU3ukzIw-3l9cF3Znc_brW8h20yv8fSTzvss,71173
-spacr/submodules.py,sha256=
-spacr/timelapse.py,sha256=
-spacr/toxo.py,sha256=
-spacr/utils.py,sha256=
+spacr/submodules.py,sha256=dn-QSKX6ZqyyEr8_v69jVGpB-wd3KbaMRacIA8DXONU,28155
+spacr/timelapse.py,sha256=KGfG4L4-QnFfgbF7L6C5wL_3gd_rqr05Foje6RsoTBg,39603
+spacr/toxo.py,sha256=z2nT5aAze3NUIlwnBQcnkARihDwoPfqOgQIVoUluyK0,25087
+spacr/utils.py,sha256=5XGA0aPray3DzCAgwJjPRlsaxsuSRJyTTTZ7rNDTRTg,219202
 spacr/version.py,sha256=axH5tnGwtgSnJHb5IDhiu4Zjk5GhLyAEDRe-rnaoFOA,409
 spacr/resources/MEDIAR/.gitignore,sha256=Ff1q9Nme14JUd-4Q3jZ65aeQ5X4uttptssVDgBVHYo8,152
 spacr/resources/MEDIAR/LICENSE,sha256=yEj_TRDLUfDpHDNM0StALXIt6mLqSgaV2hcCwa6_TcY,1065
@@ -151,9 +151,9 @@ spacr/resources/icons/umap.png,sha256=dOLF3DeLYy9k0nkUybiZMe1wzHQwLJFRmgccppw-8b
 spacr/resources/images/plate1_E01_T0001F001L01A01Z01C02.tif,sha256=Tl0ZUfZ_AYAbu0up_nO0tPRtF1BxXhWQ3T3pURBCCRo,7958528
 spacr/resources/images/plate1_E01_T0001F001L01A02Z01C01.tif,sha256=m8N-V71rA1TT4dFlENNg8s0Q0YEXXs8slIn7yObmZJQ,7958528
 spacr/resources/images/plate1_E01_T0001F001L01A03Z01C03.tif,sha256=Pbhk7xn-KUP6RSIhJsxQcrHFImBm3GEpLkzx7WOc-5M,7958528
-spacr-0.3.52.dist-info/LICENSE,sha256=SR-2MeGc6SCM1UORJYyarSWY_A-JaOMFDj7ReSs9tRM,1083
-spacr-0.3.52.dist-info/METADATA,sha256=
-spacr-0.3.52.dist-info/WHEEL,sha256=HiCZjzuy6Dw0hdX5R3LCFPDmFS4BWl8H-8W39XfmgX4,91
-spacr-0.3.52.dist-info/entry_points.txt,sha256=BMC0ql9aNNpv8lUZ8sgDLQMsqaVnX5L535gEhKUP5ho,296
-spacr-0.3.52.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
-spacr-0.3.52.dist-info/RECORD,,
+spacr-0.3.55.dist-info/LICENSE,sha256=SR-2MeGc6SCM1UORJYyarSWY_A-JaOMFDj7ReSs9tRM,1083
+spacr-0.3.55.dist-info/METADATA,sha256=7MVci9IvzjK-fdFuASU8NAo3oOccK_g6cAYd06_IZLY,6032
+spacr-0.3.55.dist-info/WHEEL,sha256=HiCZjzuy6Dw0hdX5R3LCFPDmFS4BWl8H-8W39XfmgX4,91
+spacr-0.3.55.dist-info/entry_points.txt,sha256=BMC0ql9aNNpv8lUZ8sgDLQMsqaVnX5L535gEhKUP5ho,296
+spacr-0.3.55.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
+spacr-0.3.55.dist-info/RECORD,,
{spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/LICENSE
File without changes
{spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/WHEEL
File without changes
{spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/entry_points.txt
File without changes
{spacr-0.3.52.dist-info → spacr-0.3.55.dist-info}/top_level.txt
File without changes