celldetective 1.3.2__py3-none-any.whl → 1.3.3.post1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,7 +2,7 @@ import numpy as np
 from celldetective.io import auto_load_number_of_frames, load_frames
 from celldetective.filters import *
 from celldetective.segmentation import filter_image, threshold_image
-from celldetective.measure import contour_of_instance_segmentation
+from celldetective.measure import contour_of_instance_segmentation, extract_blobs_in_image
 from celldetective.utils import _get_img_num_per_channel, estimate_unreliable_edge
 from tifffile import imread
 import matplotlib.pyplot as plt
@@ -13,7 +13,7 @@ import os
 
 from PyQt5.QtWidgets import QWidget, QHBoxLayout, QPushButton, QLabel, QComboBox, QLineEdit, QListWidget, QShortcut
 from PyQt5.QtCore import Qt, QSize
-from PyQt5.QtGui import QKeySequence
+from PyQt5.QtGui import QKeySequence, QDoubleValidator
 from celldetective.gui.gui_utils import FigureCanvas, center_window, QuickSliderLayout, QHSeperationLine, ThresholdLineEdit
 from celldetective.gui import Styles
 from superqt import QLabeledDoubleSlider, QLabeledSlider, QLabeledDoubleRangeSlider
@@ -22,7 +22,11 @@ from fonticon_mdi6 import MDI6
 from matplotlib_scalebar.scalebar import ScaleBar
 import gc
 from celldetective.utils import mask_edges
-from scipy.ndimage import shift
+from scipy.ndimage import shift, grey_dilation
+from skimage.feature import blob_dog
+import math
+from skimage.morphology import disk
+from matplotlib.patches import Circle
 
 class StackVisualizer(QWidget, Styles):
 
@@ -80,6 +84,7 @@ class StackVisualizer(QWidget, Styles):
 		self.imshow_kwargs = imshow_kwargs
 		self.PxToUm = PxToUm
 		self.init_contrast = False
+		self.channel_trigger = False
 
 		self.load_stack() # need to get stack, frame etc
 		self.generate_figure_canvas()
@@ -123,13 +128,8 @@ class StackVisualizer(QWidget, Styles):
 
 	def locate_image_virtual(self):
 		# Locate the stack of images if provided as a file
-		self.stack_length = auto_load_number_of_frames(self.stack_path)
-		if self.stack_length is None:
-			stack = imread(self.stack_path)
-			self.stack_length = len(stack)
-			del stack
-			gc.collect()
 
+		self.stack_length = auto_load_number_of_frames(self.stack_path)
 		self.mid_time = self.stack_length // 2
 		self.img_num_per_channel = _get_img_num_per_channel(np.arange(self.n_channels), self.stack_length, self.n_channels)
 
@@ -146,7 +146,7 @@ class StackVisualizer(QWidget, Styles):
 		self.fig, self.ax = plt.subplots(figsize=(5,5),tight_layout=True) #figsize=(5, 5)
 		self.canvas = FigureCanvas(self.fig, title=self.window_title, interactive=True)
 		self.ax.clear()
-		self.im = self.ax.imshow(self.init_frame, cmap='gray', interpolation='none', **self.imshow_kwargs)
+		self.im = self.ax.imshow(self.init_frame, cmap='gray', interpolation='none', zorder=0, **self.imshow_kwargs)
 		if self.PxToUm is not None:
 			scalebar = ScaleBar(self.PxToUm,
 								"um",
@@ -250,12 +250,23 @@ class StackVisualizer(QWidget, Styles):
 		self.last_frame = load_frames(self.img_num_per_channel[self.target_channel, self.stack_length-1],
 									  self.stack_path,
 									  normalize_input=False).astype(float)[:,:,0]
-		self.change_frame(self.frame_slider.value())
+		self.change_frame_from_channel_switch(self.frame_slider.value())
+		self.channel_trigger = False
 		self.init_contrast = False
 
+	def change_frame_from_channel_switch(self, value):
+
+		self.channel_trigger = True
+		self.change_frame(value)
+
 	def change_frame(self, value):
+
 		# Change the displayed frame based on slider value
-
+		if self.channel_trigger:
+			self.switch_from_channel = True
+		else:
+			self.switch_from_channel = False
+
 		if self.mode=='virtual':
 
 			self.init_frame = load_frames(self.img_num_per_channel[self.target_channel, value],
@@ -279,7 +290,7 @@ class StackVisualizer(QWidget, Styles):
 
 	def closeEvent(self, event):
 		# Event handler for closing the widget
-		self.canvas.close()
+		self.canvas.close()
 
 
 class ThresholdedStackVisualizer(StackVisualizer):
@@ -617,6 +628,249 @@ class CellEdgeVisualizer(StackVisualizer):
 
 		self.edge_labels = contour_of_instance_segmentation(self.init_label, edge_size)
 
+class SpotDetectionVisualizer(StackVisualizer):
+
+	def __init__(self, parent_channel_cb=None, parent_diameter_le=None, parent_threshold_le=None, cell_type='targets', labels=None, *args, **kwargs):
+
+		super().__init__(*args, **kwargs)
+
+		self.cell_type = cell_type
+		self.labels = labels
+		self.detection_channel = self.target_channel
+		self.parent_channel_cb = parent_channel_cb
+		self.parent_diameter_le = parent_diameter_le
+		self.parent_threshold_le = parent_threshold_le
+		self.spot_sizes = []
+
+		self.floatValidator = QDoubleValidator()
+		self.init_scatter()
+		self.generate_detection_channel()
+		self.generate_spot_detection_params()
+		self.generate_add_measurement_btn()
+		self.load_labels()
+		self.change_frame(self.mid_time)
+
+		self.ax.callbacks.connect('xlim_changed', self.update_marker_sizes)
+		self.ax.callbacks.connect('ylim_changed', self.update_marker_sizes)
+
+		self.apply_diam_btn.clicked.connect(self.detect_and_display_spots)
+		self.apply_thresh_btn.clicked.connect(self.detect_and_display_spots)
+
+		self.channels_cb.setCurrentIndex(self.target_channel)
+		self.detection_channel_cb.setCurrentIndex(self.target_channel)
+
+	def update_marker_sizes(self, event=None):
+
+		# Get axis bounds
+		xlim = self.ax.get_xlim()
+		ylim = self.ax.get_ylim()
+
+		# Data-to-pixel scale
+		ax_width_in_pixels = self.ax.bbox.width
+		ax_height_in_pixels = self.ax.bbox.height
+
+		x_scale = (xlim[1] - xlim[0]) / ax_width_in_pixels
+		y_scale = (ylim[1] - ylim[0]) / ax_height_in_pixels
+
+		# Choose the smaller scale for square pixels
+		scale = min(x_scale, y_scale)
+
+		# Convert radius_px to data units
+		if len(self.spot_sizes)>0:
+
+			radius_data_units = self.spot_sizes / scale
+
+			# Convert to scatter `s` size (points squared)
+			radius_pts = radius_data_units * (72. / self.fig.dpi )
+			size = np.pi * (radius_pts ** 2)
+
+			# Update scatter sizes
+			self.spot_scat.set_sizes(size)
+			self.fig.canvas.draw_idle()
+
+	def init_scatter(self):
+		self.spot_scat = self.ax.scatter([],[], s=50, facecolors='none', edgecolors='tab:red',zorder=100)
+		self.canvas.canvas.draw()
+
+	def change_frame(self, value):
+
+		super().change_frame(value)
+		if not self.switch_from_channel:
+			self.reset_detection()
+
+		if self.mode=='virtual':
+			self.init_label = imread(self.mask_paths[value])
+			self.target_img = load_frames(self.img_num_per_channel[self.detection_channel, value],
+										  self.stack_path,
+										  normalize_input=False).astype(float)[:,:,0]
+		elif self.mode=='direct':
+			self.init_label = self.labels[value,:,:]
+			self.target_img = self.stack[value,:,:,self.detection_channel].copy()
+
+	def detect_and_display_spots(self):
+
+		self.reset_detection()
+		self.control_valid_parameters() # set current diam and threshold
+		blobs_filtered = extract_blobs_in_image(self.target_img, self.init_label,threshold=self.thresh, diameter=self.diameter)
+		if blobs_filtered is not None:
+			self.spot_positions = np.array([[x,y] for y,x,_ in blobs_filtered])
+
+			self.spot_sizes = np.sqrt(2)*np.array([sig for _,_,sig in blobs_filtered])
+			print(f"{self.spot_sizes=}")
+			#radius_pts = self.spot_sizes * (self.fig.dpi / 72.0)
+			#sizes = np.pi*(radius_pts**2)
+
+			self.spot_scat.set_offsets(self.spot_positions)
+			#self.spot_scat.set_sizes(sizes)
+			self.update_marker_sizes()
+			self.canvas.canvas.draw()
+
+	def reset_detection(self):
+
+		self.ax.scatter([], []).get_offsets()
+		empty_offset = np.ma.masked_array([0, 0], mask=True)
+		self.spot_scat.set_offsets(empty_offset)
+		self.canvas.canvas.draw()
+
+	def load_labels(self):
+
+		# Load the cell labels
+		if self.labels is not None:
+
+			if isinstance(self.labels, list):
+				self.labels = np.array(self.labels)
+
+			assert self.labels.ndim==3,'Wrong dimensions for the provided labels, expect TXY'
+			assert len(self.labels)==self.stack_length
+
+			self.mode = 'direct'
+			self.init_label = self.labels[self.mid_time,:,:]
+		else:
+			self.mode = 'virtual'
+			assert isinstance(self.stack_path, str)
+			assert self.stack_path.endswith('.tif')
+			self.locate_labels_virtual()
+
+	def locate_labels_virtual(self):
+		# Locate virtual labels
+
+		labels_path = str(Path(self.stack_path).parent.parent) + os.sep + f'labels_{self.cell_type}' + os.sep
+		self.mask_paths = natsorted(glob(labels_path + '*.tif'))
+
+		if len(self.mask_paths) == 0:
+
+			msgBox = QMessageBox()
+			msgBox.setIcon(QMessageBox.Critical)
+			msgBox.setText("No labels were found for the selected cells. Abort.")
+			msgBox.setWindowTitle("Critical")
+			msgBox.setStandardButtons(QMessageBox.Ok)
+			returnValue = msgBox.exec()
+			self.close()
+
+		self.init_label = imread(self.mask_paths[self.frame_slider.value()])
+
+	def generate_detection_channel(self):
+
+		assert self.channel_names is not None
+		assert len(self.channel_names)==self.n_channels
+
+		channel_layout = QHBoxLayout()
+		channel_layout.setContentsMargins(15,0,15,0)
+		channel_layout.addWidget(QLabel('Detection\nchannel: '), 25)
+
+		self.detection_channel_cb = QComboBox()
+		self.detection_channel_cb.addItems(self.channel_names)
+		self.detection_channel_cb.currentIndexChanged.connect(self.set_detection_channel_index)
+		channel_layout.addWidget(self.detection_channel_cb, 75)
+		self.canvas.layout.addLayout(channel_layout)
+
+	def set_detection_channel_index(self, value):
+
+		self.detection_channel = value
+		if self.mode == 'direct':
+			self.last_frame = self.stack[-1,:,:,self.target_channel]
+		elif self.mode == 'virtual':
+			self.target_img = load_frames(self.img_num_per_channel[self.detection_channel, self.stack_length-1],
+										  self.stack_path,
+										  normalize_input=False).astype(float)[:,:,0]
+
+	def generate_spot_detection_params(self):
+
+		self.spot_diam_le = QLineEdit('1')
+		self.spot_diam_le.setValidator(self.floatValidator)
+		self.apply_diam_btn = QPushButton('Set')
+		self.apply_diam_btn.setStyleSheet(self.button_style_sheet_2)
+
+		self.spot_thresh_le = QLineEdit('0')
+		self.spot_thresh_le.setValidator(self.floatValidator)
+		self.apply_thresh_btn = QPushButton('Set')
+		self.apply_thresh_btn.setStyleSheet(self.button_style_sheet_2)
+
+		self.spot_diam_le.textChanged.connect(self.control_valid_parameters)
+		self.spot_thresh_le.textChanged.connect(self.control_valid_parameters)
+
+		spot_diam_layout = QHBoxLayout()
+		spot_diam_layout.setContentsMargins(15,0,15,0)
+		spot_diam_layout.addWidget(QLabel('Spot diameter: '), 25)
+		spot_diam_layout.addWidget(self.spot_diam_le, 65)
+		spot_diam_layout.addWidget(self.apply_diam_btn, 10)
+		self.canvas.layout.addLayout(spot_diam_layout)
+
+		spot_thresh_layout = QHBoxLayout()
+		spot_thresh_layout.setContentsMargins(15,0,15,0)
+		spot_thresh_layout.addWidget(QLabel('Detection\nthreshold: '), 25)
+		spot_thresh_layout.addWidget(self.spot_thresh_le, 65)
+		spot_thresh_layout.addWidget(self.apply_thresh_btn, 10)
+		self.canvas.layout.addLayout(spot_thresh_layout)
+
+	def generate_add_measurement_btn(self):
+
+		add_hbox = QHBoxLayout()
+		self.add_measurement_btn = QPushButton('Add measurement')
+		self.add_measurement_btn.clicked.connect(self.set_measurement_in_parent_list)
+		self.add_measurement_btn.setIcon(icon(MDI6.plus,color="white"))
+		self.add_measurement_btn.setIconSize(QSize(20, 20))
+		self.add_measurement_btn.setStyleSheet(self.button_style_sheet)
+		add_hbox.addWidget(QLabel(''),33)
+		add_hbox.addWidget(self.add_measurement_btn, 33)
+		add_hbox.addWidget(QLabel(''),33)
+		self.canvas.layout.addLayout(add_hbox)
+
+	def control_valid_parameters(self):
+
+		valid_diam = False
+		try:
+			self.diameter = float(self.spot_diam_le.text().replace(',','.'))
+			valid_diam = True
+		except:
+			valid_diam = False
+
+		valid_thresh = False
+		try:
+			self.thresh = float(self.spot_thresh_le.text().replace(',','.'))
+			valid_thresh = True
+		except:
+			valid_thresh = False
+
+		if valid_diam and valid_thresh:
+			self.apply_diam_btn.setEnabled(True)
+			self.apply_thresh_btn.setEnabled(True)
+			self.add_measurement_btn.setEnabled(True)
+		else:
+			self.apply_diam_btn.setEnabled(False)
+			self.apply_thresh_btn.setEnabled(False)
+			self.add_measurement_btn.setEnabled(False)
+
+	def set_measurement_in_parent_list(self):
+
+		if self.parent_channel_cb is not None:
+			self.parent_channel_cb.setCurrentIndex(self.detection_channel)
+		if self.parent_diameter_le is not None:
+			self.parent_diameter_le.setText(self.spot_diam_le.text())
+		if self.parent_threshold_le is not None:
+			self.parent_threshold_le.setText(self.spot_thresh_le.text())
+		self.close()
+
 class CellSizeViewer(StackVisualizer):
 
 	"""
celldetective/io.py CHANGED
@@ -14,7 +14,7 @@ from btrack.datasets import cell_config
 from magicgui import magicgui
 from csbdeep.io import save_tiff_imagej_compatible
 from pathlib import Path, PurePath
-from shutil import copyfile
+from shutil import copyfile, rmtree
 from celldetective.utils import ConfigSectionMap, extract_experiment_channels, _extract_labels_from_config, get_zenodo_files, download_zenodo_file
 import json
 from skimage.measure import regionprops_table
@@ -674,10 +674,20 @@ def locate_stack(position, prefix='Aligned'):
 	stack_path = glob(position + os.sep.join(['movie', f'{prefix}*.tif']))
 	assert len(stack_path) > 0, f"No movie with prefix {prefix} found..."
 	stack = imread(stack_path[0].replace('\\', '/'))
+	stack_length = auto_load_number_of_frames(stack_path[0])
+
 	if stack.ndim == 4:
 		stack = np.moveaxis(stack, 1, -1)
 	elif stack.ndim == 3:
-		stack = stack[:, :, :, np.newaxis]
+		if min(stack.shape)!=stack_length:
+			channel_axis = np.argmin(stack.shape)
+			if channel_axis!=(stack.ndim-1):
+				stack = np.moveaxis(stack, channel_axis, -1)
+			stack = stack[np.newaxis, :, :, :]
+		else:
+			stack = stack[:, :, :, np.newaxis]
+	elif stack.ndim==2:
+		stack = stack[np.newaxis, :, :, np.newaxis]
 
 	return stack
 
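
Note on the locate_stack change above: a 3D array is no longer assumed to be a single-channel movie. When the smallest axis does not match the frame count returned by auto_load_number_of_frames, that axis is interpreted as channels and the array is promoted to a single-frame (T, Y, X, C) stack. A toy illustration of the reshaping, with made-up shapes:

import numpy as np

stack = np.zeros((3, 512, 512))   # one frame with 3 channels, channels first
stack_length = 1                  # frame count reported for this file
if min(stack.shape) != stack_length:
	channel_axis = np.argmin(stack.shape)             # 0
	if channel_axis != (stack.ndim - 1):
		stack = np.moveaxis(stack, channel_axis, -1)  # (512, 512, 3)
	stack = stack[np.newaxis, :, :, :]                # (1, 512, 512, 3): T, Y, X, C
else:
	stack = stack[:, :, :, np.newaxis]                # single-channel movie: (T, Y, X, 1)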
@@ -912,6 +922,7 @@ def auto_load_number_of_frames(stack_path):
 		return None
 
 	stack_path = stack_path.replace('\\','/')
+	n_channels=1
 
 	with TiffFile(stack_path) as tif:
 		try:
@@ -921,7 +932,8 @@ def auto_load_number_of_frames(stack_path):
 				tif_tags[name] = value
 			img_desc = tif_tags["ImageDescription"]
 			attr = img_desc.split("\n")
-		except:
+			n_channels = int(attr[np.argmax([s.startswith("channels") for s in attr])].split("=")[-1])
+		except Exception as e:
 			pass
 		try:
 			# Try nframes
@@ -948,6 +960,10 @@ def auto_load_number_of_frames(stack_path):
 	if 'len_movie' not in locals():
 		stack = imread(stack_path)
 		len_movie = len(stack)
+		if len_movie==n_channels and stack.ndim==3:
+			len_movie = 1
+		if stack.ndim==2:
+			len_movie = 1
 		del stack
 		gc.collect()
 
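
Note on auto_load_number_of_frames above: the channel count is now read from the ImageJ-style ImageDescription tag, a newline-separated list of key=value pairs, so a 3D stack whose length equals its channel count is reported as one frame rather than several. A small illustration of the parsing, using a made-up metadata string:

import numpy as np

img_desc = "ImageJ=1.53t\nimages=3\nchannels=3\nslices=1\nframes=1"  # hypothetical tag content
attr = img_desc.split("\n")
n_channels = int(attr[np.argmax([s.startswith("channels") for s in attr])].split("=")[-1])
print(n_channels)  # 3 -> a (3, Y, X) stack is then counted as a single frame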
@@ -1585,6 +1601,8 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
 
 		for k, sq in enumerate(squares):
 			print(f"ROI: {sq}")
+			pad_to_256=False
+
 			xmin = int(sq[0, 1])
 			xmax = int(sq[2, 1])
 			if xmax < xmin:
@@ -1596,8 +1614,9 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
 			print(f"{xmin=};{xmax=};{ymin=};{ymax=}")
 			frame = viewer.layers['Image'].data[t][xmin:xmax, ymin:ymax]
 			if frame.shape[1] < 256 or frame.shape[0] < 256:
-				print("crop too small!")
-				continue
+				pad_to_256 = True
+				print("Crop too small! Padding with zeros to reach 256*256 pixels...")
+				#continue
 			multichannel = [frame]
 			for i in range(len(channel_indices) - 1):
 				try:
@@ -1605,8 +1624,21 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
 					multichannel.append(frame)
 				except:
 					pass
-			multichannel = np.array(multichannel)
-			save_tiff_imagej_compatible(annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}_roi_{xmin}_{xmax}_{ymin}_{ymax}_labelled.tif", labels_layer[xmin:xmax,ymin:ymax].astype(np.int16), axes='YX')
+			multichannel = np.array(multichannel)
+			lab = labels_layer[xmin:xmax,ymin:ymax].astype(np.int16)
+			if pad_to_256:
+				shape = multichannel.shape
+				pad_length_x = max([0,256 - multichannel.shape[1]])
+				if pad_length_x>0 and pad_length_x%2==1:
+					pad_length_x += 1
+				pad_length_y = max([0,256 - multichannel.shape[2]])
+				if pad_length_y>0 and pad_length_y%2==1:
+					pad_length_y += 1
+				padded_image = np.array([np.pad(im, ((pad_length_x//2,pad_length_x//2), (pad_length_y//2,pad_length_y//2)), mode='constant') for im in multichannel])
+				padded_label = np.pad(lab,((pad_length_x//2,pad_length_x//2), (pad_length_y//2,pad_length_y//2)), mode='constant')
+				lab = padded_label; multichannel = padded_image;
+
+			save_tiff_imagej_compatible(annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}_roi_{xmin}_{xmax}_{ymin}_{ymax}_labelled.tif", lab, axes='YX')
 			save_tiff_imagej_compatible(annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}_roi_{xmin}_{xmax}_{ymin}_{ymax}.tif", multichannel, axes='CYX')
 			info = {"spatial_calibration": spatial_calibration, "channels": list(channel_names), 'cell_type': ct, 'antibody': ab, 'concentration': conc, 'pharmaceutical_agent': pa}
 			info_name = annotation_folder + f"{exp_name}_{position.split(os.sep)[-2]}_{str(t).zfill(4)}_roi_{xmin}_{xmax}_{ymin}_{ymax}.json"
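
Note on the annotation export above: ROIs smaller than 256x256 are no longer skipped; each pad length is rounded up to an even value so the zero padding can be split equally on both sides. A toy example of the arithmetic, with made-up shapes (an odd deficit is rounded up, so a dimension can end at 257):

import numpy as np

crop = np.ones((3, 180, 211))          # C, Y, X crop smaller than 256x256
pad_y = max([0, 256 - crop.shape[1]])  # 76, already even
pad_x = max([0, 256 - crop.shape[2]])  # 45 -> 46 after rounding up
if pad_y > 0 and pad_y % 2 == 1:
	pad_y += 1
if pad_x > 0 and pad_x % 2 == 1:
	pad_x += 1
padded = np.array([np.pad(im, ((pad_y//2, pad_y//2), (pad_x//2, pad_x//2)), mode='constant') for im in crop])
print(padded.shape)                    # (3, 256, 257)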
@@ -1645,6 +1677,7 @@ def control_segmentation_napari(position, prefix='Aligned', population="target",
 		population += 's'
 	output_folder = position + f'labels_{population}{os.sep}'
 
+	print(f"{stack.shape}")
 	viewer = napari.Viewer()
 	viewer.add_image(stack, channel_axis=-1, colormap=["gray"] * stack.shape[-1])
 	viewer.add_labels(labels.astype(int), name='segmentation', opacity=0.4)
@@ -1832,10 +1865,24 @@ def get_segmentation_models_list(mode='targets', return_path=False):
 
 	available_models = natsorted(glob(modelpath + '*/'))
 	available_models = [m.replace('\\', '/').split('/')[-2] for m in available_models]
+
+	# Auto model cleanup
+	to_remove = []
+	for model in available_models:
+		path = modelpath + model
+		files = glob(path+os.sep+"*")
+		if path+os.sep+"config_input.json" not in files:
+			rmtree(path)
+			to_remove.append(model)
+	for m in to_remove:
+		available_models.remove(m)
+
+
 	for rm in repository_models:
 		if rm not in available_models:
 			available_models.append(rm)
 
+
 	if not return_path:
 		return available_models
 	else: