vfbquery 0.3.3__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
test/term_info_queries_test.py CHANGED

@@ -64,7 +64,7 @@ class TermInfoQueriesTest(unittest.TestCase):
 
         self.assertEqual(0, len(terminfo.xrefs))
 
-        self.assertEqual(6, len(terminfo.pub_syn))
+        self.assertEqual(7, len(terminfo.pub_syn))
 
         # Check that we have the expected synonym labels (order-independent)
         synonym_labels = [entry.synonym.label for entry in terminfo.pub_syn]
@@ -145,7 +145,7 @@ class TermInfoQueriesTest(unittest.TestCase):
         self.assertFalse("link" in serialized)
         self.assertEqual(4, len(serialized["types"]))
         self.assertTrue("Anatomy" in serialized["types"])
-        self.assertEqual("Cyst composed of two cyst cells following the division of a newly-formed cystoblast in the germarium. The two cells are connected by a cytoplasmic bridge.\n([Spradling, 1993](FBrf0064777), [King, 1970](FBrf0021038))", serialized["description"])
+        self.assertEqual("Cyst composed of two cyst cells following the division of a newly-formed cystoblast in the germarium. The two cells are connected by a cytoplasmic bridge.\n([King, 1970](FBrf0021038))", serialized["description"])
         self.assertTrue("synonyms" in serialized)
         self.assertEqual(1, len(serialized["synonyms"]))
         self.assertEqual("has_exact_synonym: germarial 2-cell cluster ([King, 1970](FBrf0021038))", serialized["synonyms"][0])
@@ -166,13 +166,10 @@ class TermInfoQueriesTest(unittest.TestCase):
         self.assertFalse("examples" in serialized)
         self.assertFalse("thumbnail" in serialized)
         self.assertTrue("references" in serialized)
-        self.assertEqual(2, len(serialized["references"]))
-        self.assertEqual({'link': '[Spradling, 1993, Bate, Martinez Arias, 1993: 1--70](FBrf0064777)',
-                          'refs': ['http://flybase.org/reports/FBrf0064777'],
-                          'types': ' pub'}, serialized["references"][0])
+        self.assertEqual(1, len(serialized["references"]))
         self.assertEqual({'link': '[King, 1970, Ovarian Development in Drosophila melanogaster. ](FBrf0021038)',
                           'refs': ['http://flybase.org/reports/FBrf0021038'],
-                          'types': ' pub'}, serialized["references"][1])
+                          'types': ' pub'}, serialized["references"][0])
         self.assertFalse("targetingSplits" in serialized)
         self.assertFalse("targetingNeurons" in serialized)
 
@@ -261,7 +258,7 @@ class TermInfoQueriesTest(unittest.TestCase):
         self.assertTrue("Turner-Evans et al., 2020" in description)
 
         self.assertTrue("synonyms" in serialized)
-        self.assertEqual(9, len(serialized["synonyms"]))
+        self.assertEqual(10, len(serialized["synonyms"]))
         print(serialized["synonyms"][0])
         self.assertTrue("has_exact_synonym: EB-PB 1 glomerulus-D/Vgall neuron" in serialized["synonyms"])
         self.assertFalse("source" in serialized)
@@ -293,7 +290,7 @@ class TermInfoQueriesTest(unittest.TestCase):
         self.assertFalse("thumbnail" in serialized)
 
         self.assertTrue("references" in serialized)
-        self.assertEqual(6, len(serialized["references"]))
+        self.assertEqual(7, len(serialized["references"]))
 
         self.assertTrue("targetingSplits" in serialized)
         self.assertEqual(6, len(serialized["targetingSplits"]))
vfbquery/__init__.py CHANGED
@@ -1,4 +1,4 @@
 from .vfb_queries import *
 
 # Version information
-__version__ = "0.3.3"
+__version__ = "0.3.4"
vfbquery/term_info_queries.py CHANGED

@@ -1,5 +1,21 @@
 import re
 import json
+import numpy as np
+
+# Custom JSON encoder to handle NumPy and pandas types
+class NumpyEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.integer):
+            return int(obj)
+        elif isinstance(obj, np.floating):
+            return float(obj)
+        elif isinstance(obj, np.ndarray):
+            return obj.tolist()
+        elif isinstance(obj, np.bool_):
+            return bool(obj)
+        elif hasattr(obj, 'item'):  # Handle pandas scalar types
+            return obj.item()
+        return super(NumpyEncoder, self).default(obj)
 import requests
 from dataclasses import dataclass
 from dataclasses_json import dataclass_json
@@ -15,7 +31,7 @@ class Coordinates:
     Z: float
 
     def __str__(self):
-        return json.dumps([str(self.X), str(self.Y), str(self.Z)])
+        return json.dumps([str(self.X), str(self.Y), str(self.Z)], cls=NumpyEncoder)
 
 
 class CoordinatesFactory:
@@ -1062,7 +1078,7 @@ def serialize_term_info_to_json(vfb_term: VfbTerminfo, show_types=False) -> str:
     :return: json string representation of the term info object
     """
     term_info_dict = serialize_term_info_to_dict(vfb_term, show_types)
-    return json.dumps(term_info_dict, indent=4)
+    return json.dumps(term_info_dict, indent=4, cls=NumpyEncoder)
 
 
 def process(term_info_response: dict, variable, loaded_template: Optional[str] = None, show_types=False) -> dict:
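For orientation, a minimal usage sketch of the encoder introduced above (the payload values are invented; the import path assumes `NumpyEncoder` remains module-level in `vfbquery/term_info_queries.py`, as the hunk shows):

```python
import json
import numpy as np

from vfbquery.term_info_queries import NumpyEncoder  # added in this release

# The stdlib json module cannot serialize NumPy scalars or arrays on its own;
# cls=NumpyEncoder routes them through default(), which casts them to native
# Python types via the branches shown above.
payload = {
    "count": np.int64(7),          # np.integer  -> int
    "score": np.float32(0.61),     # np.floating -> float
    "present": np.bool_(True),     # np.bool_    -> bool
    "layers": np.array([1, 2, 3]), # np.ndarray  -> list
}
print(json.dumps(payload, indent=4, cls=NumpyEncoder))
```

The same `cls=NumpyEncoder` argument is what the `Coordinates.__str__` and `serialize_term_info_to_json` changes above pass to `json.dumps`.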
vfbquery/test_utils.py CHANGED
@@ -1,6 +1,41 @@
 import pandas as pd
+import json
+import numpy as np
 from typing import Any, Dict, Union
 
+# Custom JSON encoder to handle NumPy and pandas types
+class NumpyEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.integer):
+            return int(obj)
+        elif isinstance(obj, np.floating):
+            return float(obj)
+        elif isinstance(obj, np.ndarray):
+            return obj.tolist()
+        elif isinstance(obj, np.bool_):
+            return bool(obj)
+        elif hasattr(obj, 'item'):  # Handle pandas scalar types
+            return obj.item()
+        return super(NumpyEncoder, self).default(obj)
+
+def safe_to_dict(df, sort_by_id=True):
+    """Convert DataFrame to dict with numpy types converted to native Python types"""
+    if isinstance(df, pd.DataFrame):
+        # Convert numpy dtypes to native Python types
+        df_copy = df.copy()
+        for col in df_copy.columns:
+            if df_copy[col].dtype.name.startswith('int'):
+                df_copy[col] = df_copy[col].astype('object')
+            elif df_copy[col].dtype.name.startswith('float'):
+                df_copy[col] = df_copy[col].astype('object')
+
+        # Sort by id column in descending order if it exists and sort_by_id is True
+        if sort_by_id and 'id' in df_copy.columns:
+            df_copy = df_copy.sort_values('id', ascending=False)
+
+        return df_copy.to_dict("records")
+    return df
+
 def safe_extract_row(result: Any, index: int = 0) -> Dict:
     """
     Safely extract a row from a pandas DataFrame or return the object itself if not a DataFrame.
@@ -11,11 +46,83 @@ def safe_extract_row(result: Any, index: int = 0) -> Dict:
     """
     if isinstance(result, pd.DataFrame):
         if not result.empty and len(result.index) > index:
-            return result.iloc[index].to_dict()
+            # Convert to dict using safe method to handle numpy types
+            row_series = result.iloc[index]
+            return {col: (val.item() if hasattr(val, 'item') else val) for col, val in row_series.items()}
         else:
            return {}
     return result
 
+def sanitize_for_json(obj: Any) -> Any:
+    """
+    Recursively sanitize any data structure to make it JSON serializable.
+    Converts numpy types, pandas types, and other non-serializable types to native Python types.
+
+    :param obj: Object to sanitize
+    :return: JSON-serializable version of the object
+    """
+    if isinstance(obj, dict):
+        return {key: sanitize_for_json(value) for key, value in obj.items()}
+    elif isinstance(obj, (list, tuple)):
+        return [sanitize_for_json(item) for item in obj]
+    elif isinstance(obj, np.integer):
+        return int(obj)
+    elif isinstance(obj, np.floating):
+        return float(obj)
+    elif isinstance(obj, np.ndarray):
+        return obj.tolist()
+    elif isinstance(obj, np.bool_):
+        return bool(obj)
+    elif hasattr(obj, 'item'):  # Handle pandas scalar types
+        return obj.item()
+    elif isinstance(obj, pd.DataFrame):
+        return safe_to_dict(obj)
+    elif hasattr(obj, '__dict__'):  # Handle custom objects
+        return sanitize_for_json(obj.__dict__)
+    else:
+        return obj
+
+def safe_json_dumps(obj: Any, **kwargs) -> str:
+    """
+    Safely serialize any object to JSON string, handling numpy and pandas types.
+
+    :param obj: Object to serialize
+    :param kwargs: Additional arguments to pass to json.dumps
+    :return: JSON string
+    """
+    # Set default arguments
+    default_kwargs = {'indent': 2, 'ensure_ascii': False, 'cls': NumpyEncoder}
+    default_kwargs.update(kwargs)
+
+    try:
+        # First try with the NumpyEncoder
+        return json.dumps(obj, **default_kwargs)
+    except (TypeError, ValueError):
+        # If that fails, sanitize the object first
+        sanitized_obj = sanitize_for_json(obj)
+        return json.dumps(sanitized_obj, **default_kwargs)
+
+def pretty_print_vfb_result(result: Any, max_length: int = 1000) -> None:
+    """
+    Pretty print any VFB query result in a safe, readable format.
+
+    :param result: Result from any VFB query function
+    :param max_length: Maximum length of output (truncates if longer)
+    """
+    try:
+        json_str = safe_json_dumps(result)
+        if len(json_str) > max_length:
+            print(json_str[:max_length] + f'\n... (truncated, full length: {len(json_str)} characters)')
+        else:
+            print(json_str)
+    except Exception as e:
+        print(f'Error printing result: {e}')
+        print(f'Result type: {type(result)}')
+        if hasattr(result, '__dict__'):
+            print(f'Result attributes: {list(result.__dict__.keys())}')
+        else:
+            print(f'Result: {str(result)[:max_length]}...')
+
 def patch_vfb_connect_query_wrapper():
     """
     Apply monkey patches to VfbConnect.neo_query_wrapper to make it handle DataFrame results safely.
@@ -28,8 +135,8 @@ def patch_vfb_connect_query_wrapper():
     def patched_get_term_info(self, terms, *args, **kwargs):
         result = original_get_term_info(self, terms, *args, **kwargs)
         if isinstance(result, pd.DataFrame):
-            # Return list of row dictionaries instead of DataFrame
-            return [row.to_dict() for i, row in result.iterrows()]
+            # Return list of row dictionaries instead of DataFrame using safe conversion
+            return safe_to_dict(result)
         return result
 
     NeoQueryWrapper._get_TermInfo = patched_get_term_info
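To show how the new helpers compose, here is a hypothetical sketch (the DataFrame contents are invented; `safe_to_dict`, `safe_json_dumps`, and `pretty_print_vfb_result` are the functions added above):

```python
import numpy as np
import pandas as pd

from vfbquery.test_utils import safe_to_dict, safe_json_dumps, pretty_print_vfb_result

# A result frame with NumPy dtypes, similar in shape to what the query wrappers return.
df = pd.DataFrame({
    "id": ["VFB_00017894", "VFB_00101567"],
    "count": np.array([4, 6], dtype=np.int64),
})

records = safe_to_dict(df)        # numeric columns cast to object, rows sorted by 'id' descending
print(safe_json_dumps(records))   # tries NumpyEncoder first, falls back to sanitize_for_json
pretty_print_vfb_result(records)  # same serialization, truncated for very long results
```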
vfbquery/vfb_queries.py CHANGED
@@ -8,6 +8,40 @@ from typing import List, Tuple, Dict, Any, Union
 import pandas as pd
 from marshmallow import ValidationError
 import json
+import numpy as np
+
+# Custom JSON encoder to handle NumPy and pandas types
+class NumpyEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.integer):
+            return int(obj)
+        elif isinstance(obj, np.floating):
+            return float(obj)
+        elif isinstance(obj, np.ndarray):
+            return obj.tolist()
+        elif isinstance(obj, np.bool_):
+            return bool(obj)
+        elif hasattr(obj, 'item'):  # Handle pandas scalar types
+            return obj.item()
+        return super(NumpyEncoder, self).default(obj)
+
+def safe_to_dict(df, sort_by_id=True):
+    """Convert DataFrame to dict with numpy types converted to native Python types"""
+    if isinstance(df, pd.DataFrame):
+        # Convert numpy dtypes to native Python types
+        df_copy = df.copy()
+        for col in df_copy.columns:
+            if df_copy[col].dtype.name.startswith('int'):
+                df_copy[col] = df_copy[col].astype('object')
+            elif df_copy[col].dtype.name.startswith('float'):
+                df_copy[col] = df_copy[col].astype('object')
+
+        # Sort by id column in descending order if it exists and sort_by_id is True
+        if sort_by_id and 'id' in df_copy.columns:
+            df_copy = df_copy.sort_values('id', ascending=False)
+
+        return df_copy.to_dict("records")
+    return df
 
 # Lazy import for dict_cursor to avoid GUI library issues
 def get_dict_cursor():
@@ -489,6 +523,11 @@ def term_info_parse_object(results, short_form):
                 if "image_" in key and not ("thumbnail" in key or "folder" in key) and len(vars(image.channel_image.image)[key]) > 1:
                     record[key.replace("image_","")] = vars(image.channel_image.image)[key].replace("http://","https://")
             images[image.channel_image.image.template_anatomy.short_form].append(record)
+
+        # Sort each template's images by id in descending order (newest first)
+        for template_key in images:
+            images[template_key] = sorted(images[template_key], key=lambda x: x["id"], reverse=True)
+
         termInfo["Examples"] = images
         # add a query to `queries` list for listing all available images
         q = ListAllAvailableImages_to_schema(termInfo["Name"], {"short_form":vfbTerm.term.core.short_form})
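The next hunk applies the same per-template sort to `termInfo["Images"]`. As a small illustration (ids taken from the README example further down, labels omitted), sorting the zero-padded id strings in reverse order is what puts the higher-numbered, newer VFB images first:

```python
# Hypothetical image records mirroring the per-template lists built above.
records = [
    {"id": "VFB_00030624"},
    {"id": "VFB_00101385"},
]
records = sorted(records, key=lambda x: x["id"], reverse=True)
print([r["id"] for r in records])  # ['VFB_00101385', 'VFB_00030624']
```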
@@ -512,6 +551,11 @@ def term_info_parse_object(results, short_form):
                 if "image_" in key and not ("thumbnail" in key or "folder" in key) and len(vars(image.image)[key]) > 1:
                     record[key.replace("image_","")] = vars(image.image)[key].replace("http://","https://")
             images[image.image.template_anatomy.short_form].append(record)
+
+        # Sort each template's images by id in descending order (newest first)
+        for template_key in images:
+            images[template_key] = sorted(images[template_key], key=lambda x: x["id"], reverse=True)
+
         # Add the thumbnails to the term info
         termInfo["Images"] = images
 
@@ -780,8 +824,13 @@ def ListAllAvailableImages_to_schema(name, take_default):
     return Query(query=query, label=label, function=function, takes=takes, preview=preview, preview_columns=preview_columns)
 
 def serialize_solr_output(results):
-    # Serialize the sanitized dictionary to JSON
-    json_string = json.dumps(results.docs[0], ensure_ascii=False)
+    # Create a copy of the document and remove Solr-specific fields
+    doc = dict(results.docs[0])
+    # Remove the _version_ field which can cause serialization issues with large integers
+    doc.pop('_version_', None)
+
+    # Serialize the sanitized dictionary to JSON using NumpyEncoder
+    json_string = json.dumps(doc, ensure_ascii=False, cls=NumpyEncoder)
     json_string = json_string.replace('\\', '')
     json_string = json_string.replace('"{', '{')
     json_string = json_string.replace('}"', '}')
@@ -914,7 +963,7 @@ def get_instances(short_form: str, return_dataframe=True, limit: int = -1):
                    "thumbnail"
                ]
            }
-           for row in df.to_dict("records")
+           for row in safe_to_dict(df)
        ],
        "count": total_count
    }
@@ -1002,7 +1051,7 @@ def get_templates(limit: int = -1, return_dataframe: bool = False):
                    "license"
                ]
            }
-           for row in df.to_dict("records")
+           for row in safe_to_dict(df)
        ],
        "count": total_count
    }
@@ -1118,7 +1167,7 @@ def get_similar_neurons(neuron, similarity_score='NBLAST_score', return_datafram
                    "thumbnail"
                ]
            }
-           for row in df.to_dict("records")
+           for row in safe_to_dict(df)
        ],
        "count": total_count
    }
@@ -1228,7 +1277,7 @@ def get_individual_neuron_inputs(neuron_short_form: str, return_dataframe=True,
                    "Images"
                ]
            }
-           for row in df.to_dict("records")
+           for row in safe_to_dict(df)
        ],
        "count": total_count
    }
@@ -1248,7 +1297,7 @@ def get_individual_neuron_inputs(neuron_short_form: str, return_dataframe=True,
                    "Weight",
                ]
            }
-           for row in df.to_dict("records")
+           for row in safe_to_dict(df)
        ],
        "count": total_count
    }
@@ -1313,7 +1362,7 @@ def fill_query_results(term_info):
                    filtered_item = item
                filtered_result.append(filtered_item)
            elif isinstance(result, pd.DataFrame):
-               filtered_result = result[query['preview_columns']].to_dict('records')
+               filtered_result = safe_to_dict(result[query['preview_columns']])
            else:
                print(f"Unsupported result format for filtering columns in {query['function']}")
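The hunks above all make the same substitution: `df.to_dict("records")` becomes `safe_to_dict(df)` in the row-building comprehensions of `get_instances`, `get_templates`, `get_similar_neurons`, `get_individual_neuron_inputs`, and `fill_query_results`. A rough before/after sketch with an invented two-column frame (the column names are illustrative, not the real query schema):

```python
import json
import numpy as np
import pandas as pd

from vfbquery.vfb_queries import NumpyEncoder, safe_to_dict

df = pd.DataFrame({"id": ["VFB_00000333"], "count": np.array([44], dtype=np.int64)})

# Before: the records may keep NumPy scalars, which the stdlib json module rejects.
try:
    json.dumps(df.to_dict("records"))
except TypeError as err:
    print("plain to_dict:", err)

# After: safe_to_dict casts numeric columns to plain Python objects (and sorts by 'id'),
# and NumpyEncoder catches any NumPy value that still slips through.
print(json.dumps(safe_to_dict(df), cls=NumpyEncoder))
```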
vfbquery-0.3.3.dist-info/METADATA → vfbquery-0.3.4.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vfbquery
-Version: 0.3.3
+Version: 0.3.4
 Summary: Wrapper for querying VirtualFlyBrain knowledge graph.
 Home-page: https://github.com/VirtualFlyBrain/VFBquery
 Author: VirtualFlyBrain
@@ -53,7 +53,7 @@ vfb.get_term_info('FBbt_00003748')
   "Meta": {
     "Name": "[medulla](FBbt_00003748)",
     "Description": "The second optic neuropil, sandwiched between the lamina and the lobula complex. It is divided into 10 layers: 1-6 make up the outer (distal) medulla, the seventh (or serpentine) layer exhibits a distinct architecture and layers 8-10 make up the inner (proximal) medulla (Ito et al., 2014).",
-    "Comment": "",
+    "Comment": "Nern et al. (2025) - doi:10.1038/s41586-025-08746-0 say distal is M1-5 and M6-7 is central medulla.",
     "Types": "[anterior ectoderm derivative](FBbt_00025991); [synaptic neuropil domain](FBbt_00040007)",
     "Relationships": "[develops from](RO_0002202): [medulla anlage](FBbt_00001935); [is part of](BFO_0000050): [adult optic lobe](FBbt_00003701)"
   },
@@ -143,20 +143,9 @@ vfb.get_term_info('FBbt_00003748')
       "count": 4
     }
   ],
-  "IsIndividual": False,
-  "IsClass": True,
+  "IsIndividual": false,
+  "IsClass": true,
   "Examples": {
-    "VFB_00017894": [
-      {
-        "id": "VFB_00030624",
-        "label": "medulla on adult brain template JFRC2",
-        "thumbnail": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/thumbnail.png",
-        "thumbnail_transparent": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/thumbnailT.png",
-        "nrrd": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/volume.nrrd",
-        "wlz": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/volume.wlz",
-        "obj": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/volume_man.obj"
-      }
-    ],
     "VFB_00101384": [
       {
         "id": "VFB_00101385",
@@ -179,6 +168,17 @@ vfb.get_term_info('FBbt_00003748')
         "obj": "https://www.virtualflybrain.org/data/VFB/i/0010/2107/VFB_00101567/volume_man.obj"
       }
     ],
+    "VFB_00017894": [
+      {
+        "id": "VFB_00030624",
+        "label": "medulla on adult brain template JFRC2",
+        "thumbnail": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/thumbnail.png",
+        "thumbnail_transparent": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/thumbnailT.png",
+        "nrrd": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/volume.nrrd",
+        "wlz": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/volume.wlz",
+        "obj": "https://www.virtualflybrain.org/data/VFB/i/0003/0624/VFB_00017894/volume_man.obj"
+      }
+    ],
     "VFB_00030786": [
       {
         "id": "VFB_00030810",
@@ -191,7 +191,7 @@ vfb.get_term_info('FBbt_00003748')
       }
     ]
   },
-  "IsTemplate": False,
+  "IsTemplate": false,
   "Synonyms": [
     {
       "label": "ME",
@@ -323,14 +323,14 @@ vfb.get_term_info('VFB_00000001')
            "score": "0.61",
            "name": "[fru-M-000204](VFB_00000333)",
            "tags": "Expression_pattern_fragment|Neuron|Adult|lineage_CM3",
-            "thumbnail": "[![fru-M-000204 aligned to JRC2018U](http://www.virtualflybrain.org/data/VFB/i/0000/0333/VFB_00101567/thumbnail.png 'fru-M-000204 aligned to JRC2018U')](VFB_00101567,VFB_00000333)"
+            "thumbnail": "[![fru-M-000204 aligned to JFRC2](http://www.virtualflybrain.org/data/VFB/i/0000/0333/VFB_00017894/thumbnail.png 'fru-M-000204 aligned to JFRC2')](VFB_00017894,VFB_00000333)"
          },
          {
            "id": "VFB_00000333",
            "score": "0.61",
            "name": "[fru-M-000204](VFB_00000333)",
            "tags": "Expression_pattern_fragment|Neuron|Adult|lineage_CM3",
-            "thumbnail": "[![fru-M-000204 aligned to JFRC2](http://www.virtualflybrain.org/data/VFB/i/0000/0333/VFB_00017894/thumbnail.png 'fru-M-000204 aligned to JFRC2')](VFB_00017894,VFB_00000333)"
+            "thumbnail": "[![fru-M-000204 aligned to JRC2018U](http://www.virtualflybrain.org/data/VFB/i/0000/0333/VFB_00101567/thumbnail.png 'fru-M-000204 aligned to JRC2018U')](VFB_00101567,VFB_00000333)"
          },
          {
            "id": "VFB_00002439",
@@ -347,32 +347,20 @@ vfb.get_term_info('VFB_00000001')
            "thumbnail": "[![fru-M-900020 aligned to JFRC2](http://www.virtualflybrain.org/data/VFB/i/0000/2439/VFB_00017894/thumbnail.png 'fru-M-900020 aligned to JFRC2')](VFB_00017894,VFB_00002439)"
          },
          {
-            "id": "VFB_00001880",
+            "id": "VFB_00000845",
            "score": "0.59",
-            "name": "[fru-M-100041](VFB_00001880)",
+            "name": "[fru-M-100191](VFB_00000845)",
            "tags": "Expression_pattern_fragment|Neuron|Adult|lineage_CM3",
-            "thumbnail": "[![fru-M-100041 aligned to JRC2018U](http://www.virtualflybrain.org/data/VFB/i/0000/1880/VFB_00101567/thumbnail.png 'fru-M-100041 aligned to JRC2018U')](VFB_00101567,VFB_00001880)"
+            "thumbnail": "[![fru-M-100191 aligned to JRC2018U](http://www.virtualflybrain.org/data/VFB/i/0000/0845/VFB_00101567/thumbnail.png 'fru-M-100191 aligned to JRC2018U')](VFB_00101567,VFB_00000845)"
          }
        ]
      },
      "output_format": "table",
-      "count": 44
+      "count": 60
    }
  ],
  "IsIndividual": True,
  "Images": {
-    "VFB_00101567": [
-      {
-        "id": "VFB_00000001",
-        "label": "fru-M-200266",
-        "thumbnail": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/thumbnail.png",
-        "thumbnail_transparent": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/thumbnailT.png",
-        "nrrd": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/volume.nrrd",
-        "wlz": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/volume.wlz",
-        "obj": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/volume.obj",
-        "swc": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/volume.swc"
-      }
-    ],
    "VFB_00017894": [
      {
        "id": "VFB_00000001",
@@ -384,6 +372,18 @@ vfb.get_term_info('VFB_00000001')
        "obj": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00017894/volume.obj",
        "swc": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00017894/volume.swc"
      }
+    ],
+    "VFB_00101567": [
+      {
+        "id": "VFB_00000001",
+        "label": "fru-M-200266",
+        "thumbnail": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/thumbnail.png",
+        "thumbnail_transparent": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/thumbnailT.png",
+        "nrrd": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/volume.nrrd",
+        "wlz": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/volume.wlz",
+        "obj": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/volume.obj",
+        "swc": "https://www.virtualflybrain.org/data/VFB/i/0000/0001/VFB_00101567/volume.swc"
+      }
    ]
  },
  "IsClass": False,
@@ -405,7 +405,6 @@ Template example:
 ```python
 vfb.get_term_info('VFB_00101567')
 ```
-
 ```json
 {
   "Name": "JRC2018U",
vfbquery-0.3.4.dist-info/RECORD ADDED

@@ -0,0 +1,14 @@
+test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+test/readme_parser.py,sha256=puvcq4_oEltjx_faw1kQJ8mmIWiQU-40oLJjtJBQCsQ,4170
+test/term_info_queries_test.py,sha256=EiL6Od5L9W6Xm6MPRMU4V_4TfoVsSSWjO0wh0F2ITH0,34242
+test/test_examples_diff.py,sha256=ep_BzA-7az2OUPxUIsS3ReFV8LwuzGv8yIL0HirOGsc,15699
+vfbquery/__init__.py,sha256=ZirzaFa-rgWYvqvK08rwxachUEz_qjhKrm3C8s3tlZY,76
+vfbquery/solr_fetcher.py,sha256=U8mHaBJrwjncl1eU_gnNj5CGhEb-s9dCpcUTXTifQOY,3984
+vfbquery/term_info_queries.py,sha256=oE-Ogm7jCPPlKtD3W3EtttYZcHnInwDOpOj-phAEOaI,42009
+vfbquery/test_utils.py,sha256=7wUA3xgaGu3eLnjC98msNYt1wL538nOimVJjkC0ZLjU,5791
+vfbquery/vfb_queries.py,sha256=NnkWB3shgnv2ovG-WimcuzXZtQCjRzIqdWPnQvoY4Hs,66014
+vfbquery-0.3.4.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+vfbquery-0.3.4.dist-info/METADATA,sha256=T4Kxnz0tLOR_hmgIc-ZwfNWbkW_twWUYnMsy9t5jmzc,63097
+vfbquery-0.3.4.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+vfbquery-0.3.4.dist-info/top_level.txt,sha256=UgaRTTOy4JBdKbkr_gkeknT4eaibm3ztF520G4NTQZs,14
+vfbquery-0.3.4.dist-info/RECORD,,
vfbquery-0.3.3.dist-info/RECORD DELETED

@@ -1,14 +0,0 @@
-test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-test/readme_parser.py,sha256=puvcq4_oEltjx_faw1kQJ8mmIWiQU-40oLJjtJBQCsQ,4170
-test/term_info_queries_test.py,sha256=9FxV3ZmRdi9TRjAS-1N0YRpCKAu4EthdQYOe_NluUuc,34527
-test/test_examples_diff.py,sha256=ep_BzA-7az2OUPxUIsS3ReFV8LwuzGv8yIL0HirOGsc,15699
-vfbquery/__init__.py,sha256=fvjBDvRlhtKkMa69WAVHY0bteLh4qlLH4FAhiZBOLRE,76
-vfbquery/solr_fetcher.py,sha256=U8mHaBJrwjncl1eU_gnNj5CGhEb-s9dCpcUTXTifQOY,3984
-vfbquery/term_info_queries.py,sha256=79Bm2RJzAZyVPQE5HWhsvybeBYrz2AbFgbM0ympIxao,41399
-vfbquery/test_utils.py,sha256=HKFsQ2wqZYxR_wS9V6RIM3SguIi9kX5kyYDAXgpfp1A,1623
-vfbquery/vfb_queries.py,sha256=04CulLonZ_O7vgoiMvN3zBP5y2Ww9AXRdmO-ljD_r6Y,63914
-vfbquery-0.3.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-vfbquery-0.3.3.dist-info/METADATA,sha256=mmUICAjBFXeF8poNW_I3wkbyssbeNE5RzmXXRaKhk7Q,62999
-vfbquery-0.3.3.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-vfbquery-0.3.3.dist-info/top_level.txt,sha256=UgaRTTOy4JBdKbkr_gkeknT4eaibm3ztF520G4NTQZs,14
-vfbquery-0.3.3.dist-info/RECORD,,