jsongrapher 1.6__py3-none-any.whl → 2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,65 @@ import json
  #TODO: put an option to suppress warnings from JSONRecordCreator


+ #Start of the portion of the code for the GUI##
+ global_records_list = [] #This list holds onto records as they are added. Index 0 is the merged record. Each other index corresponds to record number (like 1 is first record, 2 is second record, etc)
+
+
+ #This is a JSONGrapher specific function
+ #that takes filenames and adds new JSONGrapher records to a global_records_list.
+ #If the all_selected_file_paths and newly_added_file_paths are [] and [], that means to clear the global_records_list.
+ def add_records_to_global_records_list_and_plot(all_selected_file_paths, newly_added_file_paths, plot_immediately=True):
+     #First check if we have received a "clear" condition.
+     if (len(all_selected_file_paths) == 0) and (len(newly_added_file_paths) == 0):
+         global_records_list.clear()
+         return global_records_list
+     if len(global_records_list) == 0: #this is for the "first time" the function is called, but the newly_added_file_paths could be a list longer than one.
+         first_record = create_new_JSONGrapherRecord()
+         first_record.import_from_file(newly_added_file_paths[0]) #get the first newly added record.
+         #index 0 will be the one we merge into.
+         global_records_list.append(first_record)
+         #index 1 will be where we store the first record, so we append again.
+         global_records_list.append(first_record)
+         #Now, check if there are more records.
+         if len(newly_added_file_paths) > 1:
+             for filename_and_path_index, filename_and_path in enumerate(newly_added_file_paths):
+                 if filename_and_path_index == 0:
+                     pass #passing because we've already added the first file.
+                 else:
+                     current_record = create_new_JSONGrapherRecord() #make a new record
+                     current_record.import_from_file(filename_and_path)
+                     global_records_list.append(current_record) #append it to the global records list
+                     global_records_list[0] = merge_JSONGrapherRecords([global_records_list[0], current_record]) #merge into the main record of the records list, which is at index 0.
+     else: #For the case that global_records_list already exists when the function is called.
+         for filename_and_path_index, filename_and_path in enumerate(newly_added_file_paths):
+             current_record = create_new_JSONGrapherRecord() #make a new record
+             current_record.import_from_file(filename_and_path)
+             global_records_list.append(current_record) #append it to the global records list
+             global_records_list[0] = merge_JSONGrapherRecords([global_records_list[0], current_record]) #merge into the main record of the records list, which is at index 0.
+     if plot_immediately:
+         #plot index 0, which is the most up to date merged record.
+         global_records_list[0].plot_with_plotly()
+     json_string_for_download = json.dumps(global_records_list[0].fig_dict, indent=4)
+     return [json_string_for_download] #For the GUI, this function should return a list with something convertible to string to save to file, in index 0.
+
+
+
+ #This is a JSONGrapher specific wrapper function to drag_and_drop_gui create_and_launch.
+ #This launches the python based JSONGrapher GUI.
+ def launch():
+     #Check if we have the module we need. First try with the package, then locally.
+     try:
+         import JSONGrapher.drag_and_drop_gui as drag_and_drop_gui
+     except:
+         #if the package is not present, or does not have it, try getting the module locally.
+         import drag_and_drop_gui
+     selected_files = drag_and_drop_gui.create_and_launch(app_name = "JSONGRapher", function_for_after_file_addition=add_records_to_global_records_list_and_plot)
+     #We will not return the selected_files, and instead will return the global_records_list.
+     return global_records_list
+
+ ## End of the portion of the code for the GUI##
+
+
  #the function create_new_JSONGrapherRecord is intended to be "like" a wrapper function for people who find it more
  # intuitive to create class objects that way, this variable is actually just a reference
  # so that we don't have to map the arguments.
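For orientation, a minimal usage sketch of the GUI entry point added above, assuming this module ships as JSONGrapher.JSONRecordCreator (the module path and file names below are assumptions, not taken from the diff):

    import JSONGrapher.JSONRecordCreator as jrc  # assumed module path

    # Launch the drag-and-drop GUI; each dropped file is handed to
    # add_records_to_global_records_list_and_plot by the GUI callback.
    records = jrc.launch()

    # The same callback can be driven without the GUI:
    jrc.add_records_to_global_records_list_and_plot([], [])  # clears global_records_list
    jrc.add_records_to_global_records_list_and_plot(["record_1.json"], ["record_1.json"])  # hypothetical file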
@@ -14,20 +73,239 @@ def create_new_JSONGrapherRecord(hints=False):
      return new_record


+ #This is a function for merging JSONGrapher records.
+ #recordsList is a list of records.
+ #Each record can be a JSONGrapherRecord object (a python class object) or a dictionary (meaning, a JSONGrapher JSON as a dictionary).
+ #If a record is received that is a string, then the function will attempt to convert that into a dictionary.
+ #The units used will be those of the first record encountered.
+ def merge_JSONGrapherRecords(recordsList):
+     import copy
+     recordsAsDictionariesList = []
+     merged_JSONGrapherRecord = create_new_JSONGrapherRecord()
+     #first make a list of all the records as dictionaries.
+     for record in recordsList:
+         if type(record) == type({}):
+             recordsAsDictionariesList.append(record)
+         elif type(record) == type("string"):
+             record = json.loads(record)
+             recordsAsDictionariesList.append(record)
+         else: #this assumes a JSONGrapherRecord object was received.
+             record = record.fig_dict
+             recordsAsDictionariesList.append(record)
+     #next, iterate through the list of dictionaries and merge each data object together.
+     #We'll use the units of the first dictionary.
+     #We'll put the first record in directly, keeping the units etc. Then we will "merge" in the additional data sets.
+     #Iterate across all records received.
+     for dictionary_index, current_fig_dict in enumerate(recordsAsDictionariesList):
+         if dictionary_index == 0: #this is the first record case. We'll use this to start the list and also gather the units.
+             merged_JSONGrapherRecord.fig_dict = copy.deepcopy(recordsAsDictionariesList[0])
+             first_record_x_label = recordsAsDictionariesList[0]["layout"]["xaxis"]["title"]["text"] #this is a dictionary.
+             first_record_y_label = recordsAsDictionariesList[0]["layout"]["yaxis"]["title"]["text"] #this is a dictionary.
+             first_record_x_units = separate_label_text_from_units(first_record_x_label)["units"]
+             first_record_y_units = separate_label_text_from_units(first_record_y_label)["units"]
+         else:
+             #first get the units of this particular record.
+             this_record_x_label = recordsAsDictionariesList[dictionary_index]["layout"]["xaxis"]["title"]["text"] #this is a dictionary.
+             this_record_y_label = recordsAsDictionariesList[dictionary_index]["layout"]["yaxis"]["title"]["text"] #this is a dictionary.
+             this_record_x_units = separate_label_text_from_units(this_record_x_label)["units"]
+             this_record_y_units = separate_label_text_from_units(this_record_y_label)["units"]
+             #now get the ratio of the units for this record relative to the first record.
+             #if the units are identical, then just make the ratio 1.
+             if this_record_x_units == first_record_x_units:
+                 x_units_ratio = 1
+             else:
+                 x_units_ratio = get_units_scaling_ratio(this_record_x_units, first_record_x_units)
+             if this_record_y_units == first_record_y_units:
+                 y_units_ratio = 1
+             else:
+                 y_units_ratio = get_units_scaling_ratio(this_record_y_units, first_record_y_units)
+             #A record could have more than one data series, but they will all have the same units.
+             #Thus, we use a function that will scale all of the dataseries at one time.
+             if (x_units_ratio == 1) and (y_units_ratio == 1): #skip scaling if it's not necessary.
+                 scaled_fig_dict = current_fig_dict
+             else:
+                 scaled_fig_dict = scale_fig_dict_values(current_fig_dict, x_units_ratio, y_units_ratio)
+             #now, add the scaled data objects to the original one.
+             #This is fairly easy using a list extend.
+             merged_JSONGrapherRecord.fig_dict["data"].extend(scaled_fig_dict["data"])
+     return merged_JSONGrapherRecord
+
+
+ ### Start of portion of the file that has functions for scaling data to the same units ###
+ #The below function takes two units strings, such as
+ # "(((kg)/m))/s" and "(((g)/m))/s"
+ # and then returns the scaling ratio of units_string_1 / units_string_2
+ # So in the above example, it would return 1000.
+ #Could add "tag_characters"='<>' as an optional argument to this and other functions
+ #to make the option of other characters for custom units.
+ def get_units_scaling_ratio(units_string_1, units_string_2):
+     # Ensure both strings are properly encoded in UTF-8
+     units_string_1 = units_string_1.encode("utf-8").decode("utf-8")
+     units_string_2 = units_string_2.encode("utf-8").decode("utf-8")
+     #If the unit strings are identical, there is no need to go further.
+     if units_string_1 == units_string_2:
+         return 1
+     import unitpy #this function uses unitpy.
+     #Replace "^" with "**" for unit conversion purposes.
+     #We won't need to replace back because this function only returns the ratio in the end.
+     units_string_1 = units_string_1.replace("^", "**")
+     units_string_2 = units_string_2.replace("^", "**")
+     #For now, we need to tag µ symbol units as if they are custom units, because unitpy doesn't support that symbol yet (May 2025).
+     units_string_1 = tag_micro_units(units_string_1)
+     units_string_2 = tag_micro_units(units_string_2)
+     #Next, we need to extract custom units and add them to unitpy.
+     custom_units_1 = extract_tagged_strings(units_string_1)
+     custom_units_2 = extract_tagged_strings(units_string_2)
+     for custom_unit in custom_units_1:
+         add_custom_unit_to_unitpy(custom_unit)
+     for custom_unit in custom_units_2:
+         add_custom_unit_to_unitpy(custom_unit)
+     #Now, remove the "<" and ">" and put them back later if needed.
+     units_string_1 = units_string_1.replace('<','').replace('>','')
+     units_string_2 = units_string_2.replace('<','').replace('>','')
+     try:
+         #First we need to make a unitpy "U" object and multiply it by 1.
+         #While it may be possible to find a way using the "Q" objects directly, this is the way I found so far, which converts the U object into a Q object.
+         units_object_converted = 1*unitpy.U(units_string_1)
+         ratio_with_units_object = units_object_converted.to(units_string_2)
+     except: #the above can fail if there are reciprocal units like 1/bar rather than (bar)**(-1), so we have an except statement that tries "that" fix if there is a failure.
+         units_string_1 = convert_inverse_units(units_string_1)
+         units_string_2 = convert_inverse_units(units_string_2)
+         units_object_converted = 1*unitpy.U(units_string_1)
+         ratio_with_units_object = units_object_converted.to(units_string_2)
+     ratio_with_units_string = str(ratio_with_units_object)
+     ratio_only = ratio_with_units_string.split(' ')[0] #what comes out may look like 1000 gram/(meter second), so we split and take the first part.
+     ratio_only = float(ratio_only)
+     return ratio_only #function returns the ratio only. If the function is later changed to return more, then units_strings may need further replacements.
+
+ def return_custom_units_markup(units_string, custom_units_list):
+     """puts markup around custom units with '<' and '>' """
+     sorted_custom_units_list = sorted(custom_units_list, key=len, reverse=True)
+     #the units should be sorted from longest to shortest if not already sorted that way.
+     for custom_unit in sorted_custom_units_list:
+         units_string.replace(custom_unit, '<'+custom_unit+'>')
+     return units_string
+
+ #This function tags microunits.
+ #However, because unitpy gives unexpected behavior with the micro symbol,
+ #we are actually going to change them from "µm" to "<microfrogm>".
+ def tag_micro_units(units_string):
+     # Unicode representations of micro symbols:
+     # U+00B5 → µ (Micro Sign)
+     # U+03BC → μ (Greek Small Letter Mu)
+     # U+1D6C2 → 𝜇 (Mathematical Greek Small Letter Mu)
+     # U+1D6C1 → 𝝁 (Mathematical Bold Greek Small Letter Mu)
+     micro_symbols = ["µ", "μ", "𝜇", "𝝁"]
+     # Check if any micro symbol is in the string
+     if not any(symbol in units_string for symbol in micro_symbols):
+         return units_string  # If none are found, return the original string unchanged
+     import re
+     # Construct a regex pattern to detect any micro symbol followed by letters
+     pattern = r"[" + "".join(micro_symbols) + r"][a-zA-Z]+"
+     # Extract matches and sort them by length (longest first)
+     matches = sorted(re.findall(pattern, units_string), key=len, reverse=True)
+     # Replace matches with custom unit notation <X>
+     for match in matches:
+         frogified_match = f"<microfrog{match[1:]}>"
+         units_string = units_string.replace(match, frogified_match)
+     return units_string
+
+ #We are actually going to change them back to "µm" from "<microfrogm>"
+ def untag_micro_units(units_string):
+     if "<microfrog" not in units_string:  # Check if any frogified unit exists
+         return units_string
+     import re
+     # Pattern to detect the frogified micro-units
+     pattern = r"<microfrog([a-zA-Z]+)>"
+     # Replace frogified units with µ + the original unit suffix
+     return re.sub(pattern, r"µ\1", units_string)
+
+ def add_custom_unit_to_unitpy(unit_string):
+     import unitpy
+     from unitpy.definitions.entry import Entry
+     #need to put an entry into "bases" because the BaseSet class will pull from that dictionary.
+     unitpy.definitions.unit_base.bases[unit_string] = unitpy.definitions.unit_base.BaseUnit(label=unit_string, abbr=unit_string,dimension=unitpy.definitions.dimensions.dimensions["amount_of_substance"])
+     #Then we need to make a BaseSet object to put in. Confusingly, we *do not* put a BaseUnit object into the base_unit argument, below.
+     #We use "mole" to avoid conflicting with any other existing units.
+     base_unit = unitpy.definitions.unit_base.BaseSet(mole = 1)
+     #base_unit = unitpy.definitions.unit_base.BaseUnit(label=unit_string, abbr=unit_string,dimension=unitpy.definitions.dimensions.dimensions["amount_of_substance"])
+     new_entry = Entry(label = unit_string, abbr = unit_string, base_unit = base_unit, multiplier= 1)
+     #only add the entry if it is missing. A duplicate entry would cause crashing later.
+     #We can't use the "unitpy.ledger.get_entry" function because the entries have custom == comparisons
+     # and for the new entry, it will also return a special NoneType that we can't easily check.
+     # The structure unitpy.ledger.units is a list, but unitpy.ledger._lookup is a dictionary we can use
+     # to check if the key for the new unit is added or not.
+     if unit_string not in unitpy.ledger._lookup:
+         unitpy.ledger.add_unit(new_entry) #implied return is here. No return needed.
+
+ def extract_tagged_strings(text):
+     """Extracts tags surrounded by <> from a given string. Used for custom units.
+     Returns them as a list sorted from longest to shortest."""
+     import re
+     list_of_tags = re.findall(r'<(.*?)>', text)
+     set_of_tags = set(list_of_tags)
+     sorted_tags = sorted(set_of_tags, key=len, reverse=True)
+     return sorted_tags
+
+ #This function is to convert things like (1/bar) to (bar)**(-1).
+ #It was written by copilot and refined by further prompting of copilot by testing.
+ #The depth is because the function works iteratively and then stops when finished.
+ def convert_inverse_units(expression, depth=100):
+     import re
+     # Patterns to match valid reciprocals while ignoring multiplied units, so (1/bar)*bar should be handled correctly.
+     patterns = [r"1/\((1/.*?)\)", r"1/([a-zA-Z]+)"]
+     for _ in range(depth):
+         new_expression = expression
+         for pattern in patterns:
+             new_expression = re.sub(pattern, r"(\1)**(-1)", new_expression)
+
+         # Stop early if no more changes are made
+         if new_expression == expression:
+             break
+         expression = new_expression
+     return expression
+
+ #the below function takes in a fig_dict, as well as x and/or y scaling values.
+ #The function then scales the values in the data of the fig_dict and returns the scaled fig_dict.
+ def scale_fig_dict_values(fig_dict, num_to_scale_x_values_by = 1, num_to_scale_y_values_by = 1):
+     import copy
+     scaled_fig_dict = copy.deepcopy(fig_dict)
+     #iterate across the data objects inside, and change them.
+     for data_index, dataseries in enumerate(scaled_fig_dict["data"]):
+         dataseries = scale_dataseries_dict(dataseries, num_to_scale_x_values_by=num_to_scale_x_values_by, num_to_scale_y_values_by=num_to_scale_y_values_by)
+         scaled_fig_dict[data_index] = dataseries #this line shouldn't be needed due to mutable references, but adding for clarity and to be safe.
+     return scaled_fig_dict
+
+
+ def scale_dataseries_dict(dataseries_dict, num_to_scale_x_values_by = 1, num_to_scale_y_values_by = 1):
+     import numpy as np
+     dataseries = dataseries_dict
+     dataseries["x"] = list(np.array(dataseries["x"], dtype=float)*num_to_scale_x_values_by) #convert to numpy array for multiplication, then back to list.
+     dataseries["y"] = list(np.array(dataseries["y"], dtype=float)*num_to_scale_y_values_by) #convert to numpy array for multiplication, then back to list.
+
+     # Ensure elements are converted to standard Python types.
+     dataseries["x"] = [float(val) for val in dataseries["x"]] #This line written by copilot.
+     dataseries["y"] = [float(val) for val in dataseries["y"]] #This line written by copilot.
+     return dataseries_dict
+
+ ### End of portion of the file that has functions for scaling data to the same units ###
+
  class JSONGrapherRecord:
      """
      This class enables making JSONGrapher records. Each instance represents a structured JSON record for a graph.
      One can optionally provide an existing JSONGrapher record during creation to pre-populate the object.
+     One can also manipulate the fig_dict inside directly, using syntax like Record.fig_dict["comments"] = ...

      Arguments & Attributes (all are optional):
-     comments (str): General description or metadata related to the entire record. Can include citation links. Goes into the record's top level comments field.
-     datatype: The datatype is the experiment type or similar, it is used to assess which records can be compared and which (if any) schema to compare to. Use of single underscores between words is recommended. This ends up being the datatype field of the full JSONGrapher file. Avoid using double underscores '__' in this field unless you have read the manual about hierarchical datatypes.
+     comments (str): Can be used to put in a general description or metadata related to the entire record. Can include citation links. Goes into the record's top level comments field.
+     datatype: The datatype is the experiment type or similar, it is used to assess which records can be compared and which (if any) schema to compare to. Use of single underscores between words is recommended. This ends up being the datatype field of the full JSONGrapher file. Avoid using double underscores '__' in this field unless you have read the manual about hierarchical datatypes. The user can choose to provide a URL to a schema in this field, rather than a datatype name.
      graph_title: Title of the graph or the dataset being represented.
-     data_objects_list (list): List of data series dictionaries to pre-populate the record.
+     data_objects_list (list): List of data series dictionaries to pre-populate the record. These may contain 'simulate' fields in them to call javascript source code for simulating on the fly.
+     simulate_as_added: Boolean. True by default. If true, any data series that are added with a simulation field will have an immediate simulation call attempt.
      x_data: Single series x data in a list or array-like structure.
      y_data: Single series y data in a list or array-like structure.
-     x_axis_label_including_units: A string with units provided in parentheses. Use of multiplication "*" and division "/" and parentheses "( )" are allowed within in the units .
-     y_axis_label_including_units: A string with units provided in parentheses. Use of multiplication "*" and division "/" and parentheses "( )" are allowed within in the units .
+     x_axis_label_including_units: A string with units provided in parentheses. Use of multiplication "*" and division "/" and parentheses "( )" are allowed within the units. The dimensions of units can be multiple, such as mol/s. SI units are expected. Custom units must be inside < > and at the beginning. For example, (<frogs>*kg/s) would be permissible. Units should be non-plural (kg instead of kgs) and should be abbreviated (m not meter). Use “^” for exponents. It is recommended to have no numbers in the units other than exponents, and to thus use (bar)^(-1) rather than 1/bar.
+     y_axis_label_including_units: A string with units provided in parentheses. Use of multiplication "*" and division "/" and parentheses "( )" are allowed within the units. The dimensions of units can be multiple, such as mol/s. SI units are expected. Custom units must be inside < > and at the beginning. For example, (<frogs>*kg/s) would be permissible. Units should be non-plural (kg instead of kgs) and should be abbreviated (m not meter). Use “^” for exponents. It is recommended to have no numbers in the units other than exponents, and to thus use (bar)^(-1) rather than 1/bar.
      layout: A dictionary defining the layout of the graph, including axis titles,
          comments, and general formatting options.

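The unit-handling helpers above can be exercised directly; the expected outputs below simply restate the examples given in the functions' own comments (a kg-to-g ratio of 1000, 1/bar rewritten as (bar)**(-1), and µ-prefixed units tagged as custom units), with an assumed import path:

    from JSONGrapher.JSONRecordCreator import (  # assumed module path
        get_units_scaling_ratio, convert_inverse_units, tag_micro_units)

    get_units_scaling_ratio("(((kg)/m))/s", "(((g)/m))/s")  # -> 1000.0, per the comment above
    convert_inverse_units("1/bar")                          # -> "(bar)**(-1)"
    tag_micro_units("µm")                                   # -> "<microfrogm>"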
@@ -38,7 +316,7 @@ class JSONGrapherRecord:
      populate_from_existing_record: Populates the attributes from an existing JSONGrapher record.
      """

-     def __init__(self, comments="", graph_title="", datatype="", data_objects_list = None, x_data=None, y_data=None, x_axis_label_including_units="", y_axis_label_including_units ="", plot_type ="", layout={}, existing_JSONGrapher_record=None):
+     def __init__(self, comments="", graph_title="", datatype="", data_objects_list = None, simulate_as_added = True, x_data=None, y_data=None, x_axis_label_including_units="", y_axis_label_including_units ="", plot_type ="", layout={}, existing_JSONGrapher_record=None):
          """
          Initialize a JSONGrapherRecord instance with optional attributes or an existing record.

@@ -63,28 +341,35 @@ class JSONGrapherRecord:
              "datatype": datatype, # Top-level datatype (datatype)
              "data": data_objects_list if data_objects_list else [], # Data series list
              "layout": layout if layout else {
-                 "title": graph_title,
-                 "xaxis": {"title": x_axis_label_including_units},
-                 "yaxis": {"title": y_axis_label_including_units}
+                 "title": {"text": graph_title},
+                 "xaxis": {"title": {"text": x_axis_label_including_units}},
+                 "yaxis": {"title": {"text": y_axis_label_including_units}}
              }
          }

-         self.plot_type = plot_type #the plot_type is actually a series level attribute. However, if somebody sets the plot_type at the record level, then we will use that plot_type for all of the individual series.
+
+         if simulate_as_added: #will try to simulate. But because this is the default, will use a try and except rather than crash program.
+             try:
+                 self.fig_dict = simulate_as_needed_in_fig_dict(self.fig_dict)
+             except:
+                 pass
+
+         self.plot_type = plot_type #the plot_type is normally actually a series level attribute. However, if somebody sets the plot_type at the record level, then we will use that plot_type for all of the individual series.
          if plot_type != "":
              self.fig_dict["plot_type"] = plot_type

-         # Populate attributes if an existing JSONGrapher record is provided.
+         # Populate attributes if an existing JSONGrapher record is provided, as a dictionary.
          if existing_JSONGrapher_record:
              self.populate_from_existing_record(existing_JSONGrapher_record)

          # Initialize the hints dictionary, for use later, since the actual locations in the JSONRecord can be non-intuitive.
          self.hints_dictionary = {}
          # Adding hints. Here, the keys are the full field locations within the record.
-         self.hints_dictionary["['comments']"] = "Use Record.set_comments() to populate this field. Put in a general description or metadata related to the entire record. Can include citation links. Goes into the record's top level comments field."
-         self.hints_dictionary["['datatype']"] = "Use Record.set_datatype() to populate this field. This is the datatype, like experiment type, and is used to assess which records can be compared and which (if any) schema to compare to. Use of single underscores between words is recommended. Avoid using double underscores '__' in this field unless you have read the manual about hierarchical datatypes."
-         self.hints_dictionary["['layout']['title']"] = "Use Record.set_graph_title() to populate this field. This is the title for the graph."
-         self.hints_dictionary["['layout']['xaxis']['title']"] = "Use Record.set_x_axis_label() to populate this field. This is the x axis label and should have units in parentheses. The units can include multiplication '*', division '/' and parentheses '( )'. Scientific and imperial units are recommended. Custom units can be contained in pointy brackets'< >'." # x-axis label
-         self.hints_dictionary["['layout']['yaxis']['title']"] = "Use Record.set_y_axis_label() to populate this field. This is the y axis label and should have units in parentheses. The units can include multiplication '*', division '/' and parentheses '( )'. Scientific and imperial units are recommended. Custom units can be contained in pointy brackets'< >'."
+         self.hints_dictionary["['comments']"] = "Use Record.set_comments() to populate this field. Can be used to put in a general description or metadata related to the entire record. Can include citations and links. Goes into the record's top level comments field."
+         self.hints_dictionary["['datatype']"] = "Use Record.set_datatype() to populate this field. This is the datatype, like experiment type, and is used to assess which records can be compared and which (if any) schema to compare to. Use of single underscores between words is recommended. Avoid using double underscores '__' in this field unless you have read the manual about hierarchical datatypes. The user can choose to provide a URL to a schema in this field, rather than a datatype name."
+         self.hints_dictionary["['layout']['title']['text']"] = "Use Record.set_graph_title() to populate this field. This is the title for the graph."
+         self.hints_dictionary["['layout']['xaxis']['title']['text']"] = "Use Record.set_x_axis_label() to populate this field. This is the x axis label and should have units in parentheses. The units can include multiplication '*', division '/' and parentheses '( )'. Scientific and imperial units are recommended. Custom units can be contained in pointy brackets'< >'." # x-axis label
+         self.hints_dictionary["['layout']['yaxis']['title']['text']"] = "Use Record.set_y_axis_label() to populate this field. This is the y axis label and should have units in parentheses. The units can include multiplication '*', division '/' and parentheses '( )'. Scientific and imperial units are recommended. Custom units can be contained in pointy brackets'< >'."


      #this function enables printing the current record.
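Because the 2.8 layout nests titles as {"title": {"text": ...}}, labels set through the record now land one level deeper than in 1.6. A short sketch using the setter methods shown elsewhere in this diff (the label values are examples only):

    record = create_new_JSONGrapherRecord()
    record.set_graph_title("Example Graph")                  # example value
    record.set_x_axis_label_including_units("Time (s)")      # example value
    record.set_y_axis_label_including_units("Rate (mol/s)")  # example value
    record.fig_dict["layout"]["xaxis"]["title"]["text"]      # -> "Time (s)"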
@@ -92,11 +377,11 @@ class JSONGrapherRecord:
          """
          Returns a JSON-formatted string of the record with an indent of 4.
          """
-         print("Warning: Printing directly will return the raw record without some automatic updates. Please use the syntax RecordObject.print_to_inspect() which will make automatic consistency updates and validation checks to the record before printing.")
+         print("Warning: Printing directly will return the raw record without some automatic updates. It is recommended to use the syntax RecordObject.print_to_inspect() which will make automatic consistency updates and validation checks to the record before printing.")
          return json.dumps(self.fig_dict, indent=4)


-     def add_data_series(self, series_name, x_values=[], y_values=[], simulate={}, comments="", plot_type="", uid="", line="", extra_fields=None):
+     def add_data_series(self, series_name, x_values=[], y_values=[], simulate={}, simulate_as_added = True, comments="", plot_type="", uid="", line="", extra_fields=None):
          """
          This is the normal way of adding an x,y data series.
          """
@@ -104,6 +389,7 @@ class JSONGrapherRecord:
          # x: List of x-axis values. Or similar structure.
          # y: List of y-axis values. Or similar structure.
          # simulate: This is an optional field which, if used, is a JSON object with entries for calling external simulation scripts.
+         # simulate_as_added: Boolean for calling simulation scripts immediately.
          # comments: Optional description of the data series.
          # plot_type: Type of the data (e.g., scatter, line).
          # line: Dictionary describing line properties (e.g., shape, width).
@@ -128,6 +414,11 @@ class JSONGrapherRecord:
          #add simulate field if included.
          if simulate:
              data_series_dict["simulate"] = simulate
+         if simulate_as_added: #will try to simulate. But because this is the default, will use a try and except rather than crash program.
+             try:
+                 data_series_dict = simulate_data_series(data_series_dict)
+             except:
+                 pass
          # Add extra fields if provided, they will be added.
          if extra_fields:
              data_series_dict.update(extra_fields)
@@ -138,6 +429,17 @@ class JSONGrapherRecord:
          newest_record_index = len(self.fig_dict["data"]) - 1
          self.set_plot_type_one_data_series(newest_record_index, plot_type)

+     def change_data_series_name(self, series_index, series_name):
+         self.fig_dict["data"][series_index]["name"] = series_name
+
+     #this function forces the re-simulation of a particular dataseries.
+     #The simulator link will be extracted from the record, by default.
+     def simulate_data_series_by_index(self, data_series_index, simulator_link='', verbose=False):
+         data_series_dict = self.fig_dict["data"][data_series_index]
+         data_series_dict = simulate_data_series(data_series_dict, simulator_link=simulator_link, verbose=verbose)
+         self.fig_dict["data"][data_series_index] = data_series_dict #implied return
+         return data_series_dict #Extra regular return
+
      #this function returns the current record.
      def get_record(self):
          """
@@ -145,6 +447,7 @@ class JSONGrapherRecord:
          """
          return self.fig_dict

+     #The update_and_validate function will clean for plotly.
      def print_to_inspect(self, update_and_validate=True, validate=True, remove_remaining_hints=False):
          if remove_remaining_hints == True:
              self.remove_hints()
@@ -159,25 +462,68 @@ class JSONGrapherRecord:
          Populates attributes from an existing JSONGrapher record.
          existing_JSONGrapher_record: A dictionary representing an existing JSONGrapher record.
          """
-         if "comments" in existing_JSONGrapher_record: self.fig_dict["comments"] = existing_JSONGrapher_record["comments"]
-         if "datatype" in existing_JSONGrapher_record: self.fig_dict["datatype"] = existing_JSONGrapher_record["datatype"]
-         if "data" in existing_JSONGrapher_record: self.fig_dict["data"] = existing_JSONGrapher_record["data"]
-         if "layout" in existing_JSONGrapher_record: self.fig_dict["layout"] = existing_JSONGrapher_record["layout"]
+         #While we expect a dictionary, if a JSONGrapher object is provided, we will simply pull the dictionary out of that.
+         if type(existing_JSONGrapher_record) != type({}):
+             existing_JSONGrapher_record = existing_JSONGrapher_record.fig_dict
+         if type(existing_JSONGrapher_record) == type({}):
+             if "comments" in existing_JSONGrapher_record: self.fig_dict["comments"] = existing_JSONGrapher_record["comments"]
+             if "datatype" in existing_JSONGrapher_record: self.fig_dict["datatype"] = existing_JSONGrapher_record["datatype"]
+             if "data" in existing_JSONGrapher_record: self.fig_dict["data"] = existing_JSONGrapher_record["data"]
+             if "layout" in existing_JSONGrapher_record: self.fig_dict["layout"] = existing_JSONGrapher_record["layout"]
+
+     #the below function takes in an existing JSONGrapher record, and merges the data in.
+     #This requires scaling any data as needed, according to units.
+     def merge_in_JSONGrapherRecord(self, fig_dict_to_merge_in):
+         import copy
+         fig_dict_to_merge_in = copy.deepcopy(fig_dict_to_merge_in)
+         if type(fig_dict_to_merge_in) == type({}):
+             pass #this is what we are expecting.
+         elif type(fig_dict_to_merge_in) == type("string"):
+             fig_dict_to_merge_in = json.loads(fig_dict_to_merge_in)
+         else: #this assumes a JSONGrapherRecord object was received.
+             fig_dict_to_merge_in = fig_dict_to_merge_in.fig_dict
+         #Now extract the units of the current record.
+         first_record_x_label = self.fig_dict["layout"]["xaxis"]["title"]["text"] #this is a dictionary.
+         first_record_y_label = self.fig_dict["layout"]["yaxis"]["title"]["text"] #this is a dictionary.
+         first_record_x_units = separate_label_text_from_units(first_record_x_label)["units"]
+         first_record_y_units = separate_label_text_from_units(first_record_y_label)["units"]
+         #Get the units of the new record.
+         this_record_x_label = fig_dict_to_merge_in["layout"]["xaxis"]["title"]["text"] #this is a dictionary.
+         this_record_y_label = fig_dict_to_merge_in["layout"]["yaxis"]["title"]["text"] #this is a dictionary.
+         this_record_x_units = separate_label_text_from_units(this_record_x_label)["units"]
+         this_record_y_units = separate_label_text_from_units(this_record_y_label)["units"]
+         #now get the ratio of the units for this record relative to the first record.
+         x_units_ratio = get_units_scaling_ratio(this_record_x_units, first_record_x_units)
+         y_units_ratio = get_units_scaling_ratio(this_record_y_units, first_record_y_units)
+         #A record could have more than one data series, but they will all have the same units.
+         #Thus, we use a function that will scale all of the dataseries at one time.
+         scaled_fig_dict = scale_fig_dict_values(fig_dict_to_merge_in, x_units_ratio, y_units_ratio)
+         #now, add the scaled data objects to the original one.
+         #This is fairly easy using a list extend.
+         self.fig_dict["data"].extend(scaled_fig_dict["data"])


+
+     def import_from_dict(self, fig_dict):
+         self.fig_dict = fig_dict
+
+     def import_from_file(self, json_filename_or_object):
+         self.import_from_json(json_filename_or_object)
+
+     #the json object can be a filename string or can be a json object which is actually a dictionary.
+     def import_from_json(self, json_filename_or_object):
+         if type(json_filename_or_object) == type(""): #assume it's a filename and path.
+             # Open the file in read mode with UTF-8 encoding
+             with open(json_filename_or_object, 'r', encoding='utf-8') as file:
+                 # Read the entire content of the file
+                 content = file.read()
+                 self.fig_dict = json.loads(content)
+         else:
+             self.fig_dict = json_filename_or_object
+
      def set_plot_type_one_data_series(self, data_series_index, plot_type):
-         fields_dict = plot_type_to_field_values(plot_type)
-         #get the data_series_dict.
          data_series_dict = self.fig_dict['data'][data_series_index]
-         #update the data_series_dict.
-         if fields_dict.get("mode_field"):
-             data_series_dict["mode"] = fields_dict["mode_field"]
-         if fields_dict.get("type_field"):
-             data_series_dict["type"] = fields_dict["type_field"]
-         if fields_dict.get("line_shape_field") != "":
-             data_series_dict.setdefault("line", {"shape": ''}) # Creates the field if it does not already exist.
-             data_series_dict["line"]["shape"] = fields_dict["line_shape_field"]
-
+         data_series_dict = set_data_series_dict_plot_type(data_series_dict=data_series_dict, plot_type=plot_type)
          #now put the data_series_dict back:
          self.fig_dict['data'][data_series_index] = data_series_dict

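A sketch of the new import-and-merge workflow added in this hunk; the file names are placeholders, and merge_in_JSONGrapherRecord rescales the incoming data to the units of the record it is merged into:

    record_1 = create_new_JSONGrapherRecord()
    record_1.import_from_file("first_record.json")    # placeholder filename
    record_2 = create_new_JSONGrapherRecord()
    record_2.import_from_file("second_record.json")   # placeholder filename

    record_1.merge_in_JSONGrapherRecord(record_2.fig_dict)   # merge into an existing record
    merged = merge_JSONGrapherRecords([record_1, record_2])  # or build a new merged record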
@@ -193,7 +539,7 @@ class JSONGrapherRecord:

      def update_plot_types(self, plot_type=None):
          """
-         updates the plot types for any existing data series.
+         updates the plot types for every existing data series.

          """
          #If optional argument not provided, take class instance setting.
@@ -202,6 +548,11 @@ class JSONGrapherRecord:
          #If the plot_type is not blank, use it for all series.
          if plot_type != "":
              self.set_plot_type_all_series(plot_type)
+         else: #if the plot_type is blank, then we will go through each data series and update them individually.
+             for data_series_index, data_series_dict in enumerate(self.fig_dict['data']):
+                 #This will update the data_series_dict as needed, putting a plot_type if there is not one.
+                 data_series_dict = set_data_series_dict_plot_type(data_series_dict=data_series_dict)
+                 self.fig_dict['data'][data_series_index] = data_series_dict

      def set_datatype(self, datatype):
          """
@@ -222,7 +573,7 @@ class JSONGrapherRecord:
          Updates the title of the graph in the layout dictionary.
          graph_title (str): The new title to set for the graph.
          """
-         self.fig_dict['layout']['title'] = graph_title
+         self.fig_dict['layout']['title']['text'] = graph_title

      def set_x_axis_label_including_units(self, x_axis_label_including_units, remove_plural_units=True):
          """
@@ -232,7 +583,7 @@ class JSONGrapherRecord:
          if "xaxis" not in self.fig_dict['layout'] or not isinstance(self.fig_dict['layout'].get("xaxis"), dict):
              self.fig_dict['layout']["xaxis"] = {} # Initialize x-axis as a dictionary if it doesn't exist.
          validation_result, warnings_list, x_axis_label_including_units = validate_JSONGrapher_axis_label(x_axis_label_including_units, axis_name="x", remove_plural_units=remove_plural_units)
-         self.fig_dict['layout']["xaxis"]["title"] = x_axis_label_including_units
+         self.fig_dict['layout']["xaxis"]["title"]['text'] = x_axis_label_including_units

      def set_y_axis_label_including_units(self, y_axis_label_including_units, remove_plural_units=True):
          """
@@ -243,23 +594,35 @@ class JSONGrapherRecord:
              self.fig_dict['layout']["yaxis"] = {} # Initialize y-axis as a dictionary if it doesn't exist.

          validation_result, warnings_list, y_axis_label_including_units = validate_JSONGrapher_axis_label(y_axis_label_including_units, axis_name="y", remove_plural_units=remove_plural_units)
-         self.fig_dict['layout']["yaxis"]["title"] = y_axis_label_including_units
+         self.fig_dict['layout']["yaxis"]["title"]['text'] = y_axis_label_including_units
+
+     #function to set the min and max of the x axis in plotly way.
+     def set_x_axis_range(self, min, max):
+         self.fig_dict["layout"]["xaxis"][0] = min
+         self.fig_dict["layout"]["xaxis"][1] = max
+     #function to set the min and max of the y axis in plotly way.
+     def set_y_axis_range(self, min, max):
+         self.fig_dict["layout"]["yaxis"][0] = min
+         self.fig_dict["layout"]["yaxis"][1] = max
+
+     #function to scale the values in the data series by arbitrary amounts.
+     def scale_record(self, num_to_scale_x_values_by = 1, num_to_scale_y_values_by = 1):
+         self.fig_dict = scale_fig_dict_values(self.fig_dict, num_to_scale_x_values_by=num_to_scale_x_values_by, num_to_scale_y_values_by=num_to_scale_y_values_by)

      def set_layout(self, comments="", graph_title="", x_axis_label_including_units="", y_axis_label_including_units="", x_axis_comments="",y_axis_comments="", remove_plural_units=True):
-         # comments: General comments about the layout.
+         # comments: General comments about the layout. Allowed by JSONGrapher, but will be removed if converted to a plotly object.
          # graph_title: Title of the graph.
          # xaxis_title: Title of the x-axis, including units.
-         # xaxis_comments: Comments related to the x-axis.
+         # xaxis_comments: Comments related to the x-axis. Allowed by JSONGrapher, but will be removed if converted to a plotly object.
          # yaxis_title: Title of the y-axis, including units.
-         # yaxis_comments: Comments related to the y-axis.
+         # yaxis_comments: Comments related to the y-axis. Allowed by JSONGrapher, but will be removed if converted to a plotly object.

          validation_result, warnings_list, x_axis_label_including_units = validate_JSONGrapher_axis_label(x_axis_label_including_units, axis_name="x", remove_plural_units=remove_plural_units)
          validation_result, warnings_list, y_axis_label_including_units = validate_JSONGrapher_axis_label(y_axis_label_including_units, axis_name="y", remove_plural_units=remove_plural_units)
-         self.fig_dict['layout'] = {
-             "title": graph_title,
-             "xaxis": {"title": x_axis_label_including_units},
-             "yaxis": {"title": y_axis_label_including_units}
-         }
+         self.fig_dict['layout']["title"]['text'] = graph_title
+         self.fig_dict['layout']["xaxis"]["title"]['text'] = x_axis_label_including_units
+         self.fig_dict['layout']["yaxis"]["title"]['text'] = y_axis_label_including_units
+

          #populate any optional fields, if provided:
          if len(comments) > 0:
@@ -272,13 +635,23 @@ class JSONGrapherRecord:

          return self.fig_dict['layout']

-     #TODO: add record validation to this function.
-     def export_to_json_file(self, filename, update_and_validate=True, validate=True, remove_remaining_hints=False):
+     #This function validates the output before exporting, and also has an option of removing hints.
+     #The update_and_validate function will clean for plotly.
+     #simulate_all_series will simulate any series as needed.
+     def export_to_json_file(self, filename, update_and_validate=True, validate=True, simulate_all_series = True, remove_simulate_fields= False, remove_remaining_hints=False):
          """
          writes the json to a file
          returns the json as a dictionary.
+         update_and_validate function will clean for plotly. One can alternatively only validate.
+         optionally simulates all series that have a simulate field (does so by default)
+         optionally removes the simulate field from all series that have a simulate field (does not do so by default)
          optionally removes hints before export and return.
          """
+         #if simulate_all_series is true, we'll try to simulate any series that need it, then clean the simulate fields out if requested.
+         if simulate_all_series == True:
+             self.fig_dict = simulate_as_needed_in_fig_dict(self.fig_dict)
+         if remove_simulate_fields == True:
+             self.fig_dict = clean_json_fig_dict(self.fig_dict, fields_to_update=['simulate'])
          if remove_remaining_hints == True:
              self.remove_hints()
          if update_and_validate == True: #this will do some automatic 'corrections' during the validation.
@@ -291,29 +664,89 @@ class JSONGrapherRecord:
          # Check if the filename has an extension and append `.json` if not
          if '.json' not in filename.lower():
              filename += ".json"
-         #Write to file.
-         with open(filename, 'w') as f:
+         #Write to file using UTF-8 encoding.
+         with open(filename, 'w', encoding='utf-8') as f:
              json.dump(self.fig_dict, f, indent=4)
          return self.fig_dict

-     def get_matplotlib_fig(self, update_and_validate=True):
+     #simulate all series will simulate any series as needed.
+     def get_plotly_fig(self, simulate_all_series = True, update_and_validate=True):
+         import plotly.io as pio
+         import copy
+         if simulate_all_series == True:
+             self.fig_dict = simulate_as_needed_in_fig_dict(self.fig_dict)
+         original_fig_dict = copy.deepcopy(self.fig_dict) #we will get a copy, because otherwise the original fig_dict will be forced to be overwritten.
+         #if simulate_all_series is true, we'll try to simulate any series that need it, then clean the simulate fields out.
+         if update_and_validate == True: #this will do some automatic 'corrections' during the validation.
+             self.update_and_validate_JSONGrapher_record()
+         self.fig_dict = clean_json_fig_dict(self.fig_dict, fields_to_update=['simulate', 'custom_units_chevrons'])
+         fig = pio.from_json(json.dumps(self.fig_dict))
+         self.fig_dict = original_fig_dict #restore the original fig_dict.
+         return fig
+
+     #simulate all series will simulate any series as needed.
+     def plot_with_plotly(self, simulate_all_series = True, update_and_validate=True):
+         fig = self.get_plotly_fig(simulate_all_series = simulate_all_series, update_and_validate=update_and_validate)
+         fig.show()
+         #No need for fig.close() for plotly figures.
+
+     #simulate all series will simulate any series as needed.
+     def export_to_plotly_png(self, filename, simulate_all_series = True, update_and_validate=True, timeout=10):
+         fig = self.get_plotly_fig(simulate_all_series = simulate_all_series, update_and_validate=update_and_validate)
+         # Save the figure to a file, but use the timeout version.
+         self.export_plotly_image_with_timeout(plotly_fig = fig, filename=filename, timeout=timeout)
+
+     def export_plotly_image_with_timeout(self, plotly_fig, filename, timeout=10):
+         # Ensure filename ends with .png
+         if not filename.lower().endswith(".png"):
+             filename += ".png"
+         import plotly.io as pio
+         pio.kaleido.scope.mathjax = None
+         fig = plotly_fig
+
+         def export():
+             try:
+                 fig.write_image(filename, engine="kaleido")
+             except Exception as e:
+                 print(f"Export failed: {e}")
+
+         import threading
+         thread = threading.Thread(target=export, daemon=True) # Daemon ensures cleanup
+         thread.start()
+         thread.join(timeout=timeout) # Wait up to 10 seconds
+         if thread.is_alive():
+             print("Skipping Plotly png export: Operation timed out. Plotly image export often does not work from Python. Consider using export_to_matplotlib_png.")
+
+     #update_and_validate will 'clean' for plotly.
+     #In the case of creating a matplotlib figure, this really just means removing excess fields.
+     #simulate all series will simulate any series as needed.
+     def get_matplotlib_fig(self, simulate_all_series = True, update_and_validate=True):
+         import copy
+         #if simulate_all_series is true, we'll try to simulate any series that need it, then clean the simulate fields out.
+         if simulate_all_series == True:
+             self.fig_dict = simulate_as_needed_in_fig_dict(self.fig_dict)
+         original_fig_dict = copy.deepcopy(self.fig_dict) #we will get a copy, because otherwise the original fig_dict will be forced to be overwritten.
          if update_and_validate == True: #this will do some automatic 'corrections' during the validation.
              self.update_and_validate_JSONGrapher_record()
+         self.fig_dict = clean_json_fig_dict(self.fig_dict, fields_to_update=['simulate', 'custom_units_chevrons'])
          fig = convert_JSONGrapher_dict_to_matplotlib_fig(self.fig_dict)
+         self.fig_dict = original_fig_dict #restore the original fig_dict.
          return fig

-     def plot_with_matplotlib(self, update_and_validate=True):
+     #simulate all series will simulate any series as needed.
+     def plot_with_matplotlib(self, simulate_all_series = True, update_and_validate=True):
          import matplotlib.pyplot as plt
-         fig = self.get_matplotlib_fig(update_and_validate=update_and_validate)
+         fig = self.get_matplotlib_fig(simulate_all_series = simulate_all_series, update_and_validate=update_and_validate)
          plt.show()
          plt.close(fig) #remove fig from memory.

-     def export_to_matplotlib_png(self, filename, update_and_validate=True):
+     #simulate all series will simulate any series as needed.
+     def export_to_matplotlib_png(self, filename, simulate_all_series = True, update_and_validate=True):
          import matplotlib.pyplot as plt
          # Ensure filename ends with .png
          if not filename.lower().endswith(".png"):
              filename += ".png"
-         fig = self.get_matplotlib_fig(update_and_validate=update_and_validate)
+         fig = self.get_matplotlib_fig(simulate_all_series = simulate_all_series, update_and_validate=update_and_validate)
          # Save the figure to a file
          fig.savefig(filename)
          plt.close(fig) #remove fig from memory.
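The plotting and export methods introduced in 2.8 share the simulate_all_series and update_and_validate switches; a sketch of typical calls on a populated record (output file names are placeholders):

    record.plot_with_plotly()                               # simulates series as needed, then shows the figure
    record.plot_with_matplotlib(simulate_all_series=False)
    record.export_to_json_file("example_output", remove_simulate_fields=True)  # ".json" appended if missing
    record.export_to_matplotlib_png("example_output")       # ".png" appended if missing
    record.export_to_plotly_png("example_output", timeout=10)  # prints a notice and skips if kaleido times out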
@@ -379,11 +812,14 @@ class JSONGrapherRecord:
                  current_field[current_path_key] = ""

      #Make some pointers to external functions, for convenience, so people can use syntax like record.function_name() if desired.
+     def apply_style(self, style_name):
+         self.fig_dict = apply_style_to_plotly_dict(self.fig_dict, style_name=style_name)
      def validate_JSONGrapher_record(self):
          validate_JSONGrapher_record(self)
      def update_and_validate_JSONGrapher_record(self):
          update_and_validate_JSONGrapher_record(self)

+
  # helper function to validate x axis and y axis labels.
  # label string will be the full label including units. Axis_name is typically "x" or "y"
  def validate_JSONGrapher_axis_label(label_string, axis_name="", remove_plural_units=True):
@@ -399,7 +835,6 @@ def validate_JSONGrapher_axis_label(label_string, axis_name="", remove_plural_un
          None: Prints warnings if any validation issues are found.
      """
      warnings_list = []
-
      #First check if the label is empty.
      if label_string == '':
          warnings_list.append(f"Your {axis_name} axis label is an empty string. JSONGrapher records should not have empty strings for axis labels.")
@@ -442,15 +877,18 @@ def units_plural_removal(units_to_check):
          - "changed" (Boolean): True, or False, where True means the string was changed to remove an "s" at the end.
          - "singularized" (string): The units parsed to be singular, if needed.
      """
-     #first check if we have the module we need. If not, return with no change.
-
+     #Check if we have the module we need. If not, return with no change.
      try:
          import JSONGrapher.units_list as units_list
      except:
-         units_changed_flag = False
-         return units_changed_flag, units_to_check #return None if there was no test.
-     #First try to check if units are blank or ends with "s" is in the units list.
+         #if JSONGrapher is not present, try getting the units_list file locally.
+         try:
+             import units_list
+         except: #if still not present, give up and avoid crashing.
+             units_changed_flag = False
+             return units_changed_flag, units_to_check #return None if there was no test.

+     #First try to check if units are blank or ends with "s" is in the units list.
      if (units_to_check == "") or (units_to_check[-1] != "s"):
          units_changed_flag = False
          units_singularized = units_to_check #return if string is blank or does not end with s.
@@ -472,6 +910,7 @@ def units_plural_removal(units_to_check):
  def separate_label_text_from_units(label_with_units):
      """
      Parses a label with a text string and units in parentheses after that, and returns the two parts.
+     This is meant to separate strings like "Time (s)"; it is not meant for strings like "5 (kg)".

      Args:
          value (str): A string containing a label and optional units enclosed in parentheses.
@@ -531,7 +970,8 @@ def validate_plotly_data_list(data):
          if not isinstance(trace, dict):
              warnings_list.append(f"Trace {i} is not a dictionary.")
              continue
-
+         if "comments" in trace:
+             warnings_list.append(f"Trace {i} has a comments field within the data. This is allowed by JSONGrapher, but is discouraged by plotly. By default, this will be removed when you export your record.")
          # Determine the type based on the fields provided
          trace_type = trace.get("type")
          if not trace_type:
@@ -567,6 +1007,7 @@ def parse_units(value):
      """
      Parses a numerical value and its associated units from a string. This is meant for scientific constants and parameters
      such as rate constants, the gravitational constant, or similar.
+     This function is not meant for separating the axis label from its units. For that, use separate_label_text_from_units.

      Args:
          value (str): A string containing a numeric value and optional units enclosed in parentheses.
@@ -580,7 +1021,7 @@ def parse_units(value):
      # Find the position of the first '(' and the last ')'
      start = value.find('(')
      end = value.rfind(')')
-
+     print("line 727", value)
      # Ensure both are found and properly ordered
      if start != -1 and end != -1 and end > start:
          number_part = value[:start].strip() # Everything before '('
@@ -597,6 +1038,36 @@ def parse_units(value):

      return parsed_output

+
+ #This function sets the plot_type of a data_series_dict
+ #based on some JSONGrapher options.
+ #It calls "plot_type_to_field_values"
+ #and then updates the data_series_dict accordingly, as needed.
+ def set_data_series_dict_plot_type(data_series_dict, plot_type=""):
+     if plot_type == "":
+         plot_type = data_series_dict.get('type', 'scatter') #get will return the second argument if the first argument is not present.
+     #We need to be careful about one case: in plotly, a "spline" is declared a scatter plot with data.line.shape = spline.
+     #So we need to check if we have spline set, in which case we make the plot_type scatter_spline when calling plot_type_to_field_values.
+     shape_field = data_series_dict.get('line', {}).get('shape', '') #get will return first argument if there, second if not, so can chain things.
+     #TODO: need to distinguish between "spline" and "scatter_spline" by checking for marker instructions.
+     if shape_field == 'spline':
+         plot_type = 'scatter_spline'
+     if shape_field == 'linear':
+         plot_type = 'scatter_line'
+     fields_dict = plot_type_to_field_values(plot_type)
+
+
+     #update the data_series_dict.
+     if fields_dict.get("mode_field"):
+         data_series_dict["mode"] = fields_dict["mode_field"]
+     if fields_dict.get("type_field"):
+         data_series_dict["type"] = fields_dict["type_field"]
+     if fields_dict.get("line_shape_field") != "":
+         data_series_dict.setdefault("line", {"shape": ''}) # Creates the field if it does not already exist.
+         data_series_dict["line"]["shape"] = fields_dict["line_shape_field"]
+     return data_series_dict
+
+ #This function creates a fields_dict for the function set_data_series_dict_plot_type
  def plot_type_to_field_values(plot_type):
      """
      Takes in a string that is a plot type, such as "scatter", "scatter_spline", etc.
@@ -608,32 +1079,38 @@ def plot_type_to_field_values(plot_type):
608
1079
  To these fields are used in the function set_plot_type_one_data_series
609
1080
 
610
1081
  """
611
-
612
1082
  fields_dict = {}
613
1083
  #initialize some variables.
614
- fields_dict["type_field"] = plot_type
1084
+ fields_dict["type_field"] = plot_type.lower()
615
1085
  fields_dict["mode_field"] = None
616
1086
  fields_dict["line_shape_field"] = None
617
1087
  # Assign the various types. This list of values was determined 'manually'.
618
- if plot_type == "scatter":
1088
+ if plot_type.lower() == ("scatter" or "markers"):
619
1089
  fields_dict["type_field"] = "scatter"
620
1090
  fields_dict["mode_field"] = "markers"
621
1091
  fields_dict["line_shape_field"] = None
622
- elif plot_type == "scatter_spline":
1092
+ elif plot_type.lower() == "scatter_spline":
623
1093
  fields_dict["type_field"] = "scatter"
624
1094
  fields_dict["mode_field"] = None
625
1095
  fields_dict["line_shape_field"] = "spline"
626
- elif plot_type == "spline":
627
- fields_dict["type_field"] = None
1096
+ elif plot_type.lower() == "spline":
1097
+ fields_dict["type_field"] = 'scatter'
628
1098
  fields_dict["mode_field"] = 'lines'
629
1099
  fields_dict["line_shape_field"] = "spline"
1100
+ elif plot_type.lower() == "scatter_line":
1101
+ fields_dict["type_field"] = 'scatter'
1102
+ fields_dict["mode_field"] = 'lines'
1103
+ fields_dict["line_shape_field"] = "linear"
630
1104
  return fields_dict
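A minimal editorial sketch, assuming the two functions above are in scope, of how a plot_type string maps to plotly fields and how set_data_series_dict_plot_type applies them to a data series:
#Editorial sketch: "scatter_spline" maps to a plotly scatter trace with a spline line shape.
fields_dict = plot_type_to_field_values("scatter_spline")
#fields_dict is expected to be {"type_field": "scatter", "mode_field": None, "line_shape_field": "spline"}
data_series_dict = {"x": [1, 2, 3], "y": [4, 5, 8], "type": "scatter", "line": {"shape": "spline"}}
data_series_dict = set_data_series_dict_plot_type(data_series_dict)  #detects the spline shape and keeps type "scatter"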
631
1105
 
632
1106
  #This function does updating of internal things before validating
633
1107
  #This is used before printing and returning the JSON record.
634
- def update_and_validate_JSONGrapher_record(record):
1108
+ def update_and_validate_JSONGrapher_record(record, clean_for_plotly=True):
635
1109
  record.update_plot_types()
636
1110
  record.validate_JSONGrapher_record()
1111
+ if clean_for_plotly:
1112
+ record.fig_dict = clean_json_fig_dict(record.fig_dict)
1113
+ return record
637
1114
 
638
1115
  #TODO: add the ability for this function to check against the schema.
639
1116
  def validate_JSONGrapher_record(record):
@@ -684,8 +1161,11 @@ def validate_JSONGrapher_record(record):
684
1161
  # Validate "title"
685
1162
  if "title" not in layout:
686
1163
  warnings_list.append("Missing 'layout.title' field.")
687
- elif not isinstance(layout["title"], str):
688
- warnings_list.append("'layout.title' should be a string.")
1164
+ # Validate "title.text"
1165
+ elif "text" not in layout["title"]:
1166
+ warnings_list.append("Missing 'layout.title.text' field.")
1167
+ elif not isinstance(layout["title"]["text"], str):
1168
+ warnings_list.append("'layout.title.text' should be a string.")
689
1169
 
690
1170
  # Validate "xaxis"
691
1171
  if "xaxis" not in layout:
@@ -696,8 +1176,10 @@ def validate_JSONGrapher_record(record):
696
1176
  # Validate "xaxis.title"
697
1177
  if "title" not in layout["xaxis"]:
698
1178
  warnings_list.append("Missing 'layout.xaxis.title' field.")
699
- elif not isinstance(layout["xaxis"]["title"], str):
700
- warnings_list.append("'layout.xaxis.title' should be a string.")
1179
+ elif "text" not in layout["xaxis"]["title"]:
1180
+ warnings_list.append("Missing 'layout.xaxis.title.text' field.")
1181
+ elif not isinstance(layout["xaxis"]["title"]["text"], str):
1182
+ warnings_list.append("'layout.xaxis.title.text' should be a string.")
701
1183
 
702
1184
  # Validate "yaxis"
703
1185
  if "yaxis" not in layout:
@@ -708,8 +1190,10 @@ def validate_JSONGrapher_record(record):
708
1190
  # Validate "yaxis.title"
709
1191
  if "title" not in layout["yaxis"]:
710
1192
  warnings_list.append("Missing 'layout.yaxis.title' field.")
711
- elif not isinstance(layout["yaxis"]["title"], str):
712
- warnings_list.append("'layout.yaxis.title' should be a string.")
1193
+ elif "text" not in layout["yaxis"]["title"]:
1194
+ warnings_list.append("Missing 'layout.yaxis.title.text' field.")
1195
+ elif not isinstance(layout["yaxis"]["title"]["text"], str):
1196
+ warnings_list.append("'layout.yaxis.title.text' should be a string.")
713
1197
 
714
1198
  # Return validation result
715
1199
  if warnings_list:
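The checks above expect the newer plotly title format, in which each title is a dictionary with a "text" entry rather than a bare string. A minimal layout that passes these checks (an editorial sketch) looks like this:
#Editorial sketch of a layout in the dictionary-style title format the validator expects.
layout_example = {
    "title": {"text": "Graph Title"},
    "xaxis": {"title": {"text": "Time (years)"}},
    "yaxis": {"title": {"text": "Height (m)"}},
}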
@@ -798,14 +1282,14 @@ def convert_JSONGrapher_dict_to_matplotlib_fig(fig_dict):
798
1282
  # Extract layout details
799
1283
  layout = fig_dict.get("layout", {})
800
1284
  title = layout.get("title", {})
801
- if isinstance(title, dict):
1285
+ if isinstance(title, dict): #This if/else block is not very readable; it could be refactored later.
802
1286
  ax.set_title(title.get("text", "Converted Plotly Figure"))
803
1287
  else:
804
1288
  ax.set_title(title if isinstance(title, str) else "Converted Plotly Figure")
805
1289
 
806
1290
  xaxis = layout.get("xaxis", {})
807
1291
  xlabel = "X-Axis" # Default label
808
- if isinstance(xaxis, dict):
1292
+ if isinstance(xaxis, dict): #This if/else block is not very readable; it could be refactored later.
809
1293
  title_obj = xaxis.get("title", {})
810
1294
  xlabel = title_obj.get("text", "X-Axis") if isinstance(title_obj, dict) else title_obj
811
1295
  elif isinstance(xaxis, str):
@@ -813,7 +1297,7 @@ def convert_JSONGrapher_dict_to_matplotlib_fig(fig_dict):
813
1297
  ax.set_xlabel(xlabel)
814
1298
  yaxis = layout.get("yaxis", {})
815
1299
  ylabel = "Y-Axis" # Default label
816
- if isinstance(yaxis, dict):
1300
+ if isinstance(yaxis, dict): #This if/else block is not very readable; it could be refactored later.
817
1301
  title_obj = yaxis.get("title", {})
818
1302
  ylabel = title_obj.get("text", "Y-Axis") if isinstance(title_obj, dict) else title_obj
819
1303
  elif isinstance(yaxis, str):
@@ -821,6 +1305,10 @@ def convert_JSONGrapher_dict_to_matplotlib_fig(fig_dict):
821
1305
  ax.set_ylabel(ylabel)
822
1306
  ax.legend()
823
1307
  return fig
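A hedged usage sketch (editorial) for the matplotlib conversion. It assumes matplotlib is installed; the shape of the data entry is an assumption, since the data-plotting portion of the function is not visible in this hunk.
#Editorial sketch: convert a small fig_dict to a matplotlib figure and save it.
example_fig_dict = {
    "data": [{"name": "Series A", "x": [1, 2, 3], "y": [4, 5, 8]}],  #assumed data entry shape
    "layout": {
        "title": {"text": "Tree Growth"},
        "xaxis": {"title": {"text": "Time (years)"}},
        "yaxis": {"title": {"text": "Height (m)"}},
    },
}
matplotlib_fig = convert_JSONGrapher_dict_to_matplotlib_fig(example_fig_dict)
matplotlib_fig.savefig("tree_growth.png")  #requires matplotlib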
1308
+
1309
+
1310
+
1311
+
824
1312
 
825
1313
  #The below function works, but because it depends on the python plotly package, we avoid using it
826
1314
  #To decrease the number of dependencies.
@@ -869,26 +1357,351 @@ def convert_plotly_dict_to_matplotlib(fig_dict):
869
1357
  ax.set_ylabel(plotly_fig.layout.yaxis.title.text if plotly_fig.layout.yaxis.title else "Y-Axis")
870
1358
 
871
1359
  return fig
1360
+
1361
+
1362
+ def apply_style_to_plotly_dict(plotly_json, style_name):
1363
+ """
1364
+ Apply a predefined style to a Plotly JSON object based on a style name which may be a journal name.
1365
+
1366
+ :param plotly_json: dict, Plotly JSON object.
1367
+ :param style_name: str, Name of the style or journal.
1368
+ :return: dict, Updated Plotly JSON object.
1369
+ """
1370
+ styles_available = {
1371
+ "Nature": {
1372
+ "layout": {
1373
+ "title": {"font": {"size": 24, "family": "Times New Roman", "color": "black"}},
1374
+ "font": {"size": 18, "family": "Times New Roman"},
1375
+ "paper_bgcolor": "white",
1376
+ "plot_bgcolor": "white",
1377
+ }
1378
+ },
1379
+ "Science": {
1380
+ "layout": {
1381
+ "title": {"font": {"size": 22, "family": "Arial", "color": "black"}},
1382
+ "font": {"size": 16, "family": "Arial"},
1383
+ "paper_bgcolor": "white",
1384
+ "plot_bgcolor": "white",
1385
+ }
1386
+ }
1387
+ }
1388
+
1389
+ # Get the style for the specified journal, default to no change if not found
1390
+ style_dict = styles_available.get(style_name, {})
1391
+
1392
+ # Ensure title field is merged properly to avoid overwriting
1393
+ plotly_json.setdefault("layout", {})
1394
+ plotly_json["layout"].setdefault("title", {})
1395
+
1396
+ # Merge title settings separately to preserve existing text
1397
+ plotly_json["layout"]["title"] = {**plotly_json["layout"]["title"], **style_dict.get("layout", {}).get("title", {})}
1398
+
1399
+ # Merge other layout settings
1400
+ for key, value in style_dict.get("layout", {}).items():
1401
+ if key != "title": # Skip title since it was handled separately
1402
+ plotly_json["layout"][key] = value
1403
+
1404
+ return plotly_json
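An editorial usage sketch showing that applying a named style merges the style's layout settings while keeping any existing title text:
#Editorial sketch: apply the "Nature" style without losing the existing title text.
plotly_json_example = {"layout": {"title": {"text": "Adsorption Isotherm"}}}
styled_json = apply_style_to_plotly_dict(plotly_json_example, "Nature")
#styled_json["layout"]["title"] keeps the original "text" and gains the Nature font settings;
#"font", "paper_bgcolor", and "plot_bgcolor" in the layout come from the style.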
1405
+
1406
+
1407
+ ### Start section of code with functions for cleaning fig_dicts for plotly compatibility ###
1408
+
1409
+ def update_title_field(data, depth=1, max_depth=10):
1410
+ """ This function is intended to make JSONGrapher .json files compatible with the newer plotly recommended title field formatting
1411
+ which is necessary to do things like change the font, and also necessary for being able to convert a JSONGrapher json_dict to python plotly figure objects.
1412
+ """ Recursively checks for 'title' fields and converts them to dictionary format. """
1413
+ if depth > max_depth or not isinstance(data, dict):
1414
+ return data
1415
+
1416
+ for key, value in data.items():
1417
+ if key == "title" and isinstance(value, str):
1418
+ data[key] = {"text": value}
1419
+ elif isinstance(value, dict): # Nested dictionary
1420
+ data[key] = update_title_field(value, depth + 1, max_depth)
1421
+ elif isinstance(value, list): # Lists can contain nested dictionaries
1422
+ data[key] = [update_title_field(item, depth + 1, max_depth) if isinstance(item, dict) else item for item in value]
1423
+
1424
+ return data
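A small editorial sketch of the conversion update_title_field performs on string-style titles:
#Editorial sketch: bare string titles become {"text": ...} dictionaries, recursively.
old_style = {"layout": {"title": "Graph Title", "xaxis": {"title": "Time (years)"}}}
new_style = update_title_field(old_style)
#new_style == {"layout": {"title": {"text": "Graph Title"}, "xaxis": {"title": {"text": "Time (years)"}}}}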
1425
+
1426
+ def remove_extra_information_field(data, depth=1, max_depth=10):
1427
+ """ This function is intended to make JSONGrapher .json files compatible with the current plotly format expectations
1428
+ and also necessary for being able to convert a JSONGrapher json_dict to python plotly figure objects.
1429
+ """Recursively checks for 'extraInformation' fields and removes them."""
1430
+ if depth > max_depth or not isinstance(data, dict):
1431
+ return data
1432
+
1433
+ # Use a copy of the dictionary keys to safely modify the dictionary during iteration
1434
+ for key in list(data.keys()):
1435
+ if key == ("extraInformation" or "extra_information"):
1436
+ del data[key] # Remove the field
1437
+ elif isinstance(data[key], dict): # Nested dictionary
1438
+ data[key] = remove_extra_information_field(data[key], depth + 1, max_depth)
1439
+ elif isinstance(data[key], list): # Lists can contain nested dictionaries
1440
+ data[key] = [
1441
+ remove_extra_information_field(item, depth + 1, max_depth) if isinstance(item, dict) else item for item in data[key]
1442
+ ]
1443
+
1444
+ return data
1445
+
1446
+
1447
+ def remove_nested_comments(data, top_level=True):
1448
+ """ This function is intended to make JSONGrapher .json files compatible with the current plotly format expectations
1449
+ and also necessary for being able to convert a JSONGrapher json_dict to python plotly figure objects.
1450
+ """Removes 'comments' fields that are not at the top level of the JSON-dict. Starts with 'top_level = True' when dict is first passed in then becomes false after that. """
1451
+ if not isinstance(data, dict):
1452
+ return data
1453
+ # Process nested structures
1454
+ for key in list(data.keys()):
1455
+ if isinstance(data[key], dict): # Nested dictionary
1456
+ data[key] = remove_nested_comments(data[key], top_level=False)
1457
+ elif isinstance(data[key], list): # Lists can contain nested dictionaries
1458
+ data[key] = [
1459
+ remove_nested_comments(item, top_level=False) if isinstance(item, dict) else item for item in data[key]
1460
+ ]
1461
+ # Only remove 'comments' if not at the top level
1462
+ if not top_level:
1463
+ data = {k: v for k, v in data.items() if k != "comments"}
1464
+ return data
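An editorial sketch of remove_nested_comments: the top-level 'comments' entry survives while nested ones are dropped.
#Editorial sketch: only nested 'comments' fields are removed.
record_example = {
    "comments": "kept (top level)",
    "layout": {"comments": "removed (nested)", "title": {"text": "Graph Title"}},
}
cleaned_example = remove_nested_comments(record_example)
#cleaned_example == {"comments": "kept (top level)", "layout": {"title": {"text": "Graph Title"}}}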
1465
+
1466
+ def remove_simulate_field(json_fig_dict):
1467
+ data_dicts_list = json_fig_dict['data']
1468
+ for data_dict in data_dicts_list:
1469
+ data_dict.pop('simulate', None) #pop with a default avoids a KeyError when 'simulate' is absent.
1470
+ json_fig_dict['data'] = data_dicts_list #not strictly necessary since the list is modified in place, but kept for clarity.
1471
+ return json_fig_dict
1472
+
1473
+ def remove_custom_units_chevrons(json_fig_dict):
1474
+ json_fig_dict['layout']['xaxis']['title']['text'] = json_fig_dict['layout']['xaxis']['title']['text'].replace('<','').replace('>','')
1475
+ json_fig_dict['layout']['yaxis']['title']['text'] = json_fig_dict['layout']['yaxis']['title']['text'].replace('<','').replace('>','')
1476
+ return json_fig_dict
1477
+
1478
+
1479
+ def clean_json_fig_dict(json_fig_dict, fields_to_update=["title_field", "extraInformation", "nested_comments"]):
1480
+ """ This function is intended to make JSONGrapher .json files compatible with the current plotly format expectations
1481
+ and also necessary for being able to convert a JSONGrapher json_dict to python plotly figure objects.
1482
+ This function can also remove the 'simulate' field from data series. However, that is not the default behavior
1483
+ because one would not want to do that by mistake before simulation is performed.
1484
+ """
1485
+ fig_dict = json_fig_dict
1486
+ #unmodified_data = copy.deepcopy(data)
1487
+ if "title_field" in fields_to_update:
1488
+ fig_dict = update_title_field(fig_dict)
1489
+ if "extraInformation" in fields_to_update:
1490
+ fig_dict = remove_extra_information_field(fig_dict)
1491
+ if "nested_comments" in fields_to_update:
1492
+ fig_dict = remove_nested_comments(fig_dict)
1493
+ if "simulate" in fields_to_update:
1494
+ fig_dict = remove_simulate_field(fig_dict)
1495
+ if "custom_units_chevrons" in fields_to_update:
1496
+ fig_dict = remove_custom_units_chevrons(fig_dict)
1497
+
1498
+ return fig_dict
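An editorial usage sketch for clean_json_fig_dict; note that "simulate" and "custom_units_chevrons" are only removed when requested explicitly. The variable name some_fig_dict is a placeholder.
#Editorial sketch: clean a fig_dict for plotly, including the optional removal of 'simulate' fields.
cleaned_fig_dict = clean_json_fig_dict(
    some_fig_dict,
    fields_to_update=["title_field", "extraInformation", "nested_comments", "simulate"],
)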
1499
+
1500
+ ### End section of code with functions for cleaning fig_dicts for plotly compatibility ###
1501
+
1502
+ ### Beginning of section of file that has functions for calling external javascript simulators ###
1503
+
1504
+ def run_js_simulation(javascript_simulator_url, simulator_input_json_dict, verbose = False):
1505
+ """
1506
+ Downloads a JavaScript file using its URL, extracts the filename, appends an export statement,
1507
+ executes it with Node.js, and parses the output.
1508
+
1509
+ Parameters:
1510
+ javascript_simulator_url (str): URL of the raw JavaScript file to download and execute. Must have a function named simulate.
1511
+ simulator_input_json_dict (dict): Input parameters for the JavaScript simulator.
1512
+
1513
+ # Example inputs
1514
+ javascript_simulator_url = "https://github.com/AdityaSavara/JSONGrapherExamples/blob/main/ExampleSimulators/Langmuir_Isotherm.js"
1515
+ simulator_input_json_dict = {
1516
+ "simulate": {
1517
+ "K_eq": None,
1518
+ "sigma_max": "1.0267670459667 (mol/kg)",
1519
+ "k_ads": "200 (1/(bar * s))",
1520
+ "k_des": "100 (1/s)"
1521
+ }
1522
+ }
1523
+
1524
+
1525
+ Returns:
1526
+ dict: Parsed JSON output from the JavaScript simulation, or None if an error occurred.
1527
+ """
1528
+ import requests
1529
+ import subprocess
1530
+ import json
1531
+ import os
1532
+
1533
+ # Convert to raw GitHub URL only if "raw" is not in the original URL
1534
+ # For example, the first link below gets converted to the second one.
1535
+ # https://github.com/AdityaSavara/JSONGrapherExamples/blob/main/ExampleSimulators/Langmuir_Isotherm.js
1536
+ # https://raw.githubusercontent.com/AdityaSavara/JSONGrapherExamples/main/ExampleSimulators/Langmuir_Isotherm.js
1537
+
1538
+ if "raw" not in javascript_simulator_url:
1539
+ javascript_simulator_url = convert_to_raw_github_url(javascript_simulator_url)
1540
+
1541
+ # Extract filename from URL
1542
+ js_filename = os.path.basename(javascript_simulator_url)
1543
+
1544
+ # Download the JavaScript file
1545
+ response = requests.get(javascript_simulator_url)
1546
+
1547
+ if response.status_code == 200:
1548
+ with open(js_filename, "w") as file:
1549
+ file.write(response.text)
1550
+
1551
+ # Append the export statement to the JavaScript file
1552
+ with open(js_filename, "a") as file:
1553
+ file.write("\nmodule.exports = { simulate };")
1554
+
1555
+ # Convert input dictionary to a JSON string
1556
+ input_json_str = json.dumps(simulator_input_json_dict)
1557
+
1558
+ # Prepare JavaScript command for execution
1559
+ js_command = f"""
1560
+ const simulator = require('./{js_filename}');
1561
+ console.log(JSON.stringify(simulator.simulate({input_json_str})));
1562
+ """
1563
+
1564
+ result = subprocess.run(["node", "-e", js_command], capture_output=True, text=True)
1565
+
1566
+ # Print output and errors if verbose
1567
+ if verbose:
1568
+ print("Raw JavaScript Output:", result.stdout)
1569
+ print("Node.js Errors:", result.stderr)
1570
+
1571
+ # Parse JSON if valid
1572
+ if result.stdout.strip():
1573
+ try:
1574
+ data_dict_with_simulation = json.loads(result.stdout) #This is the normal case.
1575
+ return data_dict_with_simulation
1576
+ except json.JSONDecodeError:
1577
+ print("Error: JavaScript output is not valid JSON.")
1578
+ return None
1579
+ else:
1580
+ print(f"Error: Unable to fetch JavaScript file. Status code {response.status_code}")
1581
+ return None
1582
+
1583
+ def convert_to_raw_github_url(url):
1584
+ """
1585
+ Converts a GitHub file URL to its raw content URL if necessary, preserving the filename.
1586
+ This function is really a support function for run_js_simulation
1587
+ """
1588
+ from urllib.parse import urlparse
1589
+ parsed_url = urlparse(url)
1590
+
1591
+ # If the URL is already a raw GitHub link, return it unchanged
1592
+ if "raw.githubusercontent.com" in parsed_url.netloc:
1593
+ return url
1594
+
1595
+ path_parts = parsed_url.path.strip("/").split("/")
1596
+
1597
+ # Ensure it's a valid GitHub file URL
1598
+ if "github.com" in parsed_url.netloc and len(path_parts) >= 4:
1599
+ if path_parts[2] == "blob":
1600
+ # If the URL contains "blob", adjust extraction
1601
+ user, repo, branch = path_parts[:2] + [path_parts[3]]
1602
+ file_path = "/".join(path_parts[4:]) # Keep full file path including filename
1603
+ else:
1604
+ # Standard GitHub file URL (without "blob")
1605
+ user, repo, branch = path_parts[:3]
1606
+ file_path = "/".join(path_parts[3:]) # Keep full file path including filename
1607
+
1608
+ return f"https://raw.githubusercontent.com/{user}/{repo}/{branch}/{file_path}"
1609
+
1610
+ return url # Return unchanged if not a GitHub file URL
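An editorial sketch of the conversion described above, using the example URL from the run_js_simulation docstring:
#Editorial sketch: a "blob" GitHub URL is rewritten to its raw.githubusercontent.com form.
blob_url = "https://github.com/AdityaSavara/JSONGrapherExamples/blob/main/ExampleSimulators/Langmuir_Isotherm.js"
raw_url = convert_to_raw_github_url(blob_url)
#raw_url == "https://raw.githubusercontent.com/AdityaSavara/JSONGrapherExamples/main/ExampleSimulators/Langmuir_Isotherm.js"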
1611
+
1612
+ #This function takes in a data_series_dict object and then
1613
+ #calls an external javascript simulation if needed
1614
+ #Then fills the data_series dict with the simulated data.
1615
+ def simulate_data_series(data_series_dict, simulator_link='', verbose=False):
1616
+ if simulator_link == '':
1617
+ simulator_link = data_series_dict["simulate"]["model"]
1618
+ #need to provide the link and the data_dict
1619
+ simulation_return = run_js_simulation(simulator_link, data_series_dict, verbose = verbose)
1620
+ data_series_dict_filled = simulation_return["data"]
1621
+ return data_series_dict_filled
1622
+
1623
+ #Function that goes through a fig_dict data series and simulates each data series as needed.
1624
+ #could probably change this into a loop that calls simulate_specific_data_series_by_index
1625
+ #If the simulated data returned has "x_label" and/or "y_label" with units, those will be used to scale the data, then will be removed.
1626
+ def simulate_as_needed_in_fig_dict(fig_dict, simulator_link='', verbose=False):
1627
+ data_dicts_list = fig_dict['data']
1628
+ for data_dict_index, data_dict in enumerate(data_dicts_list):
1629
+ if 'simulate' in data_dict:
1630
+ data_dict_filled = simulate_data_series(data_dict, simulator_link=simulator_link, verbose=verbose)
1631
+ #data_dict_filled may include "x_label" and/or "y_label". If it does, we'll need to check about scaling units.
1632
+ if (("x_label" in data_dict_filled) or ("y_label" in data_dict_filled)):
1633
+ #first, get the units that are in the layout of fig_dict so we know what to convert to.
1634
+ existing_record_x_label = fig_dict["layout"]["xaxis"]["title"]["text"] #the title field is a dict; its "text" entry holds the label string.
1635
+ existing_record_y_label = fig_dict["layout"]["yaxis"]["title"]["text"] #the title field is a dict; its "text" entry holds the label string.
1636
+ existing_record_x_units = separate_label_text_from_units(existing_record_x_label)["units"]
1637
+ existing_record_y_units = separate_label_text_from_units(existing_record_y_label)["units"]
1638
+ #now, get the units from the simulation output.
1639
+ simulated_data_series_x_units = separate_label_text_from_units(data_dict_filled['x_label'])["units"]
1640
+ simulated_data_series_y_units = separate_label_text_from_units(data_dict_filled['y_label'])["units"]
1641
+ x_units_ratio = get_units_scaling_ratio(simulated_data_series_x_units, existing_record_x_units)
1642
+ y_units_ratio = get_units_scaling_ratio(simulated_data_series_y_units, existing_record_y_units)
1643
+ #Scale the data series values so they match the units already in the record's layout.
1644
+ scale_dataseries_dict(data_dict_filled, num_to_scale_x_values_by = x_units_ratio, num_to_scale_y_values_by = y_units_ratio)
1645
+ #Now need to remove the "x_label" and "y_label" to be compatible with plotly.
1646
+ data_dict_filled.pop("x_label", None)
1647
+ data_dict_filled.pop("y_label", None)
1648
+ data_dicts_list[data_dict_index] = data_dict_filled
1649
+ fig_dict['data'] = data_dicts_list
1650
+ return fig_dict
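An editorial usage sketch: a fig_dict whose data series carries a 'simulate' field is filled in place. Node.js and network access are required; the axis labels and units here are assumptions, while the simulator URL and parameter names come from the run_js_simulation docstring above.
#Editorial sketch (assumed fig_dict shape): fill any 'simulate' data series before plotting.
fig_dict_example = {
    "layout": {"xaxis": {"title": {"text": "Pressure (bar)"}},
               "yaxis": {"title": {"text": "Coverage (mol/kg)"}}},
    "data": [{"name": "Langmuir isotherm",
              "simulate": {"model": "https://github.com/AdityaSavara/JSONGrapherExamples/blob/main/ExampleSimulators/Langmuir_Isotherm.js",
                           "K_eq": None,
                           "sigma_max": "1.0267670459667 (mol/kg)",
                           "k_ads": "200 (1/(bar * s))",
                           "k_des": "100 (1/s)"}}],
}
fig_dict_example = simulate_as_needed_in_fig_dict(fig_dict_example)  #requires node.js and internet access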
1651
+
1652
+ #Function that takes fig_dict and dataseries index and simulates if needed.
1653
+ def simulate_specific_data_series_by_index(fig_dict, data_series_index, simulator_link='', verbose=False):
1654
+ data_dicts_list = fig_dict['data']
1655
+ data_dict_index = data_series_index
1656
+ data_dict = data_dicts_list[data_dict_index]
1657
+ if 'simulate' in data_dict:
1658
+ data_dict_filled = simulate_data_series(data_dict, simulator_link=simulator_link, verbose=verbose)
1659
+ data_dicts_list[data_dict_index] = data_dict_filled
1660
+ fig_dict['data'] = data_dicts_list
1661
+ return fig_dict
1662
+
1663
+ ### End of section of file that has functions for calling external javascript simulators ###
872
1664
 
873
1665
  # Example Usage
874
1666
  if __name__ == "__main__":
875
1667
  # Example of creating a record with optional attributes.
876
- record = JSONGrapherRecord(
1668
+ Record = JSONGrapherRecord(
877
1669
  comments="Here is a description.",
878
- graph_title="Graph Title",
1670
+ graph_title="Here Is The Graph Title Spot",
879
1671
  data_objects_list=[
880
- {"comments": "Initial data series.", "uid": "123", "line": {"shape": "solid"}, "name": "Series A", "type": "line", "x": [1, 2, 3], "y": [4, 5, 6]}
1672
+ {"comments": "Initial data series.", "uid": "123", "name": "Series A", "type": "spline", "x": [1, 2, 3], "y": [4, 5, 8]}
881
1673
  ],
882
1674
  )
1675
+ x_label_including_units = "Time (years)"
1676
+ y_label_including_units = "Height (m)"
1677
+ Record.set_comments("Tree Growth Data collected from the US National Arboretum")
1678
+ Record.set_datatype("Tree_Growth_Curve")
1679
+ Record.set_x_axis_label_including_units(x_label_including_units)
1680
+ Record.set_y_axis_label_including_units(y_label_including_units)
1681
+
1682
+
1683
+ Record.export_to_json_file("test.json")
1684
+
1685
+ print(Record)
883
1686
 
884
1687
  # Example of creating a record from an existing dictionary.
885
1688
  existing_JSONGrapher_record = {
886
1689
  "comments": "Existing record description.",
887
1690
  "graph_title": "Existing Graph",
888
1691
  "data": [
889
- {"comments": "Data series 1", "uid": "123", "line": {"shape": "solid"}, "name": "Series A", "type": "line", "x": [1, 2, 3], "y": [4, 5, 6]}
1692
+ {"comments": "Data series 1", "uid": "123", "name": "Series A", "type": "spline", "x": [1, 2, 3], "y": [4, 5, 8]}
890
1693
  ],
891
1694
  }
892
- record_from_existing = JSONGrapherRecord(existing_JSONGrapher_record=existing_JSONGrapher_record)
893
- record.export_to_json_file("test.json")
894
- print(record)
1695
+ Record_from_existing = JSONGrapherRecord(existing_JSONGrapher_record=existing_JSONGrapher_record)
1696
+ x_label_including_units = "Time (years)"
1697
+ y_label_including_units = "Height (cm)"
1698
+ Record_from_existing.set_comments("Tree Growth Data collected from the US National Arboretum")
1699
+ Record_from_existing.set_datatype("Tree_Growth_Curve")
1700
+ Record_from_existing.set_x_axis_label_including_units(x_label_including_units)
1701
+ Record_from_existing.set_y_axis_label_including_units(y_label_including_units)
1702
+ print(Record_from_existing)
1703
+
1704
+ print("NOW WILL MERGE THE RECORDS, AND USE THE SECOND ONE TWICE (AS A JSONGRAPHER OBJECT THEN JUST THE FIG_DICT)")
1705
+ print(merge_JSONGrapherRecords([Record, Record_from_existing, Record_from_existing.fig_dict]))
1706
+
1707
+