jsongrapher 1.6__py3-none-any.whl → 3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,9 +1,71 @@
1
1
  import json
2
+ import JSONGrapher.styles.layout_styles_library
3
+ import JSONGrapher.styles.trace_styles_collection_library
2
4
  #TODO: put an option to suppress warnings from JSONRecordCreator
3
5
 
4
6
 
7
+ ## Start of the portion of the code for the GUI##
8
+ global_records_list = [] #This list holds onto records as they are added. Index 0 is the merged record. Each other index corresponds to record number (like 1 is first record, 2 is second record, etc)
9
+
10
+
11
+ #This is a JSONGrapher specific function
12
+ #That takes filenames and adds new JSONGrapher records to a global_records_list
13
+ #If all_selected_file_paths and newly_added_file_paths are both [], that means to clear the global_records_list.
14
+ def add_records_to_global_records_list_and_plot(all_selected_file_paths, newly_added_file_paths, plot_immediately=True):
15
+ #First check if we have received a "clear" condition.
16
+ if (len(all_selected_file_paths) == 0) and (len(newly_added_file_paths) == 0):
17
+ global_records_list.clear()
18
+ return global_records_list
19
+ if len(global_records_list) == 0: #this is for the "first time" the function is called, but the newly_added_file_paths could be a list longer than one.
20
+ first_record = create_new_JSONGrapherRecord()
21
+ first_record.import_from_file(newly_added_file_paths[0]) #get the first newly added record.
22
+ #index 0 will be the one we merge into.
23
+ global_records_list.append(first_record)
24
+ #index 1 will be where we store the first record, so we append again.
25
+ global_records_list.append(first_record)
26
+ #Now, check if there are more records.
27
+ if len(newly_added_file_paths) > 1:
28
+ for filename_and_path_index, filename_and_path in enumerate(newly_added_file_paths):
29
+ if filename_and_path_index == 0:
30
+ pass #passing because we've already added first file.
31
+ else:
32
+ current_record = create_new_JSONGrapherRecord() #make a new record
33
+ current_record.import_from_file(filename_and_path)
34
+ global_records_list.append(current_record) #append it to global records list
35
+ global_records_list[0] = merge_JSONGrapherRecords([global_records_list[0], current_record]) #merge into the main record of records list, which is at index 0.
36
+ else: #For the case that global_records_list already has records when the function is called.
37
+ for filename_and_path_index, filename_and_path in enumerate(newly_added_file_paths):
38
+ current_record = create_new_JSONGrapherRecord() #make a new record
39
+ current_record.import_from_file(filename_and_path)
40
+ global_records_list.append(current_record) #append it to global records list
41
+ global_records_list[0] = merge_JSONGrapherRecords([global_records_list[0], current_record]) #merge into the main record of records list, which is at index 0.
42
+ if plot_immediately:
43
+ #plot the index 0, which is the most up to date merged record.
44
+ global_records_list[0].plot_with_plotly()
45
+ json_string_for_download = json.dumps(global_records_list[0].fig_dict, indent=4)
46
+ return [json_string_for_download] #For the GUI, this function should return a list with something convertible to a string (for saving to file) in index 0.
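# A minimal usage sketch of the above GUI callback, written as it might appear in a
# separate script with this module's functions in scope; "record_1.json" and
# "record_2.json" are hypothetical paths to existing JSONGrapher record files.
gui_output = add_records_to_global_records_list_and_plot(
    all_selected_file_paths=["record_1.json", "record_2.json"],
    newly_added_file_paths=["record_1.json", "record_2.json"],
    plot_immediately=False)  # gui_output[0] holds the merged record as a JSON string.
add_records_to_global_records_list_and_plot([], [])  # two empty lists clear the accumulated records.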
47
+
48
+
49
+
50
+ #This is a JSONGrapher specific wrapper function for drag_and_drop_gui.create_and_launch.
51
+ #This launches the python based JSONGrapher GUI.
52
+ def launch():
53
+ try:
54
+ import JSONGrapher.drag_and_drop_gui as drag_and_drop_gui
55
+ except ImportError:
56
+ try:
57
+ import drag_and_drop_gui # Attempt local import
58
+ except ImportError as exc:
59
+ raise ImportError("Module 'drag_and_drop_gui' could not be found locally or in JSONGrapher.") from exc
60
+ _selected_files = drag_and_drop_gui.create_and_launch(app_name = "JSONGrapher", function_for_after_file_addition=add_records_to_global_records_list_and_plot)
61
+ #We will not return the _selected_files, and instead will return the global_records_list.
62
+ return global_records_list
63
+
64
+ ## End of the portion of the code for the GUI##
65
+
66
+
5
67
  #the function create_new_JSONGrapherRecord is intended to be "like" a wrapper function for people who find it more
6
- # intuitive to create class objects that way, this variable is actually just a reference
68
+ # intuitive to create class objects that way, this variable is actually just a reference
7
69
  # so that we don't have to map the arguments.
8
70
  def create_new_JSONGrapherRecord(hints=False):
9
71
  #we will create a new record. While we could populate it with the init,
@@ -13,90 +75,624 @@ def create_new_JSONGrapherRecord(hints=False):
13
75
  new_record.add_hints()
14
76
  return new_record
15
77
 
78
+ #This is actually a wrapper around merge_JSONGrapherRecords. Made for convenience.
79
+ def load_JSONGrapherRecords(recordsList):
80
+ return merge_JSONGrapherRecords(recordsList)
81
+
82
+ #This is actually a wrapper around merge_JSONGrapherRecords. Made for convenience.
83
+ def import_JSONGrapherRecords(recordsList):
84
+ return merge_JSONGrapherRecords(recordsList)
85
+
86
+ #This is a function for merging JSONGrapher records.
87
+ #recordsList is a list of records
88
+ #Each record can be a JSONGrapherRecord object (a python class object) or a dictionary (meaning, a JSONGrapher JSON as a dictionary)
89
+ #If a record is received that is a string, then the function will attempt to convert that into a dictionary.
90
+ #The units used will be that of the first record encountered
91
+ #if changing this function's arguments, then also change those for load_JSONGrapherRecords and import_JSONGrapherRecords
92
+ def merge_JSONGrapherRecords(recordsList):
93
+ if type(recordsList) == type(""):
94
+ recordsList = [recordsList]
95
+ import copy
96
+ recordsAsDictionariesList = []
97
+ merged_JSONGrapherRecord = create_new_JSONGrapherRecord()
98
+ #first make a list of all the records as dictionaries.
99
+ for record in recordsList:
100
+ if isinstance(record, dict):#can't use type({}) or SyncedDict won't be included.
101
+ recordsAsDictionariesList.append(record)
102
+ elif type(record) == type("string"):
103
+ new_record = create_new_JSONGrapherRecord()
104
+ new_fig_dict = new_record.import_from_json(record)
105
+ recordsAsDictionariesList.append(new_fig_dict)
106
+ else: #this assumes a JSONGrapherRecord type was received.
107
+ record = record.fig_dict
108
+ recordsAsDictionariesList.append(record)
109
+ #next, iterate through the list of dictionaries and merge each data object together.
110
+ #We'll use the units of the first dictionary.
111
+ #We'll put the first record in directly, keeping the units etc. Then will "merge" in the additional data sets.
112
+ #Iterate across all records received.
113
+ for dictionary_index, current_fig_dict in enumerate(recordsAsDictionariesList):
114
+ if dictionary_index == 0: #this is the first record case. We'll use this to start the list and also gather the units.
115
+ merged_JSONGrapherRecord.fig_dict = copy.deepcopy(recordsAsDictionariesList[0])
116
+ first_record_x_label = recordsAsDictionariesList[0]["layout"]["xaxis"]["title"]["text"] #this is a dictionary.
117
+ first_record_y_label = recordsAsDictionariesList[0]["layout"]["yaxis"]["title"]["text"] #this is a dictionary.
118
+ first_record_x_units = separate_label_text_from_units(first_record_x_label)["units"]
119
+ first_record_y_units = separate_label_text_from_units(first_record_y_label)["units"]
120
+ else:
121
+ #first get the units of this particular record.
122
+ this_record_x_label = recordsAsDictionariesList[dictionary_index]["layout"]["xaxis"]["title"]["text"] #this is a dictionary.
123
+ this_record_y_label = recordsAsDictionariesList[dictionary_index]["layout"]["yaxis"]["title"]["text"] #this is a dictionary.
124
+ this_record_x_units = separate_label_text_from_units(this_record_x_label)["units"]
125
+ this_record_y_units = separate_label_text_from_units(this_record_y_label)["units"]
126
+ #now get the ratio of the units for this record relative to the first record.
127
+ #if the units are identical, then just make the ratio 1.
128
+ if this_record_x_units == first_record_x_units:
129
+ x_units_ratio = 1
130
+ else:
131
+ x_units_ratio = get_units_scaling_ratio(this_record_x_units, first_record_x_units)
132
+ if this_record_y_units == first_record_y_units:
133
+ y_units_ratio = 1
134
+ else:
135
+ y_units_ratio = get_units_scaling_ratio(this_record_y_units, first_record_y_units)
136
+ #A record could have more than one data series, but they will all have the same units.
137
+ #Thus, we use a function that will scale all of the dataseries at one time.
138
+ if (x_units_ratio == 1) and (y_units_ratio == 1): #skip scaling if it's not necessary.
139
+ scaled_fig_dict = current_fig_dict
140
+ else:
141
+ scaled_fig_dict = scale_fig_dict_values(current_fig_dict, x_units_ratio, y_units_ratio)
142
+ #now, add the scaled data objects to the original one.
143
+ #This is fairly easy using a list extend.
144
+ merged_JSONGrapherRecord.fig_dict["data"].extend(scaled_fig_dict["data"])
145
+ merged_JSONGrapherRecord = convert_JSONGRapherRecord_data_list_to_class_objects(merged_JSONGrapherRecord)
146
+ return merged_JSONGrapherRecord
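# A minimal merge sketch with this module's functions in scope, using two fig_dict
# style dictionaries whose y units differ (kg vs g); the labels and values here are
# hypothetical, and unitpy must be installed for the scaling step.
record_kg = {"comments": "", "datatype": "",
             "data": [{"name": "sample A", "x": [0, 1, 2], "y": [0.0, 1.0, 2.0]}],
             "layout": {"title": {"text": "Mass vs time"},
                        "xaxis": {"title": {"text": "Time (s)"}},
                        "yaxis": {"title": {"text": "Mass (kg)"}}}}
record_g = {"comments": "", "datatype": "",
            "data": [{"name": "sample B", "x": [0, 1, 2], "y": [0.0, 500.0, 1500.0]}],
            "layout": {"title": {"text": "Mass vs time"},
                       "xaxis": {"title": {"text": "Time (s)"}},
                       "yaxis": {"title": {"text": "Mass (g)"}}}}
merged = merge_JSONGrapherRecords([record_kg, record_g])
# The second series is rescaled into the first record's units, so
# merged.fig_dict["data"][1]["y"] is expected to be [0.0, 0.5, 1.5] (in kg).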
147
+
148
+ def convert_JSONGRapherRecord_data_list_to_class_objects(record):
149
+ #will also support receiving a fig_dict
150
+ if isinstance(record, dict):
151
+ fig_dict_received = True
152
+ fig_dict = record
153
+ else:
154
+ fig_dict_received = False
155
+ fig_dict = record.fig_dict
156
+ data_list = fig_dict["data"]
157
+ #Do the casting into data_series objects by creating a fresh JSONDataSeries object and populating it.
158
+ for data_series_index, data_series_received in enumerate(data_list):
159
+ JSONGrapher_data_series_object = JSONGrapherDataSeries()
160
+ JSONGrapher_data_series_object.update_while_preserving_old_terms(data_series_received)
161
+ data_list[data_series_index] = JSONGrapher_data_series_object
162
+ #Now prepare for return.
163
+ if fig_dict_received == True:
164
+ fig_dict["data"] = data_list
165
+ record = fig_dict
166
+ if fig_dict_received == False:
167
+ record.fig_dict["data"] = data_list
168
+ return record
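# A short sketch of the casting step above, applied to a bare fig_dict:
fig_dict_example = {"data": [{"name": "sample", "x": [1, 2], "y": [3, 4]}]}
fig_dict_example = convert_JSONGRapherRecord_data_list_to_class_objects(fig_dict_example)
# fig_dict_example["data"][0] is now a JSONGrapherDataSeries object with the original fields preserved.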
169
+
170
+ ### Start of portion of the file that has functions for scaling data to the same units ###
171
+ #The below function takes two units strings, such as
172
+ # "(((kg)/m))/s" and "(((g)/m))/s"
173
+ # and then returns the scaling ratio of units_string_1 / units_string_2
174
+ # So in the above example, would return 1000.
175
+ #Could add "tag_characters"='<>' as an optional argument to this and other functions
176
+ #to make the option of other characters for custom units.
177
+ def get_units_scaling_ratio(units_string_1, units_string_2):
178
+ # Ensure both strings are properly encoded in UTF-8
179
+ units_string_1 = units_string_1.encode("utf-8").decode("utf-8")
180
+ units_string_2 = units_string_2.encode("utf-8").decode("utf-8")
181
+ #If the unit strings are identical, there is no need to go further.
182
+ if units_string_1 == units_string_2:
183
+ return 1
184
+ import unitpy #this function uses unitpy.
185
+ #Replace "^" with "**" for unit conversion purposes.
186
+ #We won't need to replace back because this function only returns the ratio in the end.
187
+ units_string_1 = units_string_1.replace("^", "**")
188
+ units_string_2 = units_string_2.replace("^", "**")
189
+ #For now, we need to tag µ symbol units as if they are custom units. Because unitpy doesn't support that symbol yet (May 2025)
190
+ units_string_1 = tag_micro_units(units_string_1)
191
+ units_string_2 = tag_micro_units(units_string_2)
192
+ #Next, need to extract custom units and add them to unitpy
193
+ custom_units_1 = extract_tagged_strings(units_string_1)
194
+ custom_units_2 = extract_tagged_strings(units_string_2)
195
+ for custom_unit in custom_units_1:
196
+ add_custom_unit_to_unitpy(custom_unit)
197
+ for custom_unit in custom_units_2:
198
+ add_custom_unit_to_unitpy(custom_unit)
199
+ #Now, remove the "<" and ">" and will put them back later if needed.
200
+ units_string_1 = units_string_1.replace('<','').replace('>','')
201
+ units_string_2 = units_string_2.replace('<','').replace('>','')
202
+ try:
203
+ #First need to make unitpy "U" object and multiply it by 1.
204
+ #While it may be possible to find a way using the "Q" objects directly, this is the way I found so far, which converts the U object into a Q object.
205
+ units_object_converted = 1*unitpy.U(units_string_1)
206
+ ratio_with_units_object = units_object_converted.to(units_string_2)
207
+ #the above can fail if there are reciprocal units like 1/bar rather than (bar)**(-1), so we have an except statement that tries "that" fix if there is a failure.
208
+ except Exception as general_exception: # This is so VS code pylint does not flag this line. pylint: disable=broad-except, disable=unused-variable
209
+ units_string_1 = convert_inverse_units(units_string_1)
210
+ units_string_2 = convert_inverse_units(units_string_2)
211
+ units_object_converted = 1*unitpy.U(units_string_1)
212
+ try:
213
+ ratio_with_units_object = units_object_converted.to(units_string_2)
214
+ except KeyError as e:
215
+ raise KeyError(f"Error during unit conversion in get_units_scaling_ratio: Missing key {e}. Ensure all unit definitions are correctly set. Unit 1: {units_string_1}, Unit 2: {units_string_2}") from e
216
+ except ValueError as e:
217
+ raise ValueError(f"Error during unit conversion in get_units_scaling_ratio: {e}. Make sure unit values are valid and properly formatted. Unit 1: {units_string_1}, Unit 2: {units_string_2}") from e
218
+ except Exception as e: # pylint: disable=broad-except
219
+ raise RuntimeError(f"An unexpected error occurred in get_units_scaling_ratio when trying to convert units: {e}. Double-check that your records have the same units. Unit 1: {units_string_1}, Unit 2: {units_string_2}") from e
220
+
221
+ ratio_with_units_string = str(ratio_with_units_object)
222
+
223
+ ratio_only = ratio_with_units_string.split(' ')[0] #what comes out may look like 1000 gram/(meter second), so we split and take first part.
224
+ ratio_only = float(ratio_only)
225
+ return ratio_only #function returns ratio only. If function is later changed to return more, then units_strings may need further replacements.
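# A quick check of the scaling ratio described above (requires unitpy):
ratio = get_units_scaling_ratio("(((kg)/m))/s", "(((g)/m))/s")
# kg/(m*s) is 1000x larger than g/(m*s), so ratio is expected to be 1000.0.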
226
+
227
+ def return_custom_units_markup(units_string, custom_units_list):
228
+ """puts markup around custom units with '<' and '>' """
229
+ sorted_custom_units_list = sorted(custom_units_list, key=len, reverse=True)
230
+ #the units should be sorted from longest to shortest if not already sorted that way.
231
+ for custom_unit in sorted_custom_units_list:
232
+ units_string = units_string.replace(custom_unit, '<'+custom_unit+'>')
233
+ return units_string
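# A short sketch of the custom-unit markup helper:
marked_up = return_custom_units_markup("frogs*kg/s", ["frogs"])
# expected result: "<frogs>*kg/s"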
234
+
235
+ #This function tags microunits.
236
+ #However, because unitpy gives unexpected behavior with the microsymbol,
237
+ #We are actually going to change them from "µm" to "<microfrogm>"
238
+ def tag_micro_units(units_string):
239
+ # Unicode representations of micro symbols:
240
+ # U+00B5 → µ (Micro Sign)
241
+ # U+03BC → μ (Greek Small Letter Mu)
242
+ # U+1D6C2 → 𝜇 (Mathematical Greek Small Letter Mu)
243
+ # U+1D6C1 → 𝝁 (Mathematical Bold Greek Small Letter Mu)
244
+ micro_symbols = ["µ", "μ", "𝜇", "𝝁"]
245
+ # Check if any micro symbol is in the string
246
+ if not any(symbol in units_string for symbol in micro_symbols):
247
+ return units_string # If none are found, return the original string unchanged
248
+ import re
249
+ # Construct a regex pattern to detect any micro symbol followed by letters
250
+ pattern = r"[" + "".join(micro_symbols) + r"][a-zA-Z]+"
251
+ # Extract matches and sort them by length (longest first)
252
+ matches = sorted(re.findall(pattern, units_string), key=len, reverse=True)
253
+ # Replace matches with custom unit notation <X>
254
+ for match in matches:
255
+ frogified_match = f"<microfrog{match[1:]}>"
256
+ units_string = units_string.replace(match, frogified_match)
257
+ return units_string
258
+
259
+ #We are actually going to change them back to "µm" from "<microfrogm>"
260
+ def untag_micro_units(units_string):
261
+ if "<microfrog" not in units_string: # Check if any frogified unit exists
262
+ return units_string
263
+ import re
264
+ # Pattern to detect the frogified micro-units
265
+ pattern = r"<microfrog([a-zA-Z]+)>"
266
+ # Replace frogified units with µ + the original unit suffix
267
+ return re.sub(pattern, r"µ\1", units_string)
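# A round-trip sketch of the micro-symbol workaround:
tagged = tag_micro_units("(µmol)/s")      # expected: "(<microfrogmol>)/s"
restored = untag_micro_units(tagged)      # expected: "(µmol)/s"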
268
+
269
+ def add_custom_unit_to_unitpy(unit_string):
270
+ import unitpy
271
+ from unitpy.definitions.entry import Entry
272
+ #need to put an entry into "bases" because the BaseSet class will pull from that dictionary.
273
+ unitpy.definitions.unit_base.bases[unit_string] = unitpy.definitions.unit_base.BaseUnit(label=unit_string, abbr=unit_string,dimension=unitpy.definitions.dimensions.dimensions["amount_of_substance"])
274
+ #Then need to make a BaseSet object to put in. Confusingly, we *do not* put a BaseUnit object into the base_unit argument, below.
275
+ #We use "mole" to avoid conflicting with any other existing units.
276
+ base_unit = unitpy.definitions.unit_base.BaseSet(mole = 1)
277
+ #base_unit = unitpy.definitions.unit_base.BaseUnit(label=unit_string, abbr=unit_string,dimension=unitpy.definitions.dimensions.dimensions["amount_of_substance"])
278
+ new_entry = Entry(label = unit_string, abbr = unit_string, base_unit = base_unit, multiplier= 1)
279
+ #only add the entry if it is missing. A duplicate entry would cause crashing later.
280
+ #We can't use the "unitpy.ledger.get_entry" function because the entries have custom == comparisons
281
+ # and for the new entry, it will also return a special NoneType that we can't easily check.
282
+ # the structure unitpy.ledger.units is a list, but unitpy.ledger._lookup is a dictionary we can use
283
+ # to check if the key for the new unit is added or not.
284
+ if unit_string not in unitpy.ledger._lookup: #This comment is so the VS code pylint does not flag this line. pylint: disable=protected-access
285
+ unitpy.ledger.add_unit(new_entry) #implied return is here. No return needed.
286
+
287
+ def extract_tagged_strings(text):
288
+ """Extracts tags surrounded by <> from a given string. Used for custom units.
289
+ returns them as a list sorted from longest to shortest"""
290
+ import re
291
+ list_of_tags = re.findall(r'<(.*?)>', text)
292
+ set_of_tags = set(list_of_tags)
293
+ sorted_tags = sorted(set_of_tags, key=len, reverse=True)
294
+ return sorted_tags
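# A short sketch of tag extraction:
tags = extract_tagged_strings("(<frogs>*kg)/(<little_frogs>*s)")
# expected result: ["little_frogs", "frogs"] (unique tags, longest first)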
295
+
296
+ #This function is to convert things like (1/bar) to (bar)**(-1)
297
+ #It was written by copilot and refined by further prompting of copilot by testing.
298
+ #The depth is because the function works iteratively and then stops when finished.
299
+ def convert_inverse_units(expression, depth=100):
300
+ import re
301
+ # Patterns to match valid reciprocals while ignoring multiplied units, so (1/bar)*bar should be handled correctly.
302
+ patterns = [r"1/\((1/.*?)\)", r"1/([a-zA-Z]+)"]
303
+ for _ in range(depth):
304
+ new_expression = expression
305
+ for pattern in patterns:
306
+ new_expression = re.sub(pattern, r"(\1)**(-1)", new_expression)
307
+
308
+ # Stop early if no more changes are made
309
+ if new_expression == expression:
310
+ break
311
+ expression = new_expression
312
+ return expression
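# A short sketch of the reciprocal-unit rewrite:
converted = convert_inverse_units("1/bar")
# expected result: "(bar)**(-1)"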
313
+
314
+ #the below function takes in a fig_dict, as well as x and/or y scaling values.
315
+ #The function then scales the values in the data of the fig_dict and returns the scaled fig_dict.
316
+ def scale_fig_dict_values(fig_dict, num_to_scale_x_values_by = 1, num_to_scale_y_values_by = 1):
317
+ import copy
318
+ scaled_fig_dict = copy.deepcopy(fig_dict)
319
+ #iterate across the data objects inside, and change them.
320
+ for data_index, dataseries in enumerate(scaled_fig_dict["data"]):
321
+ dataseries = scale_dataseries_dict(dataseries, num_to_scale_x_values_by=num_to_scale_x_values_by, num_to_scale_y_values_by=num_to_scale_y_values_by)
322
+ scaled_fig_dict["data"][data_index] = dataseries #this line shouldn't be needed due to mutable references, but adding for clarity and to be safe.
323
+ return scaled_fig_dict
324
+
325
+
326
+ def scale_dataseries_dict(dataseries_dict, num_to_scale_x_values_by = 1, num_to_scale_y_values_by = 1, num_to_scale_z_values_by = 1):
327
+ import numpy as np
328
+ dataseries = dataseries_dict
329
+ dataseries["x"] = list(np.array(dataseries["x"], dtype=float)*num_to_scale_x_values_by) #convert to numpy array for multiplication, then back to list.
330
+ dataseries["y"] = list(np.array(dataseries["y"], dtype=float)*num_to_scale_y_values_by) #convert to numpy array for multiplication, then back to list.
331
+
332
+ # Ensure elements are converted to standard Python types.
333
+ dataseries["x"] = [float(val) for val in dataseries["x"]] #This line written by copilot.
334
+ dataseries["y"] = [float(val) for val in dataseries["y"]] #This line written by copilot.
335
+
336
+ if "z" in dataseries:
337
+ dataseries["z"] = list(np.array(dataseries["z"], dtype=float)*num_to_scale_z_values_by) #convert to numpy array for multiplication, then back to list.
338
+ dataseries["z"] = [float(val) for val in dataseries["z"]] #Mimicking above lines.
339
+ return dataseries_dict
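# A small sketch of per-series scaling (requires numpy), e.g. converting x values
# recorded in km into m by multiplying by 1000:
series_example = {"x": [1, 2, 3], "y": [10, 20, 30]}
series_example = scale_dataseries_dict(series_example, num_to_scale_x_values_by=1000)
# series_example["x"] is expected to be [1000.0, 2000.0, 3000.0]; y is unchanged
# apart from being cast to floats.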
340
+
341
+ ### End of portion of the file that has functions for scaling data to the same units ###
342
+
343
+ ## This is a special dictionary class that will allow a dictionary
344
+ ## inside a main class object to be synchronized with the fields within it.
345
+ class SyncedDict(dict):
346
+ """A dictionary that automatically updates instance attributes."""
347
+ def __init__(self, owner):
348
+ super().__init__()
349
+ self.owner = owner # Store reference to the class instance
350
+ def __setitem__(self, key, value):
351
+ """Update both dictionary and instance attribute."""
352
+ super().__setitem__(key, value) # Set in the dictionary
353
+ setattr(self.owner, key, value) # Sync with instance attribute
354
+ def __delitem__(self, key):
355
+ super().__delitem__(key) # Remove from dict
356
+ if hasattr(self.owner, key):
357
+ delattr(self.owner, key) # Sync removal from instance
358
+ def pop(self, key, *args):
359
+ """Remove item from dictionary and instance attributes."""
360
+ value = super().pop(key, *args) # Remove from dictionary
361
+ if hasattr(self.owner, key):
362
+ delattr(self.owner, key) # Remove from instance attributes
363
+ return value
364
+ def update(self, *args, **kwargs):
365
+ super().update(*args, **kwargs) # Update dict
366
+ for key, value in self.items():
367
+ setattr(self.owner, key, value) # Sync attributes
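# A minimal sketch of the attribute synchronization, using a throwaway owner class:
class ExampleOwner:
    pass

example_owner = ExampleOwner()
synced = SyncedDict(example_owner)
synced["color"] = "blue"   # also sets example_owner.color = "blue"
synced.pop("color")        # removes the key and deletes example_owner.color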
368
+
369
+
370
+ class JSONGrapherDataSeries(dict): #inherits from dict.
371
+ def __init__(self, uid="", name="", trace_style="", x=None, y=None, **kwargs):
372
+ """Initialize a data series with synced dictionary behavior.
373
+ Here are some fields that can be included, with example values.
374
+
375
+ "uid": data_series_dict["uid"] = "123ABC", # (string) a unique identifier
376
+ "name": data_series_dict["name"] = "Sample Data Series", # (string) name of the data series
377
+ "trace_style": data_series_dict["trace_style"] = "scatter", # (string) type of trace (e.g., scatter, bar)
378
+ "x": data_series_dict["x"] = [1, 2, 3, 4, 5], # (list) x-axis values
379
+ "y": data_series_dict["y"] = [10, 20, 30, 40, 50], # (list) y-axis values
380
+ "mode": data_series_dict["mode"] = "lines", # (string) plot mode (e.g., "lines", "markers")
381
+ "marker_size": data_series_dict["marker"]["size"] = 6, # (integer) marker size
382
+ "marker_color": data_series_dict["marker"]["color"] = "blue", # (string) marker color
383
+ "marker_symbol": data_series_dict["marker"]["symbol"] = "circle", # (string) marker shape/symbol
384
+ "line_width": data_series_dict["line"]["width"] = 2, # (integer) line thickness
385
+ "line_dash": data_series_dict["line"]["dash"] = "solid", # (string) line style (solid, dash, etc.)
386
+ "opacity": data_series_dict["opacity"] = 0.8, # (float) transparency level (0-1)
387
+ "visible": data_series_dict["visible"] = True, # (boolean) whether the trace is visible
388
+ "hoverinfo": data_series_dict["hoverinfo"] = "x+y", # (string) format for hover display
389
+ "legend_group": data_series_dict["legend_group"] = None, # (string or None) optional grouping for legend
390
+ "text": data_series_dict["text"] = "Data Point Labels", # (string or None) optional text annotations
391
+
392
+ """
393
+ super().__init__() # Initialize as a dictionary
394
+
395
+ # Default trace properties
396
+ self.update({
397
+ "uid": uid,
398
+ "name": name,
399
+ "trace_style": trace_style,
400
+ "x": list(x) if x else [],
401
+ "y": list(y) if y else []
402
+ })
403
+
404
+ # Include any extra keyword arguments passed in
405
+ self.update(kwargs)
406
+
407
+ def update_while_preserving_old_terms(self, series_dict):
408
+ """Update instance attributes from a dictionary. Overwrites existing terms and preserves other old terms."""
409
+ self.update(series_dict)
410
+
411
+ def get_data_series_dict(self):
412
+ """Return the dictionary representation of the trace."""
413
+ return dict(self)
414
+
415
+ def set_x_values(self, x_values):
416
+ """Update the x-axis values."""
417
+ self["x"] = list(x_values) if x_values else []
418
+
419
+ def set_y_values(self, y_values):
420
+ """Update the y-axis values."""
421
+ self["y"] = list(y_values) if y_values else []
422
+
423
+ def set_name(self, name):
424
+ """Update the name of the data series."""
425
+ self["name"] = name
426
+
427
+ def set_uid(self, uid):
428
+ """Update the unique identifier (uid) of the data series."""
429
+ self["uid"] = uid
430
+
431
+ def set_trace_style(self, style):
432
+ """Update the trace style (e.g., scatter, scatter_spline, scatter_line, bar)."""
433
+ self["trace_style"] = style
434
+
435
+ def set_marker_symbol(self, symbol):
436
+ self.set_marker_shape(shape=symbol)
437
+
438
+ def set_marker_shape(self, shape):
439
+ """
440
+ Update the marker shape (symbol).
441
+
442
+ Supported marker shapes in Plotly:
443
+ - 'circle' (default)
444
+ - 'square'
445
+ - 'diamond'
446
+ - 'cross'
447
+ - 'x'
448
+ - 'triangle-up'
449
+ - 'triangle-down'
450
+ - 'triangle-left'
451
+ - 'triangle-right'
452
+ - 'pentagon'
453
+ - 'hexagon'
454
+ - 'star'
455
+ - 'hexagram'
456
+ - 'star-triangle-up'
457
+ - 'star-triangle-down'
458
+ - 'star-square'
459
+ - 'star-diamond'
460
+ - 'hourglass'
461
+ - 'bowtie'
462
+
463
+ :param shape: String representing the desired marker shape.
464
+ """
465
+ self.setdefault("marker", {})["symbol"] = shape
466
+
467
+ def add_data_point(self, x_val, y_val):
468
+ """Append a new data point to the series."""
469
+ self["x"].append(x_val)
470
+ self["y"].append(y_val)
471
+
472
+ def set_marker_size(self, size):
473
+ """Update the marker size."""
474
+ self.setdefault("marker", {})["size"] = size
475
+
476
+ def set_marker_color(self, color):
477
+ """Update the marker color."""
478
+ self.setdefault("marker", {})["color"] = color
479
+
480
+ def set_mode(self, mode):
481
+ """Update the mode (options: 'lines', 'markers', 'text', 'lines+markers', 'lines+text', 'markers+text', 'lines+markers+text')."""
482
+ # Check if 'line' is in the mode but 'lines' is not. Then correct for user if needed.
483
+ if "line" in mode and "lines" not in mode:
484
+ mode = mode.replace("line", "lines")
485
+ self["mode"] = mode
486
+
487
+ def set_annotations(self, text): #just a convenient wrapper.
488
+ self.set_text(text)
489
+
490
+ def set_text(self, text):
491
+ #text should be a list of strings the same length as the data series, one string per point.
492
+ """Update the annotations with a list of text as long as the number of data points."""
493
+ if text == type("string"):
494
+ text = [text] * len(self["x"]) # Repeat the text to match x-values length
495
+ else:
496
+ pass #use text directly
497
+ self["text"] = text
498
+
499
+
500
+ def set_line_width(self, width):
501
+ """Update the line width, should be a number, normally an integer."""
502
+ line = self.setdefault("line", {})
503
+ line.setdefault("width", width) # Ensure width is set
504
+
505
+ def set_line_dash(self, dash_style):
506
+ """
507
+ Update the line dash style.
508
+
509
+ Supported dash styles in Plotly:
510
+ - 'solid' (default) → Continuous solid line
511
+ - 'dot' → Dotted line
512
+ - 'dash' → Dashed line
513
+ - 'longdash' → Longer dashed line
514
+ - 'dashdot' → Dash-dot alternating pattern
515
+ - 'longdashdot' → Long dash-dot alternating pattern
516
+
517
+ :param dash_style: String representing the desired line style.
518
+ """
519
+ self.setdefault("line", {})["dash"] = dash_style
520
+
521
+ def set_transparency(self, transparency_value):
522
+ """
523
+ Update the transparency level by converting it to opacity.
524
+
525
+ Transparency ranges from:
526
+ - 0 (completely opaque) → opacity = 1
527
+ - 1 (fully transparent) → opacity = 0
528
+ - Intermediate values adjust partial transparency.
529
+
530
+ :param transparency_value: Float between 0 and 1, where 0 is opaque and 1 is transparent.
531
+ """
532
+ self["opacity"] = 1 - transparency_value
533
+
534
+ def set_opacity(self, opacity_value):
535
+ """Update the opacity level between 0 and 1."""
536
+ self["opacity"] = opacity_value
537
+
538
+ def set_visible(self, is_visible):
539
+ """Update the visibility of the trace.
540
+ "True" → The trace is fully visible.
541
+ "False" → The trace is completely hidden.
542
+ "legendonly" → The trace is hidden from the plot but still appears in the legend.
543
+
544
+ """
545
+
546
+ self["visible"] = is_visible
547
+
548
+ def set_hoverinfo(self, hover_format):
549
+ """Update hover information format."""
550
+ self["hoverinfo"] = hover_format
551
+
552
+
16
553
 
17
554
  class JSONGrapherRecord:
18
555
  """
19
556
  This class enables making JSONGrapher records. Each instance represents a structured JSON record for a graph.
20
557
  One can optionally provide an existing JSONGrapher record during creation to pre-populate the object.
558
+ One can manipulate the fig_dict inside, directly, using syntax like Record.fig_dict["comments"] = ...
559
+ One can also use syntax like Record["comments"] = ... as some 'magic' synchronizes fields directly in the Record with fields in the fig_dict.
560
+ However, developers should usually use the syntax like Record.fig_dict, internally, to avoid any referencing mistakes.
561
+
21
562
 
22
563
  Arguments & Attributes (all are optional):
23
- comments (str): General description or metadata related to the entire record. Can include citation links. Goes into the record's top level comments field.
24
- datatype: The datatype is the experiment type or similar, it is used to assess which records can be compared and which (if any) schema to compare to. Use of single underscores between words is recommended. This ends up being the datatype field of the full JSONGrapher file. Avoid using double underscores '__' in this field unless you have read the manual about hierarchical datatypes.
564
+ comments (str): Can be used to put in general description or metadata related to the entire record. Can include citation links. Goes into the record's top level comments field.
565
+ datatype: The datatype is the experiment type or similar, it is used to assess which records can be compared and which (if any) schema to compare to. Use of single underscores between words is recommended. This ends up being the datatype field of the full JSONGrapher file. Avoid using double underscores '__' in this field unless you have read the manual about hierarchical datatypes. The user can choose to provide a URL to a schema in this field, rather than a dataype name.
25
566
  graph_title: Title of the graph or the dataset being represented.
26
- data_objects_list (list): List of data series dictionaries to pre-populate the record.
567
+ data_objects_list (list): List of data series dictionaries to pre-populate the record. These may contain 'simulate' fields in them to call javascript source code for simulating on the fly.
568
+ simulate_as_added: Boolean. True by default. If true, any data series that are added with a simulation field will have an immediate simulation call attempt.
27
569
  x_data: Single series x data in a list or array-like structure.
28
570
  y_data: Single series y data in a list or array-like structure.
29
- x_axis_label_including_units: A string with units provided in parentheses. Use of multiplication "*" and division "/" and parentheses "( )" are allowed within in the units .
30
- y_axis_label_including_units: A string with units provided in parentheses. Use of multiplication "*" and division "/" and parentheses "( )" are allowed within in the units .
571
+ x_axis_label_including_units: A string with units provided in parentheses. Multiplication "*", division "/", and parentheses "( )" are allowed within the units. The dimensions of units can be multiple, such as mol/s. SI units are expected. Custom units must be inside < > and at the beginning. For example, (<frogs>*kg/s) would be permissible. Units should be non-plural (kg instead of kgs) and should be abbreviated (m not meter). Use “^” for exponents. It is recommended to have no numbers in the units other than exponents, and to thus use (bar)^(-1) rather than 1/bar.
572
+ y_axis_label_including_units: A string with units provided in parentheses. Multiplication "*", division "/", and parentheses "( )" are allowed within the units. The dimensions of units can be multiple, such as mol/s. SI units are expected. Custom units must be inside < > and at the beginning. For example, (<frogs>*kg/s) would be permissible. Units should be non-plural (kg instead of kgs) and should be abbreviated (m not meter). Use “^” for exponents. It is recommended to have no numbers in the units other than exponents, and to thus use (bar)^(-1) rather than 1/bar.
31
573
  layout: A dictionary defining the layout of the graph, including axis titles,
32
574
  comments, and general formatting options.
33
575
 
34
576
  Methods:
35
577
  add_data_series: Adds a new data series to the record.
36
- set_layout: Updates the layout configuration for the graph.
578
+ add_data_series_as_equation: Adds a new equation to plot, which will be evaluated on the fly.
579
+ set_layout_fields: Updates the layout configuration for the graph.
37
580
  export_to_json_file: Saves the entire record (comments, datatype, data, layout) as a JSON file.
38
581
  populate_from_existing_record: Populates the attributes from an existing JSONGrapher record.
39
582
  """
40
-
41
- def __init__(self, comments="", graph_title="", datatype="", data_objects_list = None, x_data=None, y_data=None, x_axis_label_including_units="", y_axis_label_including_units ="", plot_type ="", layout={}, existing_JSONGrapher_record=None):
583
+
584
+ def __init__(self, comments="", graph_title="", datatype="", data_objects_list = None, simulate_as_added = True, evaluate_equations_as_added = True, x_data=None, y_data=None, x_axis_label_including_units="", y_axis_label_including_units ="", plot_style ="", layout=None, existing_JSONGrapher_record=None):
42
585
  """
43
586
  Initialize a JSONGrapherRecord instance with optional attributes or an existing record.
44
587
 
45
588
  layout (dict): Layout dictionary to pre-populate the graph configuration.
46
589
  existing_JSONGrapher_record (dict): Existing JSONGrapher record to populate the instance.
47
- """
48
- # Default attributes for a new record.
49
- # Initialize the main record dictionary
50
- # the if statements check if something is empty and populates them if not. This is a special syntax in python that does not require a None object to work, empty also works.
51
-
52
- #if receiving a data_objects_list, validate it.
590
+ """
591
+ if layout is None: #it's bad to have a mutable object like an empty dictionary or list as a default python argument.
592
+ layout = {}
593
+
594
+ # Assign self.fig_dict in a way that it will push any changes to it into the class instance.
595
+ self.fig_dict = {}
596
+
597
+ # If receiving a data_objects_list, validate it.
53
598
  if data_objects_list:
54
- validate_plotly_data_list(data_objects_list) #call a function from outside the class.
55
- #if receiving axis labels, validate them.
599
+ validate_plotly_data_list(data_objects_list) # Call a function from outside the class.
600
+
601
+ # If receiving axis labels, validate them.
56
602
  if x_axis_label_including_units:
57
603
  validate_JSONGrapher_axis_label(x_axis_label_including_units, axis_name="x", remove_plural_units=False)
58
604
  if y_axis_label_including_units:
59
605
  validate_JSONGrapher_axis_label(y_axis_label_including_units, axis_name="y", remove_plural_units=False)
60
606
 
61
- self.fig_dict = {
607
+ self.fig_dict.update( {
62
608
  "comments": comments, # Top-level comments
63
609
  "datatype": datatype, # Top-level datatype (datatype)
64
610
  "data": data_objects_list if data_objects_list else [], # Data series list
65
611
  "layout": layout if layout else {
66
- "title": graph_title,
67
- "xaxis": {"title": x_axis_label_including_units},
68
- "yaxis": {"title": y_axis_label_including_units}
69
- }
70
- }
71
-
72
- self.plot_type = plot_type #the plot_type is actually a series level attribute. However, if somebody sets the plot_type at the record level, then we will use that plot_type for all of the individual series.
73
- if plot_type != "":
74
- self.fig_dict["plot_type"] = plot_type
75
-
76
- # Populate attributes if an existing JSONGrapher record is provided.
612
+ "title": {"text": graph_title},
613
+ "xaxis": {"title": {"text": x_axis_label_including_units}},
614
+ "yaxis": {"title": {"text": y_axis_label_including_units}}
615
+ }
616
+ }
617
+ )
618
+
619
+ if plot_style !="":
620
+ self.fig_dict["plot_style"] = plot_style
621
+ if simulate_as_added: # Will try to simulate, but because this is the default, will use a try-except rather than crash the program.
622
+ try:
623
+ self.fig_dict = simulate_as_needed_in_fig_dict(self.fig_dict)
624
+ except KeyError:
625
+ pass # Handle missing key issues gracefully
626
+ except Exception as e: # This is so VS code pylint does not flag this line: pylint: disable=broad-except
627
+ print(f"Unexpected error: {e}") # Logs any unhandled errors
628
+
629
+ if evaluate_equations_as_added: # Will try to evaluate, but because this is the default, will use a try-except rather than crash the program.
630
+ try:
631
+ self.fig_dict = evaluate_equations_as_needed_in_fig_dict(self.fig_dict)
632
+ except Exception as e: # This is so VS code pylint does not flag this line. pylint: disable=broad-except, disable=unused-variable
633
+ pass
634
+ # Populate attributes if an existing JSONGrapher record is provided as a dictionary.
77
635
  if existing_JSONGrapher_record:
78
636
  self.populate_from_existing_record(existing_JSONGrapher_record)
79
637
 
80
638
  # Initialize the hints dictionary, for use later, since the actual locations in the JSONRecord can be non-intuitive.
81
639
  self.hints_dictionary = {}
82
640
  # Adding hints. Here, the keys are the full field locations within the record.
83
- self.hints_dictionary["['comments']"] = "Use Record.set_comments() to populate this field. Put in a general description or metadata related to the entire record. Can include citation links. Goes into the record's top level comments field."
84
- self.hints_dictionary["['datatype']"] = "Use Record.set_datatype() to populate this field. This is the datatype, like experiment type, and is used to assess which records can be compared and which (if any) schema to compare to. Use of single underscores between words is recommended. Avoid using double underscores '__' in this field unless you have read the manual about hierarchical datatypes."
85
- self.hints_dictionary["['layout']['title']"] = "Use Record.set_graph_title() to populate this field. This is the title for the graph."
86
- self.hints_dictionary["['layout']['xaxis']['title']"] = "Use Record.set_x_axis_label() to populate this field. This is the x axis label and should have units in parentheses. The units can include multiplication '*', division '/' and parentheses '( )'. Scientific and imperial units are recommended. Custom units can be contained in pointy brackets'< >'." # x-axis label
87
- self.hints_dictionary["['layout']['yaxis']['title']"] = "Use Record.set_y_axis_label() to populate this field. This is the y axis label and should have units in parentheses. The units can include multiplication '*', division '/' and parentheses '( )'. Scientific and imperial units are recommended. Custom units can be contained in pointy brackets'< >'."
641
+ self.hints_dictionary["['comments']"] = "Use Record.set_comments() to populate this field. Can be used to put in a general description or metadata related to the entire record. Can include citations and links. Goes into the record's top-level comments field."
642
+ self.hints_dictionary["['datatype']"] = "Use Record.set_datatype() to populate this field. This is the datatype, like experiment type, and is used to assess which records can be compared and which (if any) schema to compare to. Use of single underscores between words is recommended. Avoid using double underscores '__' in this field unless you have read the manual about hierarchical datatypes. The user can choose to provide a URL to a schema in this field rather than a datatype name."
643
+ self.hints_dictionary["['layout']['title']['text']"] = "Use Record.set_graph_title() to populate this field. This is the title for the graph."
644
+ self.hints_dictionary["['layout']['xaxis']['title']['text']"] = "Use Record.set_x_axis_label() to populate this field. This is the x-axis label and should have units in parentheses. The units can include multiplication '*', division '/' and parentheses '( )'. Scientific and imperial units are recommended. Custom units can be contained in pointy brackets '< >'." # x-axis label
645
+ self.hints_dictionary["['layout']['yaxis']['title']['text']"] = "Use Record.set_y_axis_label() to populate this field. This is the y-axis label and should have units in parentheses. The units can include multiplication '*', division '/' and parentheses '( )'. Scientific and imperial units are recommended. Custom units can be contained in pointy brackets '< >'."
646
+
647
+ ##Start of section of class code that allows class to behave like a dictionary and synchronize with fig_dict ##
648
+ #The __getitem__ and __setitem__ functions allow the class instance to behave 'like' a dictionary without using super.
649
+ #The below functions allow the JSONGrapherRecord to populate the self.fig_dict each time something is added inside.
650
+ #That is, if someone uses something like Record["comments"]="frog", it will also put that into self.fig_dict
651
+
652
+ def __getitem__(self, key):
653
+ return self.fig_dict[key] # Direct access
654
+
655
+ def __setitem__(self, key, value):
656
+ self.fig_dict[key] = value # Direct modification
657
+
658
+ def __delitem__(self, key):
659
+ del self.fig_dict[key] # Support for deletion
660
+
661
+ def __iter__(self):
662
+ return iter(self.fig_dict) # Allow iteration
663
+
664
+ def __len__(self):
665
+ return len(self.fig_dict) # Support len()
666
+
667
+ def pop(self, key, default=None):
668
+ return self.fig_dict.pop(key, default) # Implement pop()
88
669
 
670
+ def keys(self):
671
+ return self.fig_dict.keys() # Dictionary-like keys()
672
+
673
+ def values(self):
674
+ return self.fig_dict.values() # Dictionary-like values()
675
+
676
+ def items(self):
677
+ return self.fig_dict.items() # Dictionary-like items()
678
+
679
+ def update(self, *args, **kwargs):
680
+ """Updates the dictionary with multiple key-value pairs."""
681
+ self.fig_dict.update(*args, **kwargs)
682
+
683
+
684
+ ##End of section of class code that allows class to behave like a dictionary and synchronize with fig_dict ##
89
685
 
90
686
  #this function enables printing the current record.
91
687
  def __str__(self):
92
688
  """
93
689
  Returns a JSON-formatted string of the record with an indent of 4.
94
690
  """
95
- print("Warning: Printing directly will return the raw record without some automatic updates. Please use the syntax RecordObject.print_to_inspect() which will make automatic consistency updates and validation checks to the record before printing.")
691
+ print("Warning: Printing directly will return the raw record without some automatic updates. It is recommended to use the syntax RecordObject.print_to_inspect() which will make automatic consistency updates and validation checks to the record before printing.")
96
692
  return json.dumps(self.fig_dict, indent=4)
97
693
 
98
694
 
99
- def add_data_series(self, series_name, x_values=[], y_values=[], simulate={}, comments="", plot_type="", uid="", line="", extra_fields=None):
695
+ def add_data_series(self, series_name, x_values=None, y_values=None, simulate=None, simulate_as_added=True, comments="", trace_style="", uid="", line="", extra_fields=None):
100
696
  """
101
697
  This is the normal way of adding an x,y data series.
102
698
  """
@@ -104,11 +700,20 @@ class JSONGrapherRecord:
104
700
  # x: List of x-axis values. Or similar structure.
105
701
  # y: List of y-axis values. Or similar structure.
106
702
  # simulate: This is an optional field which, if used, is a JSON object with entries for calling external simulation scripts.
703
+ # simulate_as_added: Boolean for calling simulation scripts immediately.
107
704
  # comments: Optional description of the data series.
108
- # plot_type: Type of the data (e.g., scatter, line).
705
+ # trace_style: Type of the data (e.g., scatter, line, scatter_spline, spline, bar).
109
706
  # line: Dictionary describing line properties (e.g., shape, width).
110
707
  # uid: Optional unique identifier for the series (e.g., a DOI).
111
708
  # extra_fields: Dictionary containing additional fields to add to the series.
709
+ #Should not have mutable objects initialized as defaults, so putting them in below.
710
+ if x_values is None:
711
+ x_values = []
712
+ if y_values is None:
713
+ y_values = []
714
+ if simulate is None:
715
+ simulate = {}
716
+
112
717
  x_values = list(x_values)
113
718
  y_values = list(y_values)
114
719
 
@@ -120,31 +725,122 @@ class JSONGrapherRecord:
120
725
 
121
726
  #Add optional inputs.
122
727
  if len(comments) > 0:
123
- data_series_dict["comments"]: comments
728
+ data_series_dict["comments"] = comments
124
729
  if len(uid) > 0:
125
- data_series_dict["uid"]: uid
730
+ data_series_dict["uid"] = uid
126
731
  if len(line) > 0:
127
- data_series_dict["line"]: line
732
+ data_series_dict["line"] = line
733
+ if len(trace_style) > 0:
734
+ data_series_dict['trace_style'] = trace_style
128
735
  #add simulate field if included.
129
736
  if simulate:
130
737
  data_series_dict["simulate"] = simulate
738
+ if simulate_as_added: #will try to simulate. But because this is the default, will use a try and except rather than crash the program.
739
+ try:
740
+ data_series_dict = simulate_data_series(data_series_dict)
741
+ except Exception as e: # This is so VS code pylint does not flag this line. pylint: disable=broad-except, disable=unused-variable
742
+ pass
131
743
  # Add extra fields if provided, they will be added.
132
744
  if extra_fields:
133
745
  data_series_dict.update(extra_fields)
134
- #Add to the class object's data list.
135
- self.fig_dict["data"].append(data_series_dict)
136
- #update plot_type, since our internal function requires the data series to be added already.
137
- if len(plot_type) > 0:
138
- newest_record_index = len(self.fig_dict["data"]) - 1
139
- self.set_plot_type_one_data_series(newest_record_index, plot_type)
140
746
 
747
+ #make this a JSONGrapherDataSeries class object, that way a person can use functions to do things like change marker size etc. more easily.
748
+ JSONGrapher_data_series_object = JSONGrapherDataSeries()
749
+ JSONGrapher_data_series_object.update_while_preserving_old_terms(data_series_dict)
750
+ data_series_dict = JSONGrapher_data_series_object
751
+ #Add to the JSONGrapherRecord class object's data list.
752
+ self.fig_dict["data"].append(data_series_dict) #implied return.
753
+ return data_series_dict
754
+
755
+ def add_data_series_as_equation(self, series_name, x_values=None, y_values=None, equation_dict=None, evaluate_equations_as_added=True, comments="", trace_style="", uid="", line="", extra_fields=None):
756
+ """
757
+ This is a way to add an equation that would be used to fill an x,y data series.
758
+ The equation will be an equation_dict of the json_equationer type
759
+ """
760
+ # series_name: Name of the data series.
761
+ # x: List of x-axis values. Or similar structure.
762
+ # y: List of y-axis values. Or similar structure.
763
+ # equation_dict: This is the field for the equation_dict of json_equationer type
764
+ # evaluate_equations_as_added: Boolean for evaluating equations immediately.
765
+ # comments: Optional description of the data series.
766
+ # trace_style: Type of the data (e.g., scatter, line, scatter_spline, spline, bar).
767
+ # line: Dictionary describing line properties (e.g., shape, width).
768
+ # uid: Optional unique identifier for the series (e.g., a DOI).
769
+ # extra_fields: Dictionary containing additional fields to add to the series.
770
+ #Should not have mutable objects initialized as defaults, so putting them in below.
771
+ if x_values is None:
772
+ x_values = []
773
+ if y_values is None:
774
+ y_values = []
775
+ if equation_dict is None:
776
+ equation_dict = {}
777
+
778
+ x_values = list(x_values)
779
+ y_values = list(y_values)
780
+
781
+ data_series_dict = {
782
+ "name": series_name,
783
+ "x": x_values,
784
+ "y": y_values,
785
+ }
786
+
787
+ #Add optional inputs.
788
+ if len(comments) > 0:
789
+ data_series_dict["comments"] = comments
790
+ if len(uid) > 0:
791
+ data_series_dict["uid"] = uid
792
+ if len(line) > 0:
793
+ data_series_dict["line"] = line
794
+ if len(trace_style) > 0:
795
+ data_series_dict['trace_style'] = trace_style
796
+ #add equation field if included.
797
+ if equation_dict:
798
+ data_series_dict["equation"] = equation_dict
799
+ # Add extra fields if provided, they will be added.
800
+ if extra_fields:
801
+ data_series_dict.update(extra_fields)
802
+
803
+ #make this a JSONGrapherDataSeries class object, that way a person can use functions to do things like change marker size etc. more easily.
804
+ JSONGrapher_data_series_object = JSONGrapherDataSeries()
805
+ JSONGrapher_data_series_object.update_while_preserving_old_terms(data_series_dict)
806
+ data_series_dict = JSONGrapher_data_series_object
807
+ #Add to the JSONGrapherRecord class object's data list.
808
+ self.fig_dict["data"].append(data_series_dict)
809
+ #Now evaluate the equation as added, if requested. It does seem counterintuitive to do this "at the end",
810
+ #but the reason this happens at the end is that the evaluation *must* occur after the series is part of the fig_dict, because we
811
+ #need to check the units coming out against the units in the layout. Otherwise we would not be able to convert units.
812
+ new_data_series_index = len(self.fig_dict["data"])-1
813
+ if evaluate_equations_as_added: #will try to evaluate the equation. But because this is the default, will use a try and except rather than crash the program.
814
+ try:
815
+ self.fig_dict = evaluate_equation_for_data_series_by_index(self.fig_dict, new_data_series_index)
816
+ except Exception as e: # This is so VS code pylint does not flag this line. pylint: disable=broad-except, disable=unused-variable
817
+ pass
818
+
819
+ def change_data_series_name(self, series_index, series_name):
820
+ self.fig_dict["data"][series_index]["name"] = series_name
821
+
822
+ #this function forces the re-simulation of a particular dataseries.
823
+ #The simulator link will be extracted from the record, by default.
824
+ def simulate_data_series_by_index(self, data_series_index, simulator_link='', verbose=False):
825
+ self.fig_dict = simulate_specific_data_series_by_index(fig_dict=self.fig_dict, data_series_index=data_series_index, simulator_link=simulator_link, verbose=verbose)
826
+ data_series_dict = self.fig_dict["data"][data_series_index] #implied return
827
+ return data_series_dict #Extra regular return
141
828
  #this function returns the current record.
829
+
830
+ def evaluate_eqution_of_data_series_by_index(self, series_index, equation_dict = None, verbose=False):
831
+ if equation_dict is not None:
832
+ self.fig_dict["data"][series_index]["equation"] = equation_dict
833
+ self.fig_dict = evaluate_equation_for_data_series_by_index(self.fig_dict, data_series_index=series_index, verbose=verbose) #implied return.
834
+ return self.fig_dict["data"][series_index] #Extra regular return
835
+
836
+ #this function returns the current record.
142
837
  def get_record(self):
143
838
  """
144
839
  Returns a JSON-dict string of the record
145
840
  """
146
841
  return self.fig_dict
147
-
842
+ #The update_and_validate function will clean for plotly.
843
+ #TODO: the internal recommending "print_to_inspect" function should, by default, exclude printing the full dictionaries of the layout_style and the trace_collection_style.
148
844
  def print_to_inspect(self, update_and_validate=True, validate=True, remove_remaining_hints=False):
149
845
  if remove_remaining_hints == True:
150
846
  self.remove_hints()
@@ -159,50 +855,79 @@ class JSONGrapherRecord:
159
855
  Populates attributes from an existing JSONGrapher record.
160
856
  existing_JSONGrapher_record: A dictionary representing an existing JSONGrapher record.
161
857
  """
162
- if "comments" in existing_JSONGrapher_record: self.fig_dict["comments"] = existing_JSONGrapher_record["comments"]
163
- if "datatype" in existing_JSONGrapher_record: self.fig_dict["datatype"] = existing_JSONGrapher_record["datatype"]
164
- if "data" in existing_JSONGrapher_record: self.fig_dict["data"] = existing_JSONGrapher_record["data"]
165
- if "layout" in existing_JSONGrapher_record: self.fig_dict["layout"] = existing_JSONGrapher_record["layout"]
166
-
858
+ #While we expect a dictionary, if a JSONGrapher object is provided, we will simply pull the dictionary out of that.
859
+ if isinstance(existing_JSONGrapher_record, dict):
860
+ if "comments" in existing_JSONGrapher_record: self.fig_dict["comments"] = existing_JSONGrapher_record["comments"]
861
+ if "datatype" in existing_JSONGrapher_record: self.fig_dict["datatype"] = existing_JSONGrapher_record["datatype"]
862
+ if "data" in existing_JSONGrapher_record: self.fig_dict["data"] = existing_JSONGrapher_record["data"]
863
+ if "layout" in existing_JSONGrapher_record: self.fig_dict["layout"] = existing_JSONGrapher_record["layout"]
864
+ else:
865
+ self.fig_dict = existing_JSONGrapher_record.fig_dict
866
+
867
+
868
+ #the below function takes in an existing JSONGrapher record, and merges the data in.
869
+ #This requires scaling any data as needed, according to units.
870
+ def merge_in_JSONGrapherRecord(self, fig_dict_to_merge_in):
871
+ import copy
872
+ fig_dict_to_merge_in = copy.deepcopy(fig_dict_to_merge_in)
873
+ if type(fig_dict_to_merge_in) == type({}):
874
+ pass #this is what we are expecting.
875
+ elif type(fig_dict_to_merge_in) == type("string"):
876
+ fig_dict_to_merge_in = json.loads(fig_dict_to_merge_in)
877
+ else: #this assumes a JSONGrapherRecord type was received.
878
+ fig_dict_to_merge_in = fig_dict_to_merge_in.fig_dict
879
+ #Now extract the units of the current record.
880
+ first_record_x_label = self.fig_dict["layout"]["xaxis"]["title"]["text"] #this is a dictionary.
881
+ first_record_y_label = self.fig_dict["layout"]["yaxis"]["title"]["text"] #this is a dictionary.
882
+ first_record_x_units = separate_label_text_from_units(first_record_x_label)["units"]
883
+ first_record_y_units = separate_label_text_from_units(first_record_y_label)["units"]
884
+ #Get the units of the new record.
885
+ this_record_x_label = fig_dict_to_merge_in["layout"]["xaxis"]["title"]["text"] #this is a dictionary.
886
+ this_record_y_label = fig_dict_to_merge_in["layout"]["yaxis"]["title"]["text"] #this is a dictionary.
887
+ this_record_x_units = separate_label_text_from_units(this_record_x_label)["units"]
888
+ this_record_y_units = separate_label_text_from_units(this_record_y_label)["units"]
889
+ #now get the ratio of the units for this record relative to the first record.
890
+ x_units_ratio = get_units_scaling_ratio(this_record_x_units, first_record_x_units)
891
+ y_units_ratio = get_units_scaling_ratio(this_record_y_units, first_record_y_units)
892
+ #A record could have more than one data series, but they will all have the same units.
893
+ #Thus, we use a function that will scale all of the dataseries at one time.
894
+ scaled_fig_dict = scale_fig_dict_values(fig_dict_to_merge_in, x_units_ratio, y_units_ratio)
895
+ #now, add the scaled data objects to the original one.
896
+ #This is fairly easy using a list extend.
897
+ self.fig_dict["data"].extend(scaled_fig_dict["data"])
167
898
 
168
- def set_plot_type_one_data_series(self, data_series_index, plot_type):
169
- fields_dict = plot_type_to_field_values(plot_type)
170
- #get the data_series_dict.
171
- data_series_dict = self.fig_dict['data'][data_series_index]
172
- #update the data_series_dict.
173
- if fields_dict.get("mode_field"):
174
- data_series_dict["mode"] = fields_dict["mode_field"]
175
- if fields_dict.get("type_field"):
176
- data_series_dict["type"] = fields_dict["type_field"]
177
- if fields_dict.get("line_shape_field") != "":
178
- data_series_dict.setdefault("line", {"shape": ''}) # Creates the field if it does not already exist.
179
- data_series_dict["line"]["shape"] = fields_dict["line_shape_field"]
180
899
 
181
- #now put the data_series_dict back:
182
- self.fig_dict['data'][data_series_index] = data_series_dict
900
+
901
+ def import_from_dict(self, fig_dict):
902
+ self.fig_dict = fig_dict
903
+
904
+ def import_from_file(self, json_filename_or_object):
905
+ self.import_from_json(json_filename_or_object)
906
+
907
+ #The argument can be a filename string, a JSON string, or a dictionary.
908
+ def import_from_json(self, json_filename_or_object):
909
+ if type(json_filename_or_object) == type(""): #assume it's a json_string or filename_and_path.
910
+ try:
911
+ record = json.loads(json_filename_or_object) #first check if it's a json string.
912
+ except json.JSONDecodeError as e1: # Catch specific exception
913
+ try:
914
+ import os
915
+ #if the filename does not exist, then we'll check if adding ".json" fixes the problem.
916
+ if not os.path.exists(json_filename_or_object):
917
+ json_added_filename = json_filename_or_object + ".json"
918
+ if os.path.exists(json_added_filename): json_filename_or_object = json_added_filename #only change the filename if the json_filename exists.
919
+ # Open the file in read mode with UTF-8 encoding
920
+ with open(json_filename_or_object, "r", encoding="utf-8") as file:
921
+ # Read the entire content of the file
922
+ record = file.read().strip() # Stripping leading/trailing whitespace
923
+ self.fig_dict = json.loads(record)
924
+ return self.fig_dict
925
+ except json.JSONDecodeError as e2: # Catch specific exception
926
+ print(f"JSON loading failed on record: {record}. Error: {e1} when trying to parse as a json directly, and {e2} when trying to use as a filename. You may want to try opening your JSON file in VS Code or in an online JSON Validator. Does your json have double quotes around strings? Single quotes around strings is allowed in python, but disallowed in JSON specifications. You may also need to check how Booleans and other aspects are defined in JSON.") # Improved error reporting
927
+ else:
928
+ self.fig_dict = json_filename_or_object
929
+ return self.fig_dict
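A brief usage sketch of the import methods above; the filename and record variable are illustrative assumptions, not from the package source:

    record.import_from_file("my_record")   # tries "my_record", then "my_record.json" if the bare name is not found
    record.import_from_json('{"comments": "", "datatype": "", "data": [], "layout": {}}')  # a JSON string also works
    record.import_from_dict({"data": [], "layout": {}})  # a dictionary is stored directly as fig_dict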
183
930
 
184
- def set_plot_type_all_series(self, plot_type):
185
- """
186
- Sets the plot_type field for the all data series.
187
- options are: scatter, spline, scatter_spline
188
- """
189
- self.plot_type = plot_type
190
- for data_series_index in range(len(self.fig_dict['data'])): #works with array indexing.
191
- self.set_plot_type_one_data_series(data_series_index, plot_type)
192
-
193
-
194
- def update_plot_types(self, plot_type=None):
195
- """
196
- updates the plot types for any existing data series.
197
-
198
- """
199
- #If optional argument not provided, take class instance setting.
200
- if plot_type == None:
201
- plot_type = self.plot_type
202
- #If the plot_type is not blank, use it for all series.
203
- if plot_type != "":
204
- self.set_plot_type_all_series(plot_type)
205
-
206
931
  def set_datatype(self, datatype):
207
932
  """
208
933
  Sets the datatype field used as the experiment type or schema identifier.
@@ -222,7 +947,7 @@ class JSONGrapherRecord:
222
947
  Updates the title of the graph in the layout dictionary.
223
948
  graph_title (str): The new title to set for the graph.
224
949
  """
225
- self.fig_dict['layout']['title'] = graph_title
950
+ self.fig_dict['layout']['title']['text'] = graph_title
226
951
 
227
952
  def set_x_axis_label_including_units(self, x_axis_label_including_units, remove_plural_units=True):
228
953
  """
@@ -231,8 +956,9 @@ class JSONGrapherRecord:
231
956
  """
232
957
  if "xaxis" not in self.fig_dict['layout'] or not isinstance(self.fig_dict['layout'].get("xaxis"), dict):
233
958
  self.fig_dict['layout']["xaxis"] = {} # Initialize x-axis as a dictionary if it doesn't exist.
234
- validation_result, warnings_list, x_axis_label_including_units = validate_JSONGrapher_axis_label(x_axis_label_including_units, axis_name="x", remove_plural_units=remove_plural_units)
235
- self.fig_dict['layout']["xaxis"]["title"] = x_axis_label_including_units
959
+ _validation_result, _warnings_list, x_axis_label_including_units = validate_JSONGrapher_axis_label(x_axis_label_including_units, axis_name="x", remove_plural_units=remove_plural_units)
960
+ #setdefault avoids problems for missing fields.
961
+ self.fig_dict.setdefault("layout", {}).setdefault("xaxis", {}).setdefault("title", {})["text"] = x_axis_label_including_units
236
962
 
237
963
  def set_y_axis_label_including_units(self, y_axis_label_including_units, remove_plural_units=True):
238
964
  """
@@ -240,27 +966,50 @@ class JSONGrapherRecord:
240
966
  yaxis_title (str): The new title to set for the y-axis.
241
967
  """
242
968
  if "yaxis" not in self.fig_dict['layout'] or not isinstance(self.fig_dict['layout'].get("yaxis"), dict):
243
- self.fig_dict['layout']["yaxis"] = {} # Initialize y-axis as a dictionary if it doesn't exist.
244
-
245
- validation_result, warnings_list, y_axis_label_including_units = validate_JSONGrapher_axis_label(y_axis_label_including_units, axis_name="y", remove_plural_units=remove_plural_units)
246
- self.fig_dict['layout']["yaxis"]["title"] = y_axis_label_including_units
969
+ self.fig_dict['layout']["yaxis"] = {} # Initialize y-axis as a dictionary if it doesn't exist.
970
+ _validation_result, _warnings_list, y_axis_label_including_units = validate_JSONGrapher_axis_label(y_axis_label_including_units, axis_name="y", remove_plural_units=remove_plural_units)
971
+ #setdefault avoids problems for missing fields.
972
+ self.fig_dict.setdefault("layout", {}).setdefault("yaxis", {}).setdefault("title", {})["text"] = y_axis_label_including_units
247
973
 
248
- def set_layout(self, comments="", graph_title="", x_axis_label_including_units="", y_axis_label_including_units="", x_axis_comments="",y_axis_comments="", remove_plural_units=True):
249
- # comments: General comments about the layout.
974
+ def set_z_axis_label_including_units(self, z_axis_label_including_units, remove_plural_units=True):
975
+ """
976
+ Updates the title of the z-axis in the layout dictionary.
977
+ zaxis_title (str): The new title to set for the z-axis.
978
+ """
979
+ if "zaxis" not in self.fig_dict['layout'] or not isinstance(self.fig_dict['layout'].get("zaxis"), dict):
980
+ self.fig_dict['layout']["zaxis"] = {} # Initialize y-axis as a dictionary if it doesn't exist.
981
+ self.fig_dict['layout']["zaxis"]["title"] = {} # Initialize y-axis as a dictionary if it doesn't exist.
982
+ _validation_result, _warnings_list, z_axis_label_including_units = validate_JSONGrapher_axis_label(z_axis_label_including_units, axis_name="z", remove_plural_units=remove_plural_units)
983
+ #setdefault avoids problems for missing fields.
984
+ self.fig_dict.setdefault("layout", {}).setdefault("zaxis", {}).setdefault("title", {})["text"] = z_axis_label_including_units
985
+
986
+ #function to set the min and max of the x axis in plotly way.
987
+ def set_x_axis_range(self, min_value, max_value):
988
+ self.fig_dict["layout"]["xaxis"][0] = min_value
989
+ self.fig_dict["layout"]["xaxis"][1] = max_value
990
+ #function to set the min and max of the y axis in plotly way.
991
+ def set_y_axis_range(self, min_value, max_value):
992
+ self.fig_dict["layout"]["yaxis"][0] = min_value
993
+ self.fig_dict["layout"]["yaxis"][1] = max_value
994
+
995
+ #function to scale the values in the data series by arbitrary amounts.
996
+ def scale_record(self, num_to_scale_x_values_by = 1, num_to_scale_y_values_by = 1):
997
+ self.fig_dict = scale_fig_dict_values(self.fig_dict, num_to_scale_x_values_by=num_to_scale_x_values_by, num_to_scale_y_values_by=num_to_scale_y_values_by)
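A short usage sketch of the axis-label, axis-range, and scaling helpers above; the labels and numbers are illustrative assumptions, not from the package source:

    record.set_x_axis_label_including_units("Time (s)")
    record.set_y_axis_label_including_units("Concentration (mol/L)")
    record.set_x_axis_range(0, 100)                      # stored as a [min, max] range on layout.xaxis
    record.scale_record(num_to_scale_y_values_by=1000)   # multiply all y values, e.g. mol/L data expressed as mmol/L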
998
+
999
+ def set_layout_fields(self, comments="", graph_title="", x_axis_label_including_units="", y_axis_label_including_units="", x_axis_comments="",y_axis_comments="", remove_plural_units=True):
1000
+ # comments: General comments about the layout. Allowed by JSONGrapher, but will be removed if converted to a plotly object.
250
1001
  # graph_title: Title of the graph.
251
1002
  # xaxis_title: Title of the x-axis, including units.
252
- # xaxis_comments: Comments related to the x-axis.
1003
+ # xaxis_comments: Comments related to the x-axis. Allowed by JSONGrapher, but will be removed if converted to a plotly object.
253
1004
  # yaxis_title: Title of the y-axis, including units.
254
- # yaxis_comments: Comments related to the y-axis.
1005
+ # yaxis_comments: Comments related to the y-axis. Allowed by JSONGrapher, but will be removed if converted to a plotly object.
1006
+
1007
+ _validation_result, _warnings_list, x_axis_label_including_units = validate_JSONGrapher_axis_label(x_axis_label_including_units, axis_name="x", remove_plural_units=remove_plural_units)
1008
+ _validation_result, _warnings_list, y_axis_label_including_units = validate_JSONGrapher_axis_label(y_axis_label_including_units, axis_name="y", remove_plural_units=remove_plural_units)
1009
+ self.fig_dict['layout']["title"]['text'] = graph_title
1010
+ self.fig_dict['layout']["xaxis"]["title"]['text'] = x_axis_label_including_units
1011
+ self.fig_dict['layout']["yaxis"]["title"]['text'] = y_axis_label_including_units
255
1012
 
256
- validation_result, warnings_list, x_axis_label_including_units = validate_JSONGrapher_axis_label(x_axis_label_including_units, axis_name="x", remove_plural_units=remove_plural_units)
257
- validation_result, warnings_list, y_axis_label_including_units = validate_JSONGrapher_axis_label(y_axis_label_including_units, axis_name="y", remove_plural_units=remove_plural_units)
258
- self.fig_dict['layout'] = {
259
- "title": graph_title,
260
- "xaxis": {"title": x_axis_label_including_units},
261
- "yaxis": {"title": y_axis_label_including_units}
262
- }
263
-
264
1013
  #populate any optional fields, if provided:
265
1014
  if len(comments) > 0:
266
1015
  self.fig_dict['layout']["comments"] = comments
@@ -268,17 +1017,28 @@ class JSONGrapherRecord:
268
1017
  self.fig_dict['layout']["xaxis"]["comments"] = x_axis_comments
269
1018
  if len(y_axis_comments) > 0:
270
1019
  self.fig_dict['layout']["yaxis"]["comments"] = y_axis_comments
271
-
272
-
273
1020
  return self.fig_dict['layout']
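A usage sketch for set_layout_fields; the titles and labels are illustrative assumptions, not from the package source:

    record.set_layout_fields(graph_title="Rate vs. Temperature",
                             x_axis_label_including_units="Temperature (K)",
                             y_axis_label_including_units="Rate (mol/s)")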
274
1021
 
275
- #TODO: add record validation to this function.
276
- def export_to_json_file(self, filename, update_and_validate=True, validate=True, remove_remaining_hints=False):
1022
+ #This function validates the output before exporting, and also has an option of removing hints.
1023
+ #The update_and_validate function will clean for plotly.
1024
+ #simulate all series will simulate any series as needed.
1025
+ #TODO: need to add an "include_formatting" option
1026
+ def export_to_json_file(self, filename, update_and_validate=True, validate=True, simulate_all_series = True, remove_simulate_fields= False, remove_equation_fields= False, remove_remaining_hints=False):
277
1027
  """
278
1028
  writes the json to a file
279
1029
  returns the json as a dictionary.
1030
+ update_and_validate function will clean for plotly. One can alternatively only validate.
1031
+ optionally simulates all series that have a simulate field (does so by default)
1032
+ optionally removes the simulate field from all series that have a simulate field (does not do so by default)
280
1033
  optionally removes hints before export and return.
281
1034
  """
1035
+ #if simulate_all_series is true, we'll try to simulate any series that need it, then clean the simulate fields out if requested.
1036
+ if simulate_all_series == True:
1037
+ self.fig_dict = simulate_as_needed_in_fig_dict(self.fig_dict)
1038
+ if remove_simulate_fields == True:
1039
+ self.fig_dict = clean_json_fig_dict(self.fig_dict, fields_to_update=['simulate'])
1040
+ if remove_equation_fields == True:
1041
+ self.fig_dict = clean_json_fig_dict(self.fig_dict, fields_to_update=['equation'])
282
1042
  if remove_remaining_hints == True:
283
1043
  self.remove_hints()
284
1044
  if update_and_validate == True: #this will do some automatic 'corrections' during the validation.
@@ -291,29 +1051,154 @@ class JSONGrapherRecord:
291
1051
  # Check if the filename has an extension and append `.json` if not
292
1052
  if '.json' not in filename.lower():
293
1053
  filename += ".json"
294
- #Write to file.
295
- with open(filename, 'w') as f:
1054
+ #Write to file using UTF-8 encoding.
1055
+ with open(filename, 'w', encoding='utf-8') as f:
296
1056
  json.dump(self.fig_dict, f, indent=4)
297
1057
  return self.fig_dict
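A usage sketch for exporting; the filename is an illustrative assumption, not from the package source:

    record.export_to_json_file("my_record",               # ".json" is appended automatically if missing
                               simulate_all_series=True,  # run any series that carry a "simulate" field
                               remove_simulate_fields=False,
                               remove_remaining_hints=True)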
298
1058
 
299
- def get_matplotlib_fig(self, update_and_validate=True):
1059
+ #simulate all series will simulate any series as needed.
1060
+ def get_plotly_fig(self, plot_style=None, update_and_validate=True, simulate_all_series=True, evaluate_all_equations=True, adjust_implicit_data_ranges=True):
1061
+ """
1062
+ Generates a Plotly figure from the stored fig_dict, performing simulations and equations as needed.
1063
+ By default, it will apply the default plot style that is still hard-coded into JSONGrapher.
1064
+
1065
+ Args:
1066
+ plot_style: String or dictionary of the style to apply. Use '' to skip applying a new style, or provide a list of length two containing a layout style and a trace styles collection. "none" removes all styling.
1067
+ simulate_all_series (bool): If True, performs simulations for applicable series.
1068
+ update_and_validate (bool): If True, applies automatic corrections to fig_dict.
1069
+ evaluate_all_equations (bool): If True, evaluates all equation-based series.
1070
+ adjust_implicit_data_ranges (bool): If True, modifies ranges for implicit data series.
1071
+
1072
+ Returns:
1073
+ plotly Figure: A validated Plotly figure object based on fig_dict.
1074
+ """
1075
+ if plot_style is None: #should not initialize mutable objects in arguments line, so doing here.
1076
+ plot_style = {"layout_style": "", "trace_styles_collection": ""} # Fresh dictionary per function call
1077
+
1078
+ import plotly.io as pio
1079
+ import copy
1080
+ if plot_style == {"layout_style":"", "trace_styles_collection":""}: #if the plot_style received is the default, we'll check if the fig_dict has a plot_style.
1081
+ plot_style = self.fig_dict.get("plot_style", {"layout_style":"", "trace_styles_collection":""}) #retrieve from self.fig_dict, and use default if not there.
1082
+ #This code *does not* simply modify self.fig_dict. It creates a deepcopy and then puts the final x y data back in.
1083
+ self.fig_dict = execute_implicit_data_series_operations(self.fig_dict,
1084
+ simulate_all_series=simulate_all_series,
1085
+ evaluate_all_equations=evaluate_all_equations,
1086
+ adjust_implicit_data_ranges=adjust_implicit_data_ranges)
1087
+ #Regardless of implicit data series, we make a fig_dict copy, because we will clean self.fig_dict for creating the new plotting fig object.
1088
+ original_fig_dict = copy.deepcopy(self.fig_dict)
1089
+ #before cleaning and validating, we'll apply styles.
1090
+ plot_style = parse_plot_style(plot_style=plot_style)
1091
+ self.apply_plot_style(plot_style=plot_style)
1092
+ #Now we clean out the fields and make a plotly object.
1093
+ if update_and_validate == True: #this will do some automatic 'corrections' during the validation.
1094
+ self.update_and_validate_JSONGrapher_record() #this is the line that cleans "self.fig_dict"
1095
+ self.fig_dict = clean_json_fig_dict(self.fig_dict, fields_to_update=['simulate', 'custom_units_chevrons', 'equation', 'trace_style', '3d_axes', 'bubble'])
1096
+ fig = pio.from_json(json.dumps(self.fig_dict))
1097
+ #restore the original fig_dict.
1098
+ self.fig_dict = original_fig_dict
1099
+ return fig
1100
+
1101
+ #Just a wrapper around plot_with_plotly.
1102
+ def plot(self, plot_style = None, update_and_validate=True, simulate_all_series=True, evaluate_all_equations=True, adjust_implicit_data_ranges=True):
1103
+ if plot_style is None: #should not initialize mutable objects in arguments line, so doing here.
1104
+ plot_style = {"layout_style": "", "trace_styles_collection": ""} # Fresh dictionary per function call
1105
+ return self.plot_with_plotly(plot_style=plot_style, update_and_validate=update_and_validate, simulate_all_series=simulate_all_series, evaluate_all_equations=evaluate_all_equations, adjust_implicit_data_ranges=adjust_implicit_data_ranges)
1106
+
1107
+ #simulate all series will simulate any series as needed. If changing this function's arguments, also change those for self.plot()
1108
+ def plot_with_plotly(self, plot_style = None, update_and_validate=True, simulate_all_series=True, evaluate_all_equations=True, adjust_implicit_data_ranges=True):
1109
+ if plot_style is None: #should not initialize mutable objects in arguments line, so doing here.
1110
+ plot_style = {"layout_style": "", "trace_styles_collection": ""} # Fresh dictionary per function call
1111
+ fig = self.get_plotly_fig(plot_style=plot_style,
1112
+ simulate_all_series=simulate_all_series,
1113
+ update_and_validate=update_and_validate,
1114
+ evaluate_all_equations=evaluate_all_equations,
1115
+ adjust_implicit_data_ranges=adjust_implicit_data_ranges)
1116
+ fig.show()
1117
+ #No need for fig.close() for plotly figures.
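A plotting usage sketch based on the methods above; the style names follow the conventions described in the styles section of this file and are otherwise illustrative assumptions:

    record.plot()                                   # thin wrapper around plot_with_plotly()
    record.plot_with_plotly(plot_style="default")   # a single string is applied as both layout_style and trace_styles_collection
    fig = record.get_plotly_fig(plot_style="none")  # "none" applies no style changes beyond what the record already has
    fig.update_layout(template="plotly_dark")       # the return value is an ordinary plotly Figure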
1118
+
1119
+
1120
+ #simulate all series will simulate any series as needed.
1121
+ def export_to_plotly_png(self, filename, simulate_all_series = True, update_and_validate=True, timeout=10):
1122
+ fig = self.get_plotly_fig(simulate_all_series = simulate_all_series, update_and_validate=update_and_validate)
1123
+ # Save the figure to a file, but use the timeout version.
1124
+ self.export_plotly_image_with_timeout(plotly_fig = fig, filename=filename, timeout=timeout)
1125
+
1126
+ def export_plotly_image_with_timeout(self, plotly_fig, filename, timeout=10):
1127
+ # Ensure filename ends with .png
1128
+ if not filename.lower().endswith(".png"):
1129
+ filename += ".png"
1130
+ import plotly.io as pio
1131
+ pio.kaleido.scope.mathjax = None
1132
+ fig = plotly_fig
1133
+
1134
+ def export():
1135
+ try:
1136
+ fig.write_image(filename, engine="kaleido")
1137
+ except Exception as e: # This is so VS code pylint does not flag this line. pylint: disable=broad-except
1138
+ print(f"Export failed: {e}")
1139
+
1140
+ import threading
1141
+ thread = threading.Thread(target=export, daemon=True) # Daemon ensures cleanup
1142
+ thread.start()
1143
+ thread.join(timeout=timeout) # Wait up to the specified timeout, in seconds
1144
+ if thread.is_alive():
1145
+ print("Skipping Plotly png export: Operation timed out. Plotly image export often does not work from Python. Consider using export_to_matplotlib_png.")
1146
+
1147
+ #update_and_validate will 'clean' for plotly.
1148
+ #In the case of creating a matplotlib figure, this really just means removing excess fields.
1149
+ #simulate all series will simulate any series as needed.
1150
+ def get_matplotlib_fig(self, plot_style = None, update_and_validate=True, simulate_all_series = True, evaluate_all_equations = True, adjust_implicit_data_ranges=True):
1151
+ """
1152
+ Generates a matplotlib figure from the stored fig_dict, performing simulations and equations as needed.
1153
+
1154
+ Args:
1155
+ simulate_all_series (bool): If True, performs simulations for applicable series.
1156
+ update_and_validate (bool): If True, applies automatic corrections to fig_dict.
1157
+ evaluate_all_equations (bool): If True, evaluates all equation-based series.
1158
+ adjust_implicit_data_ranges (bool): If True, modifies ranges for implicit data series.
1159
+
1160
+ Returns:
1161
+ matplotlib Figure: A validated matplotlib figure object based on fig_dict.
1162
+ """
1163
+ if plot_style is None: #should not initialize mutable objects in arguments line, so doing here.
1164
+ plot_style = {"layout_style": "", "trace_styles_collection": ""} # Fresh dictionary per function call
1165
+ import copy
1166
+ if plot_style == {"layout_style":"", "trace_styles_collection":""}: #if the plot_style received is the default, we'll check if the fig_dict has a plot_style.
1167
+ plot_style = self.fig_dict.get("plot_style", {"layout_style":"", "trace_styles_collection":""})
1168
+ #This code *does not* simply modify self.fig_dict. It creates a deepcopy and then puts the final x y data back in.
1169
+ self.fig_dict = execute_implicit_data_series_operations(self.fig_dict,
1170
+ simulate_all_series=simulate_all_series,
1171
+ evaluate_all_equations=evaluate_all_equations,
1172
+ adjust_implicit_data_ranges=adjust_implicit_data_ranges)
1173
+ #Regardless of implicit data series, we make a fig_dict copy, because we will clean self.fig_dict for creating the new plotting fig object.
1174
+ original_fig_dict = copy.deepcopy(self.fig_dict) #we will get a copy, because otherwise the original fig_dict will be forced to be overwritten.
1175
+ #before cleaning and validating, we'll apply styles.
1176
+ plot_style = parse_plot_style(plot_style=plot_style)
1177
+ self.apply_plot_style(plot_style=plot_style)
300
1178
  if update_and_validate == True: #this will do some automatic 'corrections' during the validation.
301
1179
  self.update_and_validate_JSONGrapher_record()
1180
+ self.fig_dict = clean_json_fig_dict(self.fig_dict, fields_to_update=['simulate', 'custom_units_chevrons', 'equation', 'trace_style'])
302
1181
  fig = convert_JSONGrapher_dict_to_matplotlib_fig(self.fig_dict)
1182
+ self.fig_dict = original_fig_dict #restore the original fig_dict.
303
1183
  return fig
304
1184
 
305
- def plot_with_matplotlib(self, update_and_validate=True):
1185
+ #simulate all series will simulate any series as needed.
1186
+ def plot_with_matplotlib(self, update_and_validate=True, simulate_all_series=True, evaluate_all_equations=True, adjust_implicit_data_ranges=True):
306
1187
  import matplotlib.pyplot as plt
307
- fig = self.get_matplotlib_fig(update_and_validate=update_and_validate)
1188
+ fig = self.get_matplotlib_fig(simulate_all_series=simulate_all_series,
1189
+ update_and_validate=update_and_validate,
1190
+ evaluate_all_equations=evaluate_all_equations,
1191
+ adjust_implicit_data_ranges=adjust_implicit_data_ranges)
308
1192
  plt.show()
309
1193
  plt.close(fig) #remove fig from memory.
310
1194
 
311
- def export_to_matplotlib_png(self, filename, update_and_validate=True):
1195
+ #simulate all series will simulate any series as needed.
1196
+ def export_to_matplotlib_png(self, filename, simulate_all_series = True, update_and_validate=True):
312
1197
  import matplotlib.pyplot as plt
313
1198
  # Ensure filename ends with .png
314
1199
  if not filename.lower().endswith(".png"):
315
1200
  filename += ".png"
316
- fig = self.get_matplotlib_fig(update_and_validate=update_and_validate)
1201
+ fig = self.get_matplotlib_fig(simulate_all_series = simulate_all_series, update_and_validate=update_and_validate)
317
1202
  # Save the figure to a file
318
1203
  fig.savefig(filename)
319
1204
  plt.close(fig) #remove fig from memory.
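A brief export sketch; the filenames are illustrative assumptions, not from the package source:

    record.export_to_matplotlib_png("my_plot")           # ".png" is appended if missing
    record.export_to_plotly_png("my_plot", timeout=10)   # prints a warning and skips if the kaleido export hangs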
@@ -378,12 +1263,136 @@ class JSONGrapherRecord:
378
1263
  if current_field.get(current_path_key, "") == hint_text:
379
1264
  current_field[current_path_key] = ""
380
1265
 
1266
+ ## Start of section of JSONGrapher class functions related to styles ##
1267
+
1268
+ def apply_plot_style(self, plot_style= None):
1269
+ #the plot_style can be a string, or a plot_style dictionary {"layout_style":"default", "trace_styles_collection":"default"} or a list of length two with those two items.
1270
+ #The plot_style dictionary can include a pair of dictionaries.
1271
+ #if apply style is called directly, we will first put the plot_style into the plot_style field
1272
+ #This way, the style will stay.
1273
+ if plot_style is None: #should not initialize mutable objects in arguments line, so doing here.
1274
+ plot_style = {"layout_style": "", "trace_styles_collection": ""} # Fresh dictionary per function call
1275
+ self.fig_dict['plot_style'] = plot_style
1276
+ self.fig_dict = apply_plot_style_to_plotly_dict(self.fig_dict, plot_style=plot_style)
1277
+ def remove_plot_style(self):
1278
+ self.fig_dict.pop("plot_style") #This line removes the field of plot_style from the fig_dict.
1279
+ self.fig_dict = remove_plot_style_from_plotly_dict(self.fig_dict) #This line removes the actual formatting from the fig_dict.
1280
+ def set_layout_style(self, layout_style):
1281
+ if "plot_style" not in self.fig_dict: #create it not present.
1282
+ self.fig_dict["plot_style"] = {} # Initialize if missing
1283
+ self.fig_dict["plot_style"]["layout_style"] = layout_style
1284
+ def remove_layout_style_setting(self):
1285
+ if "layout_style" in self.fig_dict["plot_style"]:
1286
+ self.fig_dict["plot_style"].pop("layout_style")
1287
+ def extract_layout_style(self):
1288
+ layout_style = extract_layout_style_from_plotly_dict(self.fig_dict)
1289
+ return layout_style
1290
+ def apply_trace_style_by_index(self, data_series_index, trace_styles_collection='', trace_style=''):
1291
+ if trace_styles_collection == '':
1292
+ self.fig_dict.setdefault("plot_style",{}) #create the plot_style dictionary if it's not there. Else, return current value.
1293
+ trace_styles_collection = self.fig_dict["plot_style"].get("trace_styles_collection", '') #check if there is a trace_styles_collection within it, and use that. If it's not there, then use ''.
1294
+ #trace_style should be a dictionary, but can be a string.
1295
+ data_series = self.fig_dict["data"][data_series_index]
1296
+ data_series = apply_trace_style_to_single_data_series(data_series, trace_styles_collection=trace_styles_collection, trace_style_to_apply=trace_style) #this is the 'external' function, not the one in the class.
1297
+ self.fig_dict["data"][data_series_index] = data_series
1298
+ return data_series
1299
+ def set_trace_style_one_data_series(self, data_series_index, trace_style):
1300
+ self.fig_dict['data'][data_series_index]["trace_style"] = trace_style
1301
+ return self.fig_dict['data'][data_series_index]
1302
+ def set_trace_styles_collection(self, trace_styles_collection):
1303
+ """
1304
+ Sets the plot_style["trace_styles_collection"] field for the all data series.
1305
+ options are: scatter, spline, scatter_spline
1306
+ """
1307
+ self.fig_dict["plot_style"]["trace_styles_collection"] = trace_styles_collection
1308
+ def remove_trace_styles_collection_setting(self):
1309
+ if "trace_styles_collection" in self.fig_dict["plot_style"]:
1310
+ self.fig_dict["plot_style"].pop("trace_styles_collection")
1311
+ def set_trace_style_all_series(self, trace_style):
1312
+ """
1313
+ Sets the trace_style field for all data series.
1314
+ options are: scatter, spline, scatter_spline
1315
+ """
1316
+ for data_series_index in range(len(self.fig_dict['data'])): #works with array indexing.
1317
+ self.set_trace_style_one_data_series(data_series_index, trace_style)
1318
+ def extract_trace_styles_collection(self, new_trace_styles_collection_name='',
1319
+ indices_of_data_series_to_extract_styles_from=None,
1320
+ new_trace_style_names_list=None, extract_colors=False):
1321
+ """
1322
+ Extracts trace style collection
1323
+ :param new_trace_styles_collection_name: str, Name of the new collection.
1324
+ :param indices_of_data_series_to_extract_styles_from: list, Indices of series to extract styles from.
1325
+ :param new_trace_style_names_list: list, Names for the new trace styles.
1326
+ """
1327
+ if indices_of_data_series_to_extract_styles_from is None: # should not initialize mutable objects in arguments line, so doing here.
1328
+ indices_of_data_series_to_extract_styles_from = []
1329
+ if new_trace_style_names_list is None: # should not initialize mutable objects in arguments line, so doing here.
1330
+ new_trace_style_names_list = []
1331
+ fig_dict = self.fig_dict
1332
+ new_trace_styles_collection_dictionary_without_name = {}
1333
+ if new_trace_styles_collection_name == '':
1334
+ new_trace_styles_collection_name = 'replace_this_with_your_trace_styles_collection_name'
1335
+ if indices_of_data_series_to_extract_styles_from == []:
1336
+ indices_of_data_series_to_extract_styles_from = range(len(fig_dict["data"]))
1337
+ if new_trace_style_names_list == []:
1338
+ for data_series_index in indices_of_data_series_to_extract_styles_from:
1339
+ data_series_dict = fig_dict["data"][data_series_index]
1340
+ trace_style_name = data_series_dict.get('trace_style', '') # returns a blank string if not there.
1341
+ if trace_style_name == '':
1342
+ trace_style_name = 'custom_trace_style' + str(data_series_index)
1343
+ if trace_style_name not in new_trace_style_names_list:
1344
+ pass
1345
+ else:
1346
+ trace_style_name = trace_style_name + str(data_series_index)
1347
+ new_trace_style_names_list.append(trace_style_name)
1348
+ if len(indices_of_data_series_to_extract_styles_from) != len(new_trace_style_names_list):
1349
+ raise ValueError("Error: The input for indices_of_data_series_to_extract_styles_from is not compatible with the input for new_trace_style_names_list. There is a difference in lengths after the automatic parsing and filling that occurs.")
1350
+ for index_to_extract_from in indices_of_data_series_to_extract_styles_from:
1351
+ new_trace_style_name = new_trace_style_names_list[index_to_extract_from]
1352
+ extracted_trace_style = extract_trace_style_by_index(fig_dict, index_to_extract_from, new_trace_style_name=new_trace_style_names_list[index_to_extract_from], extract_colors=extract_colors)
1353
+ new_trace_styles_collection_dictionary_without_name[new_trace_style_name] = extracted_trace_style[new_trace_style_name]
1354
+ return new_trace_styles_collection_name, new_trace_styles_collection_dictionary_without_name
1355
+ def export_trace_styles_collection(self, new_trace_styles_collection_name='',
1356
+ indices_of_data_series_to_extract_styles_from=None,
1357
+ new_trace_style_names_list=None, filename='', extract_colors=False):
1358
+ """
1359
+ Exports trace style collection while ensuring proper handling of mutable default arguments.
1360
+
1361
+ :param new_trace_styles_collection_name: str, Name of the new collection.
1362
+ :param indices_of_data_series_to_extract_styles_from: list, Indices of series to extract styles from.
1363
+ :param new_trace_style_names_list: list, Names for the new trace styles.
1364
+ :param filename: str, Name of the file to export to.
1365
+ """
1366
+ if indices_of_data_series_to_extract_styles_from is None: # should not initialize mutable objects in arguments line, so doing here.
1367
+ indices_of_data_series_to_extract_styles_from = []
1368
+ if new_trace_style_names_list is None: # should not initialize mutable objects in arguments line, so doing here.
1369
+ new_trace_style_names_list = []
1370
+ auto_new_trace_styles_collection_name, new_trace_styles_collection_dictionary_without_name = self.extract_trace_styles_collection(new_trace_styles_collection_name=new_trace_styles_collection_name, indices_of_data_series_to_extract_styles_from=indices_of_data_series_to_extract_styles_from, new_trace_style_names_list = new_trace_style_names_list, extract_colors=extract_colors)
1371
+ if new_trace_styles_collection_name == '':
1372
+ new_trace_styles_collection_name = auto_new_trace_styles_collection_name
1373
+ if filename == '':
1374
+ filename = new_trace_styles_collection_name
1375
+ write_trace_styles_collection_to_file(trace_styles_collection=new_trace_styles_collection_dictionary_without_name, trace_styles_collection_name=new_trace_styles_collection_name, filename=filename)
1376
+ return new_trace_styles_collection_name, new_trace_styles_collection_dictionary_without_name
1377
+ def extract_trace_style_by_index(self, data_series_index, new_trace_style_name='', extract_colors=False):
1378
+ extracted_trace_style = extract_trace_style_by_index(self.fig_dict, data_series_index, new_trace_style_name=new_trace_style_name, extract_colors=extract_colors)
1379
+ return extracted_trace_style
1380
+ def export_trace_style_by_index(self, data_series_index, new_trace_style_name='', filename='', extract_colors=False):
1381
+ extracted_trace_style = extract_trace_style_by_index(self.fig_dict, data_series_index, new_trace_style_name=new_trace_style_name, extract_colors=extract_colors)
1382
+ new_trace_style_name = list(extracted_trace_style.keys())[0] #the extracted_trace_style will have a single key which is the style name.
1383
+ if filename == '':
1384
+ filename = new_trace_style_name
1385
+ write_trace_style_to_file(trace_style_dict=extracted_trace_style[new_trace_style_name],trace_style_name=new_trace_style_name, filename=filename)
1386
+ return extracted_trace_style
1387
+ ## End of section of JSONGrapher class functions related to styles ##
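A usage sketch of the style-related methods above, assuming the record already contains data series; the style and collection names are illustrative assumptions:

    record.set_layout_style("default")              # stored under fig_dict["plot_style"]["layout_style"]
    record.set_trace_styles_collection("default")   # applied to every data series at plot time
    record.set_trace_style_all_series("scatter")    # sets each series' trace_style field
    name, styles = record.extract_trace_styles_collection(new_trace_styles_collection_name="my_styles")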
1388
+
381
1389
  #Make some pointers to external functions, for convenience, so people can use syntax like record.function_name() if desired.
382
1390
  def validate_JSONGrapher_record(self):
383
1391
  validate_JSONGrapher_record(self)
384
1392
  def update_and_validate_JSONGrapher_record(self):
385
1393
  update_and_validate_JSONGrapher_record(self)
386
1394
 
1395
+
387
1396
  # helper function to validate x axis and y axis labels.
388
1397
  # label string will be the full label including units. Axis_name is typically "x" or "y"
389
1398
  def validate_JSONGrapher_axis_label(label_string, axis_name="", remove_plural_units=True):
@@ -399,7 +1408,6 @@ def validate_JSONGrapher_axis_label(label_string, axis_name="", remove_plural_un
399
1408
  None: Prints warnings if any validation issues are found.
400
1409
  """
401
1410
  warnings_list = []
402
-
403
1411
  #First check if the label is empty.
404
1412
  if label_string == '':
405
1413
  warnings_list.append(f"Your {axis_name} axis label is an empty string. JSONGrapher records should not have empty strings for axis labels.")
@@ -442,15 +1450,18 @@ def units_plural_removal(units_to_check):
442
1450
  - "changed" (Boolean): True, or False, where True means the string was changed to remove an "s" at the end.
443
1451
  - "singularized" (string): The units parsed to be singular, if needed.
444
1452
  """
445
- #first check if we have the module we need. If not, return with no change.
446
-
1453
+ # Check if we have the module we need. If not, return with no change.
447
1454
  try:
448
1455
  import JSONGrapher.units_list as units_list
449
- except:
450
- units_changed_flag = False
451
- return units_changed_flag, units_to_check #return None if there was no test.
452
- #First try to check if units are blank or ends with "s" is in the units list.
1456
+ except ImportError:
1457
+ try:
1458
+ from . import units_list # Attempt local import
1459
+ except ImportError as exc: # If still not present, give up and avoid crashing
1460
+ units_changed_flag = False
1461
+ print(f"Module import failed: {exc}") # Log the error for debugging
1462
+ return units_changed_flag, units_to_check # Return unchanged values
453
1463
 
1464
+ #First try to check if units are blank or ends with "s" is in the units list.
454
1465
  if (units_to_check == "") or (units_to_check[-1] != "s"):
455
1466
  units_changed_flag = False
456
1467
  units_singularized = units_to_check #return if string is blank or does not end with s.
@@ -466,40 +1477,51 @@ def units_plural_removal(units_to_check):
466
1477
  else: #No change if the truncated string isn't found.
467
1478
  units_changed_flag = False
468
1479
  units_singularized = units_to_check
1480
+ else:
1481
+ units_changed_flag = False
1482
+ units_singularized = units_to_check #if it's outside of our known logic, we just return it unchanged.
469
1483
  return units_changed_flag, units_singularized
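Worked examples for units_plural_removal, assuming the bundled units list contains the singular form (hypothetical calls, not from the package tests):

    units_plural_removal("Years")   # -> (True, "Year") when "Year" is found in the known units list
    units_plural_removal("K")       # -> (False, "K"); no trailing "s", so nothing to change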
470
1484
 
471
1485
 
472
1486
  def separate_label_text_from_units(label_with_units):
473
- """
474
- Parses a label with text string and units in parentheses after that to return the two parts.
1487
+ # Check for mismatched parentheses
1488
+ open_parentheses = label_with_units.count('(')
1489
+ close_parentheses = label_with_units.count(')')
1490
+
1491
+ if open_parentheses != close_parentheses:
1492
+ raise ValueError(f"Mismatched parentheses in input string: '{label_with_units}'")
475
1493
 
476
- Args:
477
- value (str): A string containing a label and optional units enclosed in parentheses.
478
- Example: "Time (Years)" or "Speed (km/s)
1494
+ # Default parsed output
1495
+ parsed_output = {"text": label_with_units, "units": ""}
479
1496
 
480
- Returns:
481
- dict: A dictionary with two keys:
482
- - "text" (str): The label text parsed from the input string.
483
- - "units" (str): The units parsed from the input string, or an empty string if no units are present.
484
- """
485
- # Find the position of the first '(' and the last ')'
1497
+ # Extract tentative start and end indices, from the first open and last close parentheses.
486
1498
  start = label_with_units.find('(')
487
1499
  end = label_with_units.rfind(')')
488
-
489
- # Ensure both are found and properly ordered
490
- if start != -1 and end != -1 and end > start:
491
- text_part = label_with_units[:start].strip() # Everything before '('
492
- units_part = label_with_units[start + 1:end].strip() # Everything inside '()'
1500
+
1501
+ # Flag to track if the second check fails
1502
+ second_check_failed = False
1503
+
1504
+ # Ensure removing both first '(' and last ')' doesn't cause misalignment
1505
+ if start != -1 and end != -1:
1506
+ temp_string = label_with_units[:start] + label_with_units[start + 1:end] + label_with_units[end + 1:] # Removing first '(' and last ')'
1507
+ first_closing_paren_after_removal = temp_string.find(')')
1508
+ first_opening_paren_after_removal = temp_string.find('(')
1509
+ if first_opening_paren_after_removal != -1 and first_closing_paren_after_removal < first_opening_paren_after_removal:
1510
+ second_check_failed = True # Set flag if second check fails
1511
+
1512
+ if second_check_failed:
1513
+ #For the units, keep everything from the first '(' onward
1514
+ parsed_output["text"] = label_with_units[:start].strip()
1515
+ parsed_output["units"] = label_with_units[start:].strip()
493
1516
  else:
494
- text_part = label_with_units
495
- units_part = ""
496
- parsed_output = {
497
- "text":text_part,
498
- "units":units_part
499
- }
1517
+ # Extract everything between first '(' and last ')'
1518
+ parsed_output["text"] = label_with_units[:start].strip()
1519
+ parsed_output["units"] = label_with_units[start + 1:end].strip()
1520
+
500
1521
  return parsed_output
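Worked examples for separate_label_text_from_units (hypothetical calls, not from the package tests):

    separate_label_text_from_units("Time (Years)")   # -> {"text": "Time", "units": "Years"}
    separate_label_text_from_units("Speed (km/s)")   # -> {"text": "Speed", "units": "km/s"}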
501
1522
 
502
1523
 
1524
+
503
1525
  def validate_plotly_data_list(data):
504
1526
  """
505
1527
  Validates the entries in a Plotly data array.
@@ -531,31 +1553,32 @@ def validate_plotly_data_list(data):
531
1553
  if not isinstance(trace, dict):
532
1554
  warnings_list.append(f"Trace {i} is not a dictionary.")
533
1555
  continue
534
-
1556
+ if "comments" in trace:
1557
+ warnings_list.append(f"Trace {i} has a comments field within the data. This is allowed by JSONGrapher, but is discouraged by plotly. By default, this will be removed when you export your record.")
535
1558
  # Determine the type based on the fields provided
536
- trace_type = trace.get("type")
537
- if not trace_type:
1559
+ trace_style = trace.get("type")
1560
+ if not trace_style:
538
1561
  # Infer type based on fields and attributes
539
1562
  if "x" in trace and "y" in trace:
540
1563
  if "mode" in trace or "marker" in trace or "line" in trace:
541
- trace_type = "scatter"
1564
+ trace_style = "scatter"
542
1565
  elif "text" in trace or "marker.color" in trace:
543
- trace_type = "bar"
1566
+ trace_style = "bar"
544
1567
  else:
545
- trace_type = "scatter" # Default assumption
1568
+ trace_style = "scatter" # Default assumption
546
1569
  elif "labels" in trace and "values" in trace:
547
- trace_type = "pie"
1570
+ trace_style = "pie"
548
1571
  elif "z" in trace:
549
- trace_type = "heatmap"
1572
+ trace_style = "heatmap"
550
1573
  else:
551
1574
  warnings_list.append(f"Trace {i} cannot be inferred as a valid type.")
552
1575
  continue
553
1576
 
554
1577
  # Check for required fields
555
- required_fields = required_fields_by_type.get(trace_type, [])
1578
+ required_fields = required_fields_by_type.get(trace_style, [])
556
1579
  for field in required_fields:
557
1580
  if field not in trace:
558
- warnings_list.append(f"Trace {i} (type inferred as {trace_type}) is missing required field: {field}.")
1581
+ warnings_list.append(f"Trace {i} (type inferred as {trace_style}) is missing required field: {field}.")
559
1582
 
560
1583
  if warnings_list:
561
1584
  print("Warning: There are some entries in your data list that did not pass validation checks: \n", warnings_list)
@@ -567,6 +1590,7 @@ def parse_units(value):
567
1590
  """
568
1591
  Parses a numerical value and its associated units from a string. This is meant for scientific constants and parameters
569
1592
  such as rate constants, the gravitational constant, or similar.
1593
+ This function is not meant for separating the axis label from its units. For that, use separate_label_text_from_units.
570
1594
 
571
1595
  Args:
572
1596
  value (str): A string containing a numeric value and optional units enclosed in parentheses.
@@ -580,7 +1604,6 @@ def parse_units(value):
580
1604
  # Find the position of the first '(' and the last ')'
581
1605
  start = value.find('(')
582
1606
  end = value.rfind(')')
583
-
584
1607
  # Ensure both are found and properly ordered
585
1608
  if start != -1 and end != -1 and end > start:
586
1609
  number_part = value[:start].strip() # Everything before '('
@@ -597,43 +1620,13 @@ def parse_units(value):
597
1620
 
598
1621
  return parsed_output
599
1622
 
600
- def plot_type_to_field_values(plot_type):
601
- """
602
- Takes in a string that is a plot type, such as "scatter", "scatter_spline", etc.
603
- and returns the field values that would have to go into a plotly data object.
604
-
605
- Returns:
606
- dict: A dictionary with keys and values for the fields that will be ultimately filled.
607
-
608
- To these fields are used in the function set_plot_type_one_data_series
609
-
610
- """
611
-
612
- fields_dict = {}
613
- #initialize some variables.
614
- fields_dict["type_field"] = plot_type
615
- fields_dict["mode_field"] = None
616
- fields_dict["line_shape_field"] = None
617
- # Assign the various types. This list of values was determined 'manually'.
618
- if plot_type == "scatter":
619
- fields_dict["type_field"] = "scatter"
620
- fields_dict["mode_field"] = "markers"
621
- fields_dict["line_shape_field"] = None
622
- elif plot_type == "scatter_spline":
623
- fields_dict["type_field"] = "scatter"
624
- fields_dict["mode_field"] = None
625
- fields_dict["line_shape_field"] = "spline"
626
- elif plot_type == "spline":
627
- fields_dict["type_field"] = None
628
- fields_dict["mode_field"] = 'lines'
629
- fields_dict["line_shape_field"] = "spline"
630
- return fields_dict
631
-
632
1623
  #This function does updating of internal things before validating
633
1624
  #This is used before printing and returning the JSON record.
634
- def update_and_validate_JSONGrapher_record(record):
635
- record.update_plot_types()
1625
+ def update_and_validate_JSONGrapher_record(record, clean_for_plotly=True):
636
1626
  record.validate_JSONGrapher_record()
1627
+ if clean_for_plotly == True:
1628
+ record.fig_dict = clean_json_fig_dict(record.fig_dict)
1629
+ return record
637
1630
 
638
1631
  #TODO: add the ability for this function to check against the schema.
639
1632
  def validate_JSONGrapher_record(record):
@@ -684,8 +1677,11 @@ def validate_JSONGrapher_record(record):
684
1677
  # Validate "title"
685
1678
  if "title" not in layout:
686
1679
  warnings_list.append("Missing 'layout.title' field.")
687
- elif not isinstance(layout["title"], str):
688
- warnings_list.append("'layout.title' should be a string.")
1680
+ # Validate "title.text"
1681
+ elif "text" not in layout["title"]:
1682
+ warnings_list.append("Missing 'layout.title.text' field.")
1683
+ elif not isinstance(layout["title"]["text"], str):
1684
+ warnings_list.append("'layout.title.text' should be a string.")
689
1685
 
690
1686
  # Validate "xaxis"
691
1687
  if "xaxis" not in layout:
@@ -696,8 +1692,10 @@ def validate_JSONGrapher_record(record):
696
1692
  # Validate "xaxis.title"
697
1693
  if "title" not in layout["xaxis"]:
698
1694
  warnings_list.append("Missing 'layout.xaxis.title' field.")
699
- elif not isinstance(layout["xaxis"]["title"], str):
700
- warnings_list.append("'layout.xaxis.title' should be a string.")
1695
+ elif "text" not in layout["xaxis"]["title"]:
1696
+ warnings_list.append("Missing 'layout.xaxis.title.text' field.")
1697
+ elif not isinstance(layout["xaxis"]["title"]["text"], str):
1698
+ warnings_list.append("'layout.xaxis.title.text' should be a string.")
701
1699
 
702
1700
  # Validate "yaxis"
703
1701
  if "yaxis" not in layout:
@@ -708,8 +1706,10 @@ def validate_JSONGrapher_record(record):
708
1706
  # Validate "yaxis.title"
709
1707
  if "title" not in layout["yaxis"]:
710
1708
  warnings_list.append("Missing 'layout.yaxis.title' field.")
711
- elif not isinstance(layout["yaxis"]["title"], str):
712
- warnings_list.append("'layout.yaxis.title' should be a string.")
1709
+ elif "text" not in layout["yaxis"]["title"]:
1710
+ warnings_list.append("Missing 'layout.yaxis.title.text' field.")
1711
+ elif not isinstance(layout["yaxis"]["title"]["text"], str):
1712
+ warnings_list.append("'layout.yaxis.title.text' should be a string.")
713
1713
 
714
1714
  # Return validation result
715
1715
  if warnings_list:
@@ -718,41 +1718,150 @@ def validate_JSONGrapher_record(record):
718
1718
  else:
719
1719
  return True, []
720
1720
 
721
- def rolling_polynomial_fit(x_values, y_values, window_size=3, degree=2):
1721
+ def rolling_polynomial_fit(x_values, y_values, window_size=3, degree=2, num_interpolated_points=0, adjust_edges=True):
722
1722
  """
723
- Applies a rolling polynomial regression with a specified window size and degree.
1723
+ Applies a rolling polynomial regression with a specified window size and degree,
1724
+ interpolates additional points, and optionally adjusts edge points for smoother transitions.
724
1725
 
725
1726
  Args:
726
1727
  x_values (list): List of x coordinates.
727
1728
  y_values (list): List of y coordinates.
728
1729
  window_size (int): Number of points per rolling fit (default: 3).
729
1730
  degree (int): Degree of polynomial to fit (default: 2).
1731
+ num_interpolated_points (int): Number of interpolated points per segment (default: 0). Set to 0 to only return the original points.
1732
+ adjust_edges (bool): Whether to adjust edge cases based on window size (default: True).
730
1733
 
731
1734
  Returns:
732
1735
  tuple: (smoothed_x, smoothed_y) lists for plotting.
733
1736
  """
734
1737
  import numpy as np
1738
+
735
1739
  smoothed_y = []
736
- smoothed_x = x_values # Keep x values unchanged
1740
+ smoothed_x = []
737
1741
 
738
1742
  half_window = window_size // 2 # Number of points to take before & after
739
1743
 
740
- for i in range(len(y_values)):
741
- # Handle edge cases: First and last points have fewer neighbors
1744
+ for i in range(len(y_values) - 1):
1745
+ # Handle edge cases dynamically based on window size
742
1746
  left_bound = max(0, i - half_window)
743
1747
  right_bound = min(len(y_values), i + half_window + 1)
744
1748
 
1749
+ if adjust_edges:
1750
+ if i == 0: # First point
1751
+ right_bound = min(len(y_values), i + window_size) # Expand to use more points near start
1752
+ elif i == len(y_values) - 2: # Last segment
1753
+ left_bound = max(0, i - (window_size - 1)) # Expand to include more points near end
1754
+
745
1755
  # Select the windowed data
746
1756
  x_window = np.array(x_values[left_bound:right_bound])
747
1757
  y_window = np.array(y_values[left_bound:right_bound])
748
1758
 
1759
+ # Adjust degree based on window size
1760
+ adjusted_degree = degree if len(x_window) > 2 else 1 # Use linear fit if only two points are available
1761
+
749
1762
  # Fit polynomial & evaluate at current point
750
- poly_coeffs = np.polyfit(x_window, y_window, deg=degree)
751
- smoothed_y.append(np.polyval(poly_coeffs, x_values[i]))
1763
+ poly_coeffs = np.polyfit(x_window, y_window, deg=adjusted_degree)
1764
+
1765
+ # Generate interpolated points between x_values[i] and x_values[i+1]
1766
+ x_interp = np.linspace(x_values[i], x_values[i+1], num_interpolated_points + 2) # Including endpoints
1767
+ y_interp = np.polyval(poly_coeffs, x_interp)
1768
+
1769
+ smoothed_x.extend(x_interp)
1770
+ smoothed_y.extend(y_interp)
752
1771
 
753
1772
  return smoothed_x, smoothed_y
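A usage sketch for rolling_polynomial_fit; the data are illustrative assumptions, not from the package source:

    import numpy as np
    x = list(np.linspace(0, 10, 11))
    y = [float(xi)**2 for xi in x]   # a simple quadratic, purely illustrative
    x_smooth, y_smooth = rolling_polynomial_fit(x, y, window_size=3, degree=2, num_interpolated_points=5)
    # each segment between consecutive x points contributes its endpoints plus 5 interpolated points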
754
1773
 
755
1774
 
1775
+
1776
+ ## Start of Section of Code for Styles and Converting between plotly and matplotlib Fig objectss ##
1777
+ # #There are a few things to know about the styles logic of JSONGrapher:
1778
+ # (1) There are actually two parts to the plot_style: a layout_style for the graph and a trace_styles_collection which will get applied to the individual dataseries.
1779
+ # So the plot_style is really supposed to be a dictionary with {"layout_style":"default", "trace_styles_collection":"default"} that way it is JSON compatible and avoids ambiguity.
1780
+ # A person can pass in dictionaries for layout_style and for trace_styles_collection and thereby create custom styles.
1781
+ # There are helper functions to extract style dictionaries once a person has a JSONGrapher record which they're happy with.
1782
+ # (2) We parse what the person provides as a style, so we accept things other than the ideal plot_style dictionary format.
1783
+ # If someone provides a single string, we'll use it for both layout_style and trace_styles_collection.
1784
+ # If we get a list of two, we'll expect that to be in the order of layout_style then trace_styles_collection
1785
+ # If we get a string that we can't find in the existing styles list, then we'll use the default.
1786
+ # (1) by default, exporting a JSONGrapher record to file will *not* include plot_styles. include_formatting will be an optional argument.
1787
+ # (2) There is an apply_plot_style function which will first put the style into self.fig_dict['plot_style'] so it stays there, before applying the style.
1788
+ # (3) For the plotting functions, they will have plot_style = {"layout_style":"", "trace_styles_collection":""} or = '' as their default argument value, which will result in checking if plot_style exists in the self.fig_dict already. If so, it will be used.
1789
+ # If somebody passes in a "None" type or the word none, then *no* style changes will be applied during plotting, relative to what the record already has.
1790
+ # One can pass a style in for the plotting functions. In those cases, we'll use the remove style option, then apply.
1791
+
1792
+ def parse_plot_style(plot_style):
1793
+ """
1794
+ Parse the given plot style and return a structured dictionary for layout and data series styles.
1795
+ If plot_style is missing a layout_style or trace_styles_collection then will set them as an empty string.
1796
+
1797
+ :param plot_style: None, str, list of two items, or a dictionary with at least one valid field.
1798
+ :return: dict with "layout_style" and "trace_styles_collection", ensuring defaults if missing.
1799
+ """
1800
+ if plot_style is None:
1801
+ parsed_plot_style = {"layout_style": None, "trace_styles_collection": None}
1802
+ elif isinstance(plot_style, str):
1803
+ parsed_plot_style = {"layout_style": plot_style, "trace_styles_collection": plot_style}
1804
+ elif isinstance(plot_style, list) and len(plot_style) == 2:
1805
+ parsed_plot_style = {"layout_style": plot_style[0], "trace_styles_collection": plot_style[1]}
1806
+ elif isinstance(plot_style, dict):
1807
+ if "trace_styles_collection" not in plot_style:
1808
+ if "trace_style_collection" in plot_style:
1809
+ print("Warning: plot_style has 'trace_style_collection', this key should be 'trace_styles_collection'. The key is being used, but the spelling error should be fixed.")
1810
+ plot_style["traces_styles_collection"] = plot_style["trace_style_collection"]
1811
+ elif "traces_style_collection" in plot_style:
1812
+ print("Warning: plot_style has 'traces_style_collection', this key should be 'trace_styles_collection'. The key is being used, but the spelling error should be fixed.")
1813
+ plot_style["traces_styles_collection"] = plot_style["traces_style_collection"]
1814
+ else:
1815
+ plot_style.setdefault("trace_styles_collection", '')
1816
+ if "layout_style" not in plot_style:
1817
+ plot_style.setdefault("layout_style", '')
1818
+ parsed_plot_style = {
1819
+ "layout_style": plot_style.get("layout_style", None),
1820
+ "trace_styles_collection": plot_style.get("trace_styles_collection", None),
1821
+ }
1822
+ else:
1823
+ raise ValueError("Invalid plot style: Must be None, a string, a list of two items, or a dictionary with valid fields.")
1824
+ return parsed_plot_style
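Worked examples for parse_plot_style (hypothetical calls, not from the package tests):

    parse_plot_style("science")                   # -> {"layout_style": "science", "trace_styles_collection": "science"}
    parse_plot_style(["default", "default"])      # -> {"layout_style": "default", "trace_styles_collection": "default"}
    parse_plot_style({"layout_style": "default"}) # -> {"layout_style": "default", "trace_styles_collection": ""}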
1825
+
1826
+ #this function uses a stylename or list of stylename/dictionaries to apply *both* layout_style and trace_styles_collection
1827
+ #plot_style is a dictionary of form {"layout_style":"default", "trace_styles_collection":"default"}
1828
+ #However, the style_to_apply does not need to be passed in as a dictionary.
1829
+ #For example: style_to_apply = ['default', 'default'] or style_to_apply = 'science'.
1830
+ #IMPORTANT: This is the only function that will set a layout_style or trace_styles_collection that is an empty string into 'default'.
1831
+ # all other style applying functions (including parse_plot_style) will pass on the empty string or will do nothing if receiving an empty string.
1832
+ def apply_plot_style_to_plotly_dict(fig_dict, plot_style=None):
1833
+ if plot_style is None: # should not initialize mutable objects in arguments line, so doing here.
1834
+ plot_style = {"layout_style": {}, "trace_styles_collection": {}} # Fresh dictionary per function call
1835
+ #We first parse style_to_apply to get a properly formatted plot_style dictionary of form: {"layout_style":"default", "trace_styles_collection":"default"}
1836
+ plot_style = parse_plot_style(plot_style)
1837
+ plot_style.setdefault("layout_style",'') #fill with blank string if not present.
1838
+ plot_style.setdefault("trace_styles_collection",'') #fill with blank string if not present.
1839
+ #Code logic for layout style.
1840
+ if str(plot_style["layout_style"]).lower() != 'none': #take no action if received "None" or NoneType
1841
+ if plot_style["layout_style"] == '': #in this case, we're going to use the default.
1842
+ plot_style["layout_style"] = 'default'
1843
+ fig_dict = remove_layout_style_from_plotly_dict(fig_dict=fig_dict)
1844
+ fig_dict = apply_layout_style_to_plotly_dict(fig_dict=fig_dict, layout_style_to_apply=plot_style["layout_style"])
1845
+ #Code logic for trace_styles_collection style.
1846
+ if str(plot_style["trace_styles_collection"]).lower() != 'none': #take no action if received "None" or NoneType
1847
+ if plot_style["trace_styles_collection"] == '': #in this case, we're going to use the default.
1848
+ plot_style["trace_styles_collection"] = 'default'
1849
+ fig_dict = remove_trace_styles_collection_from_plotly_dict(fig_dict=fig_dict)
1850
+ fig_dict = apply_trace_styles_collection_to_plotly_dict(fig_dict=fig_dict,trace_styles_collection=plot_style["trace_styles_collection"])
1851
+ return fig_dict
1852
+
1853
+ def remove_plot_style_from_plotly_dict(fig_dict):
1854
+ """
1855
+ Remove both layout and data series styles from a Plotly figure dictionary.
1856
+
1857
+ :param fig_dict: dict, Plotly style fig_dict
1858
+ :return: dict, Updated Plotly style fig_dict with default formatting.
1859
+ """
1860
+ fig_dict = remove_layout_style_from_plotly_dict(fig_dict)
1861
+ fig_dict = remove_trace_styles_collection_from_plotly_dict(fig_dict)
1862
+ return fig_dict
1863
+
1864
+
756
1865
  def convert_JSONGrapher_dict_to_matplotlib_fig(fig_dict):
757
1866
  """
758
1867
  Converts a Plotly figure dictionary into a Matplotlib figure without using pio.from_json.
@@ -767,30 +1876,40 @@ def convert_JSONGrapher_dict_to_matplotlib_fig(fig_dict):
767
1876
  fig, ax = plt.subplots()
768
1877
 
769
1878
  # Extract traces (data series)
1879
+ #This section is now deprecated. It has not been completely updated after the trace_style field was created.
1880
+ #There was old logic for plotly_trace_type which has been partially updated, but in fact the logic should be rewritten
1881
+ #to better accommodate the existence of both "trace_style" and "type". It may be that there should be
1882
+ #a helper function called
770
1883
  for trace in fig_dict.get("data", []):
771
- trace_type = trace.get("type", None)
1884
+ trace_style = trace.get("trace_style", '')
1885
+ plotly_trace_types = trace.get("type", '')
1886
+ if (plotly_trace_types == '') and (trace_style == ''):
1887
+ trace_style = 'scatter_spline'
1888
+ elif (plotly_trace_types == 'scatter') and (trace_style == ''):
1889
+ trace_style = 'scatter_spline'
1890
+ elif (trace_style == '') and (plotly_trace_types != ''):
1891
+ trace_style = plotly_trace_types
772
1892
  # If type is missing, but mode indicates lines and shape is spline, assume it's a spline
773
- if not trace_type and trace.get("mode") == "lines" and trace.get("line", {}).get("shape") == "spline":
774
- trace_type = "spline"
775
-
1893
+ if not trace_style and trace.get("mode") == "lines" and trace.get("line", {}).get("shape") == "spline":
1894
+ trace_style = "spline"
776
1895
  x_values = trace.get("x", [])
777
1896
  y_values = trace.get("y", [])
778
1897
  trace_name = trace.get("name", "Data")
779
- if trace_type == "bar":
1898
+ if trace_style == "bar":
780
1899
  ax.bar(x_values, y_values, label=trace_name)
781
-
782
- elif trace_type == "scatter":
1900
+ elif trace_style == "scatter":
1901
+ mode = trace.get("mode", "")
1902
+ ax.scatter(x_values, y_values, label=trace_name, alpha=0.7)
1903
+ elif trace_style == "scatter_spline":
783
1904
  mode = trace.get("mode", "")
784
1905
  ax.scatter(x_values, y_values, label=trace_name, alpha=0.7)
785
-
786
1906
  # Attempt to simulate spline behavior if requested
787
1907
  if "lines" in mode or trace.get("line", {}).get("shape") == "spline":
788
1908
  print("Warning: Rolling polynomial approximation used instead of spline.")
789
1909
  x_smooth, y_smooth = rolling_polynomial_fit(x_values, y_values, window_size=3, degree=2)
790
-
791
1910
  # Add a label explicitly for the legend
792
1911
  ax.plot(x_smooth, y_smooth, linestyle="-", label=f"{trace_name} Spline")
793
- elif trace_type == "spline":
1912
+ elif trace_style == "spline":
794
1913
  print("Warning: Using rolling polynomial approximation instead of true spline.")
795
1914
  x_smooth, y_smooth = rolling_polynomial_fit(x_values, y_values, window_size=3, degree=2)
796
1915
  ax.plot(x_smooth, y_smooth, linestyle="-", label=f"{trace_name} Spline")
@@ -798,14 +1917,14 @@ def convert_JSONGrapher_dict_to_matplotlib_fig(fig_dict):
798
1917
  # Extract layout details
799
1918
  layout = fig_dict.get("layout", {})
800
1919
  title = layout.get("title", {})
801
- if isinstance(title, dict):
1920
+ if isinstance(title, dict): #This if-statement block is rather hard to read. Perhaps it should be changed later.
802
1921
  ax.set_title(title.get("text", "Converted Plotly Figure"))
803
1922
  else:
804
1923
  ax.set_title(title if isinstance(title, str) else "Converted Plotly Figure")
805
1924
 
806
1925
  xaxis = layout.get("xaxis", {})
807
1926
  xlabel = "X-Axis" # Default label
808
- if isinstance(xaxis, dict):
1927
+ if isinstance(xaxis, dict): #This if-statement block is rather hard to read. Perhaps it should be changed later.
809
1928
  title_obj = xaxis.get("title", {})
810
1929
  xlabel = title_obj.get("text", "X-Axis") if isinstance(title_obj, dict) else title_obj
811
1930
  elif isinstance(xaxis, str):
@@ -813,7 +1932,7 @@ def convert_JSONGrapher_dict_to_matplotlib_fig(fig_dict):
813
1932
  ax.set_xlabel(xlabel)
814
1933
  yaxis = layout.get("yaxis", {})
815
1934
  ylabel = "Y-Axis" # Default label
816
- if isinstance(yaxis, dict):
1935
+ if isinstance(yaxis, dict): #This if statements block is rather not human readable. Perhaps should be changed later.
817
1936
  title_obj = yaxis.get("title", {})
818
1937
  ylabel = title_obj.get("text", "Y-Axis") if isinstance(title_obj, dict) else title_obj
819
1938
  elif isinstance(yaxis, str):
@@ -821,6 +1940,7 @@ def convert_JSONGrapher_dict_to_matplotlib_fig(fig_dict):
821
1940
  ax.set_ylabel(ylabel)
822
1941
  ax.legend()
823
1942
  return fig
1943
+
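+ # Example usage (an illustrative, commented-out sketch; the small fig_dict and filename below are hypothetical):
+ # example_fig_dict = {"data": [{"x": [1, 2, 3], "y": [2, 4, 9], "type": "scatter", "name": "Example Series"}],
+ #                     "layout": {"title": {"text": "Example"}, "xaxis": {"title": {"text": "X"}}, "yaxis": {"title": {"text": "Y"}}}}
+ # matplotlib_fig = convert_JSONGrapher_dict_to_matplotlib_fig(example_fig_dict)
+ # matplotlib_fig.savefig("example_plot.png")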
824
1944
 
825
1945
  #The below function works, but because it depends on the python plotly package, we avoid using it
826
1946
  #To decrease the number of dependencies.
@@ -839,7 +1959,7 @@ def convert_plotly_dict_to_matplotlib(fig_dict):
839
1959
  matplotlib.figure.Figure: The corresponding Matplotlib figure.
840
1960
  """
841
1961
  import plotly.io as pio
842
-
1962
+ import matplotlib.pyplot as plt
843
1963
  # Convert JSON dictionary into a Plotly figure
844
1964
  plotly_fig = pio.from_json(json.dumps(fig_dict))
845
1965
 
@@ -870,25 +1990,1548 @@ def convert_plotly_dict_to_matplotlib(fig_dict):
870
1990
 
871
1991
  return fig
872
1992
 
1993
+ def apply_trace_styles_collection_to_plotly_dict(fig_dict, trace_styles_collection="", trace_style_to_apply=""):
1994
+ """
1995
+ Iterates over all traces in the `data` list of a Plotly figure dictionary
1996
+ and applies styles to each one.
1997
+
1998
+ Args:
1999
+ fig_dict (dict): A dictionary containing a `data` field with Plotly traces.
2000
+ trace_style_to_apply (str): Optional style preset to apply. Default is "default".
2001
+
2002
+ Returns:
2003
+ dict: Updated Plotly figure dictionary with defaults applied to each trace.
2004
+
2005
+ """
2006
+ if type(trace_styles_collection) == type("string"):
2007
+ trace_styles_collection_name = trace_styles_collection
2008
+ else:
2009
+ trace_styles_collection_name = trace_styles_collection["name"]
2010
+
2011
+ if "data" in fig_dict and isinstance(fig_dict["data"], list):
2012
+ fig_dict["data"] = [apply_trace_style_to_single_data_series(data_series=trace,trace_styles_collection=trace_styles_collection, trace_style_to_apply=trace_style_to_apply) for trace in fig_dict["data"]]
2013
+
2014
+ if "plot_style" not in fig_dict:
2015
+ fig_dict["plot_style"] = {}
2016
+ fig_dict["plot_style"]["trace_styles_collection"] = trace_styles_collection_name
2017
+ return fig_dict
2018
+
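+ # Example usage (an illustrative, commented-out sketch using the built-in "default" collection):
+ # fig_dict = apply_trace_styles_collection_to_plotly_dict(fig_dict, trace_styles_collection="default")
+ # # A specific trace_style from the collection can also be forced onto every trace:
+ # fig_dict = apply_trace_styles_collection_to_plotly_dict(fig_dict, trace_styles_collection="default", trace_style_to_apply="scatter")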
2019
+
2020
+ # The logic in JSONGrapher is to apply the style information but to treat "type" differently
2021
+ # Accordingly, we use 'trace_styles_collection' as a field in JSONGrapher for each data_series.
2022
+ # compared to how plotly treats 'type' for a data series. So later in the process, when actually plotting with plotly, the 'type' field will get overwritten.
2023
+ def apply_trace_style_to_single_data_series(data_series, trace_styles_collection="", trace_style_to_apply=""):
2024
+ """
2025
+ Applies predefined styles to a single Plotly data series while preserving relevant fields.
2026
+
2027
+ Args:
2028
+ data_series (dict): A dictionary representing a single Plotly data series.
2029
+ trace_styles_collection (str or dict): Name of a trace_styles_collection, or a custom collection dictionary. Default is "".
+ trace_style_to_apply (str or dict): Name of a trace_style preset or a custom trace_style dictionary. Default is "" (the data_series' own trace_style is used).
2030
+
2031
+ Returns:
2032
+ dict: Updated data series with style applied.
2033
+ """
2034
+ if not isinstance(data_series, dict):
2035
+ return data_series # Return unchanged if the data series is invalid.
2036
+ if isinstance(trace_style_to_apply, dict):#in this case, we'll set the data_series trace_style to match.
2037
+ data_series["trace_style"] = trace_style_to_apply
2038
+ if str(trace_style_to_apply) != str(''): #if we received a non-empty string (or dictionary), we'll put it into the data_series object.
2039
+ data_series["trace_style"] = trace_style_to_apply
2040
+ elif str(trace_style_to_apply) == str(''): #If we received an empty string for the trace_style_to apply (default JSONGrapher flow), we'll check in the data_series object.
2041
+ #first see if there is a trace_style in the data_series.
2042
+ trace_style = data_series.get("trace_style", "")
2043
+ #If it's "none", then we'll return the data series unchanged.
2044
+ #For every trace_styles_collection, a trace_style of "none" means no change should be made.
2045
+ if str(trace_style).lower() == "none":
2046
+ return data_series
2047
+ #if we find a dictionary, we will set the trace_style_to_apply to that, to ensure we skip other string checks to use the dictionary.
2048
+ if isinstance(trace_style,dict):
2049
+ trace_style_to_apply = trace_style
2050
+ #if the trace_style_to_apply is a string and we have not received a trace_styles collection, then we have nothing
2051
+ #to use, so will return the data_series unchanged.
2052
+ if type(trace_style_to_apply) == type("string"):
2053
+ if (trace_styles_collection == '') or (str(trace_styles_collection).lower() == 'none'):
2054
+ return data_series
2055
+ #if the trace_style_to_apply is "none", we will return the series unchanged.
2056
+ if str(trace_style_to_apply).lower() == str("none"):
2057
+ return data_series
2058
+ #Add a couple of hardcoded cases.
2059
+ if type(trace_style_to_apply) == type("string"):
2060
+ if (trace_style_to_apply.lower() == "nature") or (trace_style_to_apply.lower() == "science"):
2061
+ trace_style_to_apply = "default"
2062
+
2063
+ #at this stage, should remove any existing formatting before applying new formatting.
2064
+ data_series = remove_trace_style_from_single_data_series(data_series)
2065
+
2066
+ # -------------------------------
2067
+ # Predefined trace_styles_collection
2068
+ # -------------------------------
2069
+ # Each trace_styles_collection is defined as a dictionary containing multiple trace_styles.
2070
+ # Users can select a style preset trace_styles_collection (e.g., "default", "minimalist", "bold"),
2071
+ # and this function will apply appropriate settings for the given trace_style.
2072
+ #
2073
+ # Examples of Supported trace_styles:
2074
+ # - "scatter_spline" (default when type is not specified)
2075
+ # - "scatter"
2076
+ # - "spline"
2077
+ # - "bar"
2078
+ # - "heatmap"
2079
+ #
2080
+ # Note: Colors are intentionally omitted to allow users to define their own.
2081
+ # However, predefined colorscales are applied for heatmaps.
2082
+
2083
+
2084
+ styles_available = JSONGrapher.styles.trace_styles_collection_library.styles_library
2085
+
2086
+ # Get the appropriate style dictionary
2087
+ if isinstance(trace_styles_collection, dict):
2088
+ styles_collection_dict = trace_styles_collection # Use custom style directly
2089
+ else:
2090
+ styles_collection_dict = styles_available.get(trace_styles_collection, {})
2091
+ if not styles_collection_dict: # Check if it's an empty dictionary
2092
+ print(f"Warning: trace_styles_collection named '{trace_styles_collection}' not found. Using 'default' trace_styles_collection instead.")
2093
+ styles_collection_dict = styles_available.get("default", {})
2094
+ # Determine the trace_style, defaulting to the first item in a given style if none is provided.
2095
+
2096
+ # Retrieve the specific style for the plot type
2097
+ if trace_style_to_apply == "":# if a trace_style_to_apply has not been supplied, we will get it from the dataseries.
2098
+ trace_style = data_series.get("trace_style", "")
2099
+ else:
2100
+ trace_style = trace_style_to_apply
2101
+
2102
+ if trace_style == "": #if the trace style is an empty string....
2103
+ trace_style = list(styles_collection_dict.keys())[0] #take the first trace_style name in the style_dict. In python 3.7 and later dictionary keys preserve ordering.
2104
+
2105
+ #If a person adds "__colorscale" to the end of a trace_style, like "scatter_spline__rainbow" we will extract the colorscale and apply it to the plot.
2106
+ #This should be done before extracting the trace_style from the styles_available, because we need to split the string to break out the trace_style
2107
+ colorscale = "" #initializing variable.
2108
+ if isinstance(trace_style, str): #check if it is a string type.
2109
+ if "__" in trace_style:
2110
+ trace_style, colorscale = trace_style.split("__")
2111
+
2112
+ colorscale_structure = "" #initialize this variable for use later. It tells us which fields to put the colorscale related values in. This should be done before regular trace_style fields are applied.
2113
+ #3D and bubble plots will have a colorscale by default.
2114
+ if trace_style == "bubble": #for bubble trace styles, we need to move the z values into the marker size. We also need to do this before the styles_dict collection is accessed, since then the trace_style becomes a dictionary.
2115
+ data_series = prepare_bubble_sizes(data_series)
2116
+ colorscale_structure = "bubble"
2117
+ elif trace_style == "mesh3d": #for bubble trace styles, we need to move the z values into the marker size. We also need to do this before the styles_dict collection is accessed, since then the trace_style becomes a dictionary.
2118
+ colorscale_structure = "mesh3d"
2119
+ elif trace_style == "scatter3d": #for bubble trace styles, we need to move the z values into the marker size. We also need to do this before the styles_dict collection is accessed, since then the trace_style becomes a dictionary.
2120
+ colorscale_structure = "scatter3d"
2121
+
2122
+ if trace_style in styles_collection_dict:
2123
+ trace_style = styles_collection_dict.get(trace_style)
2124
+ elif trace_style not in styles_collection_dict: # The requested trace_style name is not in the collection.
2125
+ print(f"Warning: trace_style named '{trace_style}' not found in trace_styles_collection '{trace_styles_collection}'. Using the first trace_style in in trace_styles_collection '{trace_styles_collection}'.")
2126
+ trace_style = list(styles_collection_dict.keys())[0] #take the first trace_style name in the style_dict. In python 3.7 and later dictionary keys preserve ordering.
2127
+ trace_style = styles_collection_dict.get(trace_style)
2128
+
2129
+ # Apply type and other predefined settings
2130
+ data_series["type"] = trace_style.get("type")
2131
+ # Apply other attributes while preserving existing values
2132
+ for key, value in trace_style.items():
2133
+ if key not in ["type"]:
2134
+ if isinstance(value, dict): # Ensure value is a dictionary
2135
+ data_series.setdefault(key, {}).update(value)
2136
+ else:
2137
+ data_series[key] = value # Direct assignment for non-dictionary values
2138
+
2139
+ #Before applying colorscales, we check if we have received a colorscale from the user. If so, we'll need to parse the trace_type to assign the colorscale structure.
2140
+ if colorscale != "":
2141
+ #If it is a scatter plot with markers, then the colorscale_structure will be marker. Need to check for this before the lines alone case.
2142
+ if ("markers" in data_series["mode"]) or ("markers+lines" in data_series["mode"]) or ("lines+markers" in data_series["mode"]):
2143
+ colorscale_structure = "marker"
2144
+ elif ("lines" in data_series["mode"]):
2145
+ colorscale_structure = "line"
2146
+ elif ("bar" in data_series["type"]):
2147
+ colorscale_structure = "marker"
2148
+
2149
+ #Block of code to clean color values for 3D plots and 2D plots. It can't be just from the style dictionary because we need to point to data.
2150
+ def clean_color_values(list_of_values, variable_string_for_warning):
2151
+ if None in list_of_values:
2152
+ print("Warning: A colorscale based on" + variable_string_for_warning + "was requested. None values were found. They are being replaced with 0 values. It is recommended to provide data without None values.")
2153
+ color_values = [0 if value is None else value for value in list_of_values]
2154
+ else:
2155
+ color_values = list_of_values
2156
+ return color_values
2157
+
2158
+ if colorscale_structure == "bubble":
2159
+ #data_series["marker"]["colorscale"] = "viridis_r" #https://plotly.com/python/builtin-colorscales/
2160
+ data_series["marker"]["showscale"] = True
2161
+ if "z" in data_series:
2162
+ color_values = clean_color_values(list_of_values= data_series["z"], variable_string_for_warning="z")
2163
+ data_series["marker"]["color"] = color_values
2164
+ elif "z_points" in data_series:
2165
+ color_values = clean_color_values(list_of_values= data_series["z_points"], variable_string_for_warning="z_points")
2166
+ data_series["marker"]["color"] = color_values
2167
+ elif colorscale_structure == "scatter3d":
2168
+ #data_series["marker"]["colorscale"] = "viridis_r" #https://plotly.com/python/builtin-colorscales/
2169
+ data_series["marker"]["showscale"] = True
2170
+ if "z" in data_series:
2171
+ color_values = clean_color_values(list_of_values= data_series["z"], variable_string_for_warning="z")
2172
+ data_series["marker"]["color"] = color_values
2173
+ elif "z_points" in data_series:
2174
+ color_values = clean_color_values(list_of_values= data_series["z_points"], variable_string_for_warning="z_points")
2175
+ data_series["marker"]["color"] = color_values
2176
+ elif colorscale_structure == "mesh3d":
2177
+ #data_series["colorscale"] = "viridis_r" #https://plotly.com/python/builtin-colorscales/
2178
+ data_series["showscale"] = True
2179
+ if "z" in data_series:
2180
+ color_values = clean_color_values(list_of_values= data_series["z"], variable_string_for_warning="z")
2181
+ data_series["intensity"] = color_values
2182
+ elif "z_points" in data_series:
2183
+ color_values = clean_color_values(list_of_values= data_series["z_points"], variable_string_for_warning="z_points")
2184
+ data_series["intensity"] = color_values
2185
+ elif colorscale_structure == "marker":
2186
+ data_series["marker"]["colorscale"] = colorscale
2187
+ data_series["marker"]["showscale"] = True
2188
+ color_values = clean_color_values(list_of_values=data_series["y"], variable_string_for_warning="y")
2189
+ data_series["marker"]["color"] = color_values
2190
+ elif colorscale_structure == "line":
2191
+ data_series["line"]["colorscale"] = colorscale
2192
+ data_series["line"]["showscale"] = True
2193
+ color_values = clean_color_values(list_of_values=data_series["y"], variable_string_for_warning="y")
2194
+ data_series["line"]["color"] = color_values
2195
+
2196
+
2197
+ return data_series
2198
+
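+ # Example usage (an illustrative, commented-out sketch; the data_series below is hypothetical):
+ # data_series = {"x": [1, 2, 3], "y": [4, 5, 6], "name": "Example Series", "trace_style": "scatter"}
+ # data_series = apply_trace_style_to_single_data_series(data_series, trace_styles_collection="default")
+ # # The returned data_series now has "type", "mode", etc. filled in from the "scatter" trace_style of the "default" collection.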
2199
+ def prepare_bubble_sizes(data_series):
2200
+ #To make a bubble plot with plotly, we are actually using a 2D plot
2201
+ #and are using the z values in a data_series to create the sizes of each point.
2202
+ #We will also scale them to some maximum bubble size that is specified.
2203
+ if "marker" not in data_series:
2204
+ data_series["marker"] = {}
2205
+ if "z_points" in data_series:
2206
+ data_series["marker"]["size"] = data_series["z_points"]
2207
+ elif "z" in data_series:
2208
+ data_series["marker"]["size"] = data_series["z"]
2209
+
2210
+ #now need to normalize to the max value in the list.
2211
+ def normalize_to_max(starting_list):
2212
+ import numpy as np
2213
+ arr = np.array(starting_list) # Convert list to NumPy array for efficient operations
2214
+ max_value = np.max(arr) # Find the maximum value in the list
2215
+ if max_value == 0:
2216
+ normalized_values = np.zeros_like(arr) # If max_value is zero, return zeros
2217
+ else:
2218
+ normalized_values = arr / max_value # Otherwise, divide each element by max_value
2219
+ return normalized_values # Return the normalized values
2220
+ try:
2221
+ normalized_sizes = normalize_to_max(data_series["marker"]["size"])
2222
+ except KeyError as exc:
2223
+ raise KeyError("Error: During bubble plot bubble size normalization, there was an error. This usually means the z variable has not been populated. For example, by equation evaluation set to false or simulation evaluation set to false.")
2224
+
2225
+
2226
+ #Now biggest bubble is 1 (or 0) so multiply to enlarge to scale.
2227
+ if "max_bubble_size" in data_series:
2228
+ max_bubble_size = data_series["max_bubble_size"]
2229
+ else:
2230
+ max_bubble_size = 10
2231
+ scaled_sizes = normalized_sizes*max_bubble_size
2232
+ data_series["marker"]["size"] = scaled_sizes.tolist() #from numpy array back to list.
2233
+
2234
+ #Now let's also set the text that appears during hovering to include the original data.
2235
+ if "z_points" in data_series:
2236
+ data_series["text"] = data_series["z_points"]
2237
+ elif "z" in data_series:
2238
+ data_series["text"] = data_series["z"]
2239
+
2240
+ return data_series
2241
+
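+ # Example usage (an illustrative, commented-out sketch; the values below are hypothetical):
+ # data_series = {"x": [1, 2, 3], "y": [4, 5, 6], "z": [10, 20, 40], "max_bubble_size": 30}
+ # data_series = prepare_bubble_sizes(data_series)
+ # # data_series["marker"]["size"] is now [7.5, 15.0, 30.0]: the z values scaled so the largest bubble equals max_bubble_size.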
2242
+
2243
+ #TODO: This logic should be changed in the future. There should be a separated function to remove formatting
2244
+ # versus just removing the current setting of "trace_styles_collection"
2245
+ # So the main class function will also be broken into two and/or need to take an optional argument in
2246
+ def remove_trace_styles_collection_from_plotly_dict(fig_dict):
2247
+ """
2248
+ Remove applied data series styles from a Plotly figure dictionary.
2249
+
2250
+ :param fig_dict: dict, Plotly style fig_dict
2251
+ :return: dict, Updated Plotly style fig_dict with default formatting.
2252
+ """
2253
+ #will remove formatting from the individual data_series, but will not remove formatting from any that have trace_style of "none".
2254
+ if isinstance(fig_dict, dict) and "data" in fig_dict and isinstance(fig_dict["data"], list):
2255
+ updated_data = [] # Initialize an empty list to store processed traces
2256
+ for trace in fig_dict["data"]:
2257
+ # Check if the trace has a "trace_style" field and if its value is "none" (case-insensitive)
2258
+ if trace.get("trace_style", "").lower() == "none":
2259
+ updated_data.append(trace) # Skip modification and keep the trace unchanged
2260
+ else:
2261
+ # Apply the function to modify the trace before adding it to the list
2262
+ updated_data.append(remove_trace_style_from_single_data_series(trace))
2263
+ # Update the "data" field with the processed traces
2264
+ fig_dict["data"] = updated_data
2265
+
2266
+
2267
+ #If being told to remove the style, should also pop it from fig_dict.
2268
+ if "plot_style" in fig_dict:
2269
+ if "trace_styles_collection" in fig_dict["plot_style"]:
2270
+ fig_dict["plot_style"].pop("trace_styles_collection")
2271
+ return fig_dict
2272
+
2273
+ def remove_trace_style_from_single_data_series(data_series):
2274
+ """
2275
+ Remove only formatting fields from a single Plotly data series while preserving all other fields.
2276
+
2277
+ Note: Since fig_dict data objects may contain custom fields (e.g., "equation", "metadata"),
2278
+ this function explicitly removes predefined **formatting** attributes while leaving all other data intact.
2279
+
2280
+ :param data_series: dict, A dictionary representing a single Plotly data series.
2281
+ :return: dict, Updated data series with formatting fields removed but key data retained.
2282
+ """
2283
+
2284
+ if not isinstance(data_series, dict):
2285
+ return data_series # Return unchanged if input is invalid.
2286
+
2287
+ # **Define formatting fields to remove**
2288
+ formatting_fields = {
2289
+ "mode", "line", "marker", "colorscale", "opacity", "fill", "fillcolor", "color", "intensity", "showscale",
2290
+ "legendgroup", "showlegend", "textposition", "textfont", "visible", "connectgaps", "cliponaxis", "showgrid"
2291
+ }
2292
+
2293
+ # **Create a new data series excluding only formatting fields**
2294
+ cleaned_data_series = {key: value for key, value in data_series.items() if key not in formatting_fields}
2295
+ #make the new data series into a JSONGrapherDataSeries object.
2296
+ new_data_series_object = JSONGrapherDataSeries()
2297
+ new_data_series_object.update_while_preserving_old_terms(cleaned_data_series)
2298
+ return new_data_series_object
2299
+
2300
+ def extract_trace_style_by_index(fig_dict, data_series_index, new_trace_style_name='', extract_colors=False):
2301
+ data_series_dict = fig_dict["data"][data_series_index]
2302
+ extracted_trace_style = extract_trace_style_from_data_series_dict(data_series_dict=data_series_dict, new_trace_style_name=new_trace_style_name, extract_colors=extract_colors)
2303
+ return extracted_trace_style
2304
+
2305
+ def extract_trace_style_from_data_series_dict(data_series_dict, new_trace_style_name='', additional_attributes_to_extract=None, extract_colors=False):
2306
+ """
2307
+ Extract formatting attributes from a given Plotly data series.
2308
+
2309
+ The function scans the provided `data_series` dictionary and returns a new dictionary
2310
+ containing only the predefined formatting fields.
2311
+
2312
+ Examples of formatting attributes extracted:
2313
+ - "type"
2314
+ - "mode"
2315
+ - "line"
2316
+ - "marker"
2317
+ - "colorscale"
2318
+ - "opacity"
2319
+ - "fill"
2320
+ - "legendgroup"
2321
+ - "showlegend"
2322
+ - "textposition"
2323
+ - "textfont"
2324
+
2325
+ :param data_series_dict: dict, A dictionary representing a single Plotly data series.
2326
+ :param new_trace_style_name: string, the key name the user wants the extracted trace_style to have after extraction.
2327
+ :return: dict, A dictionary containing only the formatting attributes.
2328
+ """
2329
+ if additional_attributes_to_extract is None: #in python, it's not good to make an empty list a default argument.
2330
+ additional_attributes_to_extract = []
2331
+
2332
+ if new_trace_style_name=='':
2333
+ new_trace_style_name = data_series_dict.get("trace_style", "") #keep blank if not present.
2334
+ if new_trace_style_name=='':
2335
+ new_trace_style_name = "custom"
2336
+
2337
+ if not isinstance(data_series_dict, dict):
2338
+ return {} # Return an empty dictionary if input is invalid.
2339
+
2340
+ # Define known formatting attributes. This is a set (not a dictionary, not a list)
2341
+ formatting_fields = {
2342
+ "type", "mode", "line", "marker", "colorscale", "opacity", "fill", "fillcolor", "color", "intensity", "showscale",
2343
+ "legendgroup", "showlegend", "textposition", "textfont", "visible", "connectgaps", "cliponaxis", "showgrid"
2344
+ }
2345
+
2346
+ formatting_fields.update(additional_attributes_to_extract)
2347
+ # Extract only formatting-related attributes
2348
+ trace_style_dict = {key: value for key, value in data_series_dict.items() if key in formatting_fields}
2349
+
2350
+ #Pop out colors if we are not extracting them.
2351
+ if extract_colors == False:
2352
+ if "marker" in trace_style_dict:
2353
+ if "color" in trace_style_dict["marker"]:
2354
+ trace_style_dict["marker"].pop("color")
2355
+ if "line" in trace_style_dict:
2356
+ if "color" in trace_style_dict["line"]:
2357
+ trace_style_dict["line"].pop("color")
2358
+ if "colorscale" in trace_style_dict: # Handles top-level colorscale for heatmaps, choropleths
2359
+ trace_style_dict.pop("colorscale")
2360
+ if "fillcolor" in trace_style_dict: # Handles fill colors
2361
+ trace_style_dict.pop("fillcolor")
2362
+ if "textfont" in trace_style_dict:
2363
+ if "color" in trace_style_dict["textfont"]: # Handles text color
2364
+ trace_style_dict["textfont"].pop("color")
2365
+ if "legendgrouptitle" in trace_style_dict and isinstance(trace_style_dict["legendgrouptitle"], dict):
2366
+ if "font" in trace_style_dict["legendgrouptitle"] and isinstance(trace_style_dict["legendgrouptitle"]["font"], dict):
2367
+ if "color" in trace_style_dict["legendgrouptitle"]["font"]:
2368
+ trace_style_dict["legendgrouptitle"]["font"].pop("color")
2369
+ extracted_trace_style = {new_trace_style_name : trace_style_dict} #this is a trace_style dict.
2370
+ return extracted_trace_style #this is a trace_style dict.
2371
+
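+ # Example usage (an illustrative, commented-out sketch; the data_series_dict below is hypothetical):
+ # data_series_dict = {"x": [1, 2], "y": [3, 4], "mode": "markers", "marker": {"size": 8, "color": "blue"}}
+ # extracted = extract_trace_style_from_data_series_dict(data_series_dict, new_trace_style_name="my_style")
+ # # extracted is {"my_style": {"mode": "markers", "marker": {"size": 8}}}; the color is dropped because extract_colors defaults to False.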
2372
+ #export a single trace_style dictionary to .json.
2373
+ def write_trace_style_to_file(trace_style_dict, trace_style_name, filename):
2374
+ # Ensure the filename ends with .json
2375
+ if not filename.lower().endswith(".json"):
2376
+ filename += ".json"
2377
+
2378
+ json_structure = {
2379
+ "trace_style": {
2380
+ "name": trace_style_name,
2381
+ trace_style_name: trace_style_dict
2384
+ }
2385
+ }
2386
+
2387
+ with open(filename, "w", encoding="utf-8") as file: # Specify UTF-8 encoding for compatibility
2388
+ json.dump(json_structure, file, indent=4)
2389
+
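+ # Example usage (an illustrative, commented-out sketch; the style fields and filename below are hypothetical):
+ # write_trace_style_to_file(trace_style_dict={"type": "scatter", "mode": "markers"}, trace_style_name="my_scatter", filename="my_scatter_style")
+ # # This writes my_scatter_style.json containing {"trace_style": {"name": "my_scatter", "my_scatter": {"type": "scatter", "mode": "markers"}}}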
2390
+
2391
+ #export an entire trace_styles_collection to .json. The trace_styles_collection is a dict.
2392
+ def write_trace_styles_collection_to_file(trace_styles_collection, trace_styles_collection_name, filename):
2393
+ if "trace_styles_collection" in trace_styles_collection: #We may receive a traces_style collection in a container. If so, we pull the traces_style_collection out.
2394
+ containing_dict = trace_styles_collection["trace_styles_collection"]
+ trace_styles_collection = containing_dict[containing_dict["name"]]
2395
+ # Ensure the filename ends with .json
2396
+ if not filename.lower().endswith(".json"):
2397
+ filename += ".json"
2398
+
2399
+ json_structure = {
2400
+ "trace_styles_collection": {
2401
+ "name": trace_styles_collection_name,
2402
+ trace_styles_collection_name: trace_styles_collection
2403
+ }
2404
+ }
2405
+
2406
+ with open(filename, "w", encoding="utf-8") as file: # Specify UTF-8 encoding for compatibility
2407
+ json.dump(json_structure, file, indent=4)
2408
+
2409
+
2410
+
2411
+ #import an entire trace_styles_collection from .json. The trace_styles_collection is a dict.
2412
+ def import_trace_styles_collection(filename):
2413
+ # Ensure the filename ends with .json
2414
+ if not filename.lower().endswith(".json"):
2415
+ filename += ".json"
2416
+
2417
+ with open(filename, "r", encoding="utf-8") as file: # Specify UTF-8 encoding for compatibility
2418
+ data = json.load(file)
2419
+
2420
+ # Validate JSON structure
2421
+ containing_dict = data.get("trace_styles_collection")
2422
+ if not isinstance(containing_dict, dict):
2423
+ raise ValueError("Error: Missing or malformed 'trace_styles_collection'.")
2424
+
2425
+ collection_name = containing_dict.get("name")
2426
+ if not isinstance(collection_name, str) or collection_name not in containing_dict:
2427
+ raise ValueError(f"Error: Expected dictionary '{collection_name}' is missing or malformed.")
2428
+ trace_styles_collection = containing_dict[collection_name]
2429
+ # Return only the dictionary corresponding to the collection name
2430
+ return trace_styles_collection
2431
+
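+ # Example usage (an illustrative, commented-out sketch; the filename below is hypothetical):
+ # my_collection = import_trace_styles_collection("my_trace_styles_collection.json")
+ # # my_collection is the inner dictionary of trace_styles, keyed by trace_style name.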
2432
+
2433
+ #import a single trace_style from .json. The trace_style is a dict.
2434
+ def import_trace_style(filename):
2435
+ # Ensure the filename ends with .json
2436
+ if not filename.lower().endswith(".json"):
2437
+ filename += ".json"
2438
+
2439
+ with open(filename, "r", encoding="utf-8") as file: # Specify UTF-8 encoding for compatibility
2440
+ data = json.load(file)
2441
+
2442
+ # Validate JSON structure
2443
+ containing_dict = data.get("trace_style")
2444
+ if not isinstance(containing_dict, dict):
2445
+ raise ValueError("Error: Missing or malformed 'trace_style'.")
2446
+
2447
+ style_name = containing_dict.get("name")
2448
+ if not isinstance(style_name, str) or style_name not in containing_dict:
2449
+ raise ValueError(f"Error: Expected dictionary '{style_name}' is missing or malformed.")
2450
+ trace_style_dict = containing_dict[style_name]
2451
+
2452
+ # Return only the dictionary corresponding to the trace style name
2453
+ return trace_style_dict
2454
+
2455
+
2456
+ def apply_layout_style_to_plotly_dict(fig_dict, layout_style_to_apply="default"):
2457
+ """
2458
+ Apply a predefined style to a Plotly fig_dict while preserving non-cosmetic fields.
2459
+
2460
+ :param fig_dict: dict, Plotly style fig_dict
2461
+ :param layout_style_to_apply: str, Name of the style or journal, or a style dictionary to apply.
2462
+ :return: dict, Updated Plotly style fig_dict.
2463
+ """
2464
+ if type(layout_style_to_apply) == type("string"):
2465
+ layout_style_to_apply_name = layout_style_to_apply
2466
+ else:
2467
+ layout_style_to_apply_name = list(layout_style_to_apply.keys())[0]#if it is a dictionary, it will have one key which is its name.
2468
+ if (layout_style_to_apply == '') or (str(layout_style_to_apply).lower() == 'none'):
2469
+ return fig_dict
2470
+
2471
+ #Hardcoding some cases as ones that will call the default layout, for convenience.
2472
+ if (layout_style_to_apply.lower() == "minimalist") or (layout_style_to_apply.lower() == "bold"):
2473
+ layout_style_to_apply = "default"
2474
+
2475
+
2476
+ styles_available = JSONGrapher.styles.layout_styles_library.styles_library
2477
+
2478
+
2479
+ # Use or get the style specified, or use default if not found
2480
+ if isinstance(layout_style_to_apply, dict):
2481
+ style_dict = layout_style_to_apply
2482
+ else:
2483
+ style_dict = styles_available.get(layout_style_to_apply, {})
2484
+ if not style_dict: # Check if it's an empty dictionary
2485
+ print(f"Style named '{layout_style_to_apply}' not found with explicit layout dictionary. Using 'default' layout style.")
2486
+ style_dict = styles_available.get("default", {})
2487
+
2488
+ # Ensure layout exists in the figure
2489
+ fig_dict.setdefault("layout", {})
2490
+
2491
+ # **Extract non-cosmetic fields**
2492
+ non_cosmetic_fields = {
2493
+ "title.text": fig_dict.get("layout", {}).get("title", {}).get("text", None),
2494
+ "xaxis.title.text": fig_dict.get("layout", {}).get("xaxis", {}).get("title", {}).get("text", None),
2495
+ "yaxis.title.text": fig_dict.get("layout", {}).get("yaxis", {}).get("title", {}).get("text", None),
2496
+ "zaxis.title.text": fig_dict.get("layout", {}).get("zaxis", {}).get("title", {}).get("text", None),
2497
+ "legend.title.text": fig_dict.get("layout", {}).get("legend", {}).get("title", {}).get("text", None),
2498
+ "annotations.text": [
2499
+ annotation.get("text", None) for annotation in fig_dict.get("layout", {}).get("annotations", [])
2500
+ ],
2501
+ "updatemenus.buttons.label": [
2502
+ button.get("label", None) for menu in fig_dict.get("layout", {}).get("updatemenus", [])
2503
+ for button in menu.get("buttons", [])
2504
+ ],
2505
+ "coloraxis.colorbar.title.text": fig_dict.get("layout", {}).get("coloraxis", {}).get("colorbar", {}).get("title", {}).get("text", None),
2506
+ }
2507
+
2508
+ # **Apply style dictionary to create a fresh layout object**
2509
+ new_layout = style_dict.get("layout", {}).copy()
2510
+
2511
+ # **Restore non-cosmetic fields**
2512
+ if non_cosmetic_fields["title.text"]:
2513
+ new_layout.setdefault("title", {})["text"] = non_cosmetic_fields["title.text"]
2514
+
2515
+ if non_cosmetic_fields["xaxis.title.text"]:
2516
+ new_layout.setdefault("xaxis", {}).setdefault("title", {})["text"] = non_cosmetic_fields["xaxis.title.text"]
2517
+
2518
+ if non_cosmetic_fields["yaxis.title.text"]:
2519
+ new_layout.setdefault("yaxis", {}).setdefault("title", {})["text"] = non_cosmetic_fields["yaxis.title.text"]
2520
+
2521
+ if non_cosmetic_fields["zaxis.title.text"]:
2522
+ new_layout.setdefault("zaxis", {}).setdefault("title", {})["text"] = non_cosmetic_fields["zaxis.title.text"]
2523
+
2524
+ if non_cosmetic_fields["legend.title.text"]:
2525
+ new_layout.setdefault("legend", {}).setdefault("title", {})["text"] = non_cosmetic_fields["legend.title.text"]
2526
+
2527
+ if non_cosmetic_fields["annotations.text"]:
2528
+ new_layout["annotations"] = [{"text": text} for text in non_cosmetic_fields["annotations.text"]]
2529
+
2530
+ if non_cosmetic_fields["updatemenus.buttons.label"]:
2531
+ new_layout["updatemenus"] = [{"buttons": [{"label": label} for label in non_cosmetic_fields["updatemenus.buttons.label"]]}]
2532
+
2533
+ if non_cosmetic_fields["coloraxis.colorbar.title.text"]:
2534
+ new_layout.setdefault("coloraxis", {}).setdefault("colorbar", {})["title"] = {"text": non_cosmetic_fields["coloraxis.colorbar.title.text"]}
2535
+
2536
+ # **Assign the new layout back into the figure dictionary**
2537
+ fig_dict["layout"] = new_layout
2538
+ #Now update the fig_dict to signify the new layout_style used.
2539
+ if "plot_style" not in fig_dict:
2540
+ fig_dict["plot_style"] = {}
2541
+ fig_dict["plot_style"]["layout_style"] = layout_style_to_apply_name
2542
+ return fig_dict
2543
+
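+ # Example usage (an illustrative, commented-out sketch using the built-in "default" layout style):
+ # fig_dict = apply_layout_style_to_plotly_dict(fig_dict, layout_style_to_apply="default")
+ # # Axis titles, legend titles, and annotation text are preserved; cosmetic layout fields come from the style.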
2544
+ #TODO: This logic should be changed in the future. There should be a separated function to remove formatting
2545
+ # versus just removing the current setting of "layout_style"
2546
+ # So the main class function will also be broken into two and/or need to take an optional argument in
2547
+ def remove_layout_style_from_plotly_dict(fig_dict):
2548
+ """
2549
+ Remove applied layout styles from a Plotly figure dictionary while preserving essential content.
2550
+
2551
+ :param fig_dict: dict, Plotly style fig_dict
2552
+ :return: dict, Updated Plotly style fig_dict with styles removed but key data intact.
2553
+ """
2554
+ if "layout" in fig_dict:
2555
+ style_keys = ["font", "paper_bgcolor", "plot_bgcolor", "gridcolor", "gridwidth", "tickfont", "linewidth"]
2556
+
2557
+ # **Store non-cosmetic fields if present, otherwise assign None**
2558
+ non_cosmetic_fields = {
2559
+ "title.text": fig_dict.get("layout", {}).get("title", {}).get("text", None),
2560
+ "xaxis.title.text": fig_dict.get("layout", {}).get("xaxis", {}).get("title", {}).get("text", None),
2561
+ "yaxis.title.text": fig_dict.get("layout", {}).get("yaxis", {}).get("title", {}).get("text", None),
2562
+ "zaxis.title.text": fig_dict.get("layout", {}).get("zaxis", {}).get("title", {}).get("text", None),
2563
+ "legend.title.text": fig_dict.get("layout", {}).get("legend", {}).get("title", {}).get("text", None),
2564
+ "annotations.text": [annotation.get("text", None) for annotation in fig_dict.get("layout", {}).get("annotations", [])],
2565
+ "updatemenus.buttons.label": [
2566
+ button.get("label", None) for menu in fig_dict.get("layout", {}).get("updatemenus", [])
2567
+ for button in menu.get("buttons", [])
2568
+ ],
2569
+ "coloraxis.colorbar.title.text": fig_dict.get("layout", {}).get("coloraxis", {}).get("colorbar", {}).get("title", {}).get("text", None),
2570
+ }
2571
+
2572
+ # Preserve title text while removing font styling
2573
+ if "title" in fig_dict["layout"] and isinstance(fig_dict["layout"]["title"], dict):
2574
+ fig_dict["layout"]["title"] = {"text": non_cosmetic_fields["title.text"]} if non_cosmetic_fields["title.text"] is not None else {}
2575
+
2576
+ # Preserve axis titles while stripping font styles
2577
+ for axis in ["xaxis", "yaxis", "zaxis"]:
2578
+ if axis in fig_dict["layout"] and isinstance(fig_dict["layout"][axis], dict):
2579
+ if "title" in fig_dict["layout"][axis] and isinstance(fig_dict["layout"][axis]["title"], dict):
2580
+ fig_dict["layout"][axis]["title"] = {"text": non_cosmetic_fields[f"{axis}.title.text"]} if non_cosmetic_fields[f"{axis}.title.text"] is not None else {}
2581
+
2582
+ # Remove style-related attributes but keep axis configurations
2583
+ for key in style_keys:
2584
+ fig_dict["layout"][axis].pop(key, None)
2585
+
2586
+ # Preserve legend title text while stripping font styling
2587
+ if "legend" in fig_dict["layout"] and isinstance(fig_dict["layout"]["legend"], dict):
2588
+ if "title" in fig_dict["layout"]["legend"] and isinstance(fig_dict["layout"]["legend"]["title"], dict):
2589
+ fig_dict["layout"]["legend"]["title"] = {"text": non_cosmetic_fields["legend.title.text"]} if non_cosmetic_fields["legend.title.text"] is not None else {}
2590
+ fig_dict["layout"]["legend"].pop("font", None)
2591
+
2592
+ # Preserve annotations text while stripping style attributes
2593
+ if "annotations" in fig_dict["layout"]:
2594
+ fig_dict["layout"]["annotations"] = [
2595
+ {"text": text} if text is not None else {} for text in non_cosmetic_fields["annotations.text"]
2596
+ ]
2597
+
2598
+ # Preserve update menu labels while stripping styles
2599
+ if "updatemenus" in fig_dict["layout"]:
2600
+ for menu in fig_dict["layout"]["updatemenus"]:
2601
+ for i, button in enumerate(menu.get("buttons", [])):
2602
+ button.clear()
2603
+ if non_cosmetic_fields["updatemenus.buttons.label"][i] is not None:
2604
+ button["label"] = non_cosmetic_fields["updatemenus.buttons.label"][i]
2605
+
2606
+ # Preserve color bar title while stripping styles
2607
+ if "coloraxis" in fig_dict["layout"] and "colorbar" in fig_dict["layout"]["coloraxis"]:
2608
+ fig_dict["layout"]["coloraxis"]["colorbar"]["title"] = {"text": non_cosmetic_fields["coloraxis.colorbar.title.text"]} if non_cosmetic_fields["coloraxis.colorbar.title.text"] is not None else {}
2609
+
2610
+ # Remove general style settings without clearing layout structure
2611
+ for key in style_keys:
2612
+ fig_dict["layout"].pop(key, None)
2613
+
2614
+ #If being told to remove the style, should also pop it from fig_dict.
2615
+ if "plot_style" in fig_dict:
2616
+ if "layout_style" in fig_dict["plot_style"]:
2617
+ fig_dict["plot_style"].pop("layout_style")
2618
+ return fig_dict
2619
+
2620
+ def extract_layout_style_from_plotly_dict(fig_dict):
2621
+ """
2622
+ Extract a layout style dictionary from a given Plotly JSON object, including background color, grids, and other appearance attributes.
2623
+
2624
+ :param fig_dict: dict, Plotly JSON object.
2625
+ :return: dict, Extracted style settings.
2626
+ """
2627
+
2628
+
2629
+ # **Extraction Phase** - Collect cosmetic fields if they exist
2630
+ layout = fig_dict.get("layout", {})
2631
+
2632
+ # Note: Each assignment below will return None if the corresponding field is missing
2633
+ title_font = layout.get("title", {}).get("font")
2634
+ title_x = layout.get("title", {}).get("x")
2635
+ title_y = layout.get("title", {}).get("y")
2636
+
2637
+ global_font = layout.get("font")
2638
+ paper_bgcolor = layout.get("paper_bgcolor")
2639
+ plot_bgcolor = layout.get("plot_bgcolor")
2640
+ margin = layout.get("margin")
2641
+
2642
+ # Extract x-axis cosmetic fields
2643
+ xaxis_title_font = layout.get("xaxis", {}).get("title", {}).get("font")
2644
+ xaxis_tickfont = layout.get("xaxis", {}).get("tickfont")
2645
+ xaxis_gridcolor = layout.get("xaxis", {}).get("gridcolor")
2646
+ xaxis_gridwidth = layout.get("xaxis", {}).get("gridwidth")
2647
+ xaxis_zerolinecolor = layout.get("xaxis", {}).get("zerolinecolor")
2648
+ xaxis_zerolinewidth = layout.get("xaxis", {}).get("zerolinewidth")
2649
+ xaxis_tickangle = layout.get("xaxis", {}).get("tickangle")
2650
+
2651
+ # **Set flag for x-axis extraction**
2652
+ xaxis = any([
2653
+ xaxis_title_font, xaxis_tickfont, xaxis_gridcolor, xaxis_gridwidth,
2654
+ xaxis_zerolinecolor, xaxis_zerolinewidth, xaxis_tickangle
2655
+ ])
2656
+
2657
+ # Extract y-axis cosmetic fields
2658
+ yaxis_title_font = layout.get("yaxis", {}).get("title", {}).get("font")
2659
+ yaxis_tickfont = layout.get("yaxis", {}).get("tickfont")
2660
+ yaxis_gridcolor = layout.get("yaxis", {}).get("gridcolor")
2661
+ yaxis_gridwidth = layout.get("yaxis", {}).get("gridwidth")
2662
+ yaxis_zerolinecolor = layout.get("yaxis", {}).get("zerolinecolor")
2663
+ yaxis_zerolinewidth = layout.get("yaxis", {}).get("zerolinewidth")
2664
+ yaxis_tickangle = layout.get("yaxis", {}).get("tickangle")
2665
+
2666
+ # **Set flag for y-axis extraction**
2667
+ yaxis = any([
2668
+ yaxis_title_font, yaxis_tickfont, yaxis_gridcolor, yaxis_gridwidth,
2669
+ yaxis_zerolinecolor, yaxis_zerolinewidth, yaxis_tickangle
2670
+ ])
2671
+
2672
+ # Extract legend styling
2673
+ legend_font = layout.get("legend", {}).get("font")
2674
+ legend_x = layout.get("legend", {}).get("x")
2675
+ legend_y = layout.get("legend", {}).get("y")
2676
+
2677
+ # **Assignment Phase** - Reconstruct dictionary in a structured manner
2678
+ extracted_layout_style = {"layout": {}}
2679
+
2680
+ if title_font or title_x:
2681
+ extracted_layout_style["layout"]["title"] = {}
2682
+ if title_font:
2683
+ extracted_layout_style["layout"]["title"]["font"] = title_font
2684
+ if title_x:
2685
+ extracted_layout_style["layout"]["title"]["x"] = title_x
2686
+ if title_y:
2687
+ extracted_layout_style["layout"]["title"]["y"] = title_y
2688
+
2689
+ if global_font:
2690
+ extracted_layout_style["layout"]["font"] = global_font
2691
+
2692
+ if paper_bgcolor:
2693
+ extracted_layout_style["layout"]["paper_bgcolor"] = paper_bgcolor
2694
+ if plot_bgcolor:
2695
+ extracted_layout_style["layout"]["plot_bgcolor"] = plot_bgcolor
2696
+ if margin:
2697
+ extracted_layout_style["layout"]["margin"] = margin
2698
+
2699
+ if xaxis:
2700
+ extracted_layout_style["layout"]["xaxis"] = {}
2701
+ if xaxis_title_font:
2702
+ extracted_layout_style["layout"]["xaxis"]["title"] = {"font": xaxis_title_font}
2703
+ if xaxis_tickfont:
2704
+ extracted_layout_style["layout"]["xaxis"]["tickfont"] = xaxis_tickfont
2705
+ if xaxis_gridcolor:
2706
+ extracted_layout_style["layout"]["xaxis"]["gridcolor"] = xaxis_gridcolor
2707
+ if xaxis_gridwidth:
2708
+ extracted_layout_style["layout"]["xaxis"]["gridwidth"] = xaxis_gridwidth
2709
+ if xaxis_zerolinecolor:
2710
+ extracted_layout_style["layout"]["xaxis"]["zerolinecolor"] = xaxis_zerolinecolor
2711
+ if xaxis_zerolinewidth:
2712
+ extracted_layout_style["layout"]["xaxis"]["zerolinewidth"] = xaxis_zerolinewidth
2713
+ if xaxis_tickangle:
2714
+ extracted_layout_style["layout"]["xaxis"]["tickangle"] = xaxis_tickangle
2715
+
2716
+ if yaxis:
2717
+ extracted_layout_style["layout"]["yaxis"] = {}
2718
+ if yaxis_title_font:
2719
+ extracted_layout_style["layout"]["yaxis"]["title"] = {"font": yaxis_title_font}
2720
+ if yaxis_tickfont:
2721
+ extracted_layout_style["layout"]["yaxis"]["tickfont"] = yaxis_tickfont
2722
+ if yaxis_gridcolor:
2723
+ extracted_layout_style["layout"]["yaxis"]["gridcolor"] = yaxis_gridcolor
2724
+ if yaxis_gridwidth:
2725
+ extracted_layout_style["layout"]["yaxis"]["gridwidth"] = yaxis_gridwidth
2726
+ if yaxis_zerolinecolor:
2727
+ extracted_layout_style["layout"]["yaxis"]["zerolinecolor"] = yaxis_zerolinecolor
2728
+ if yaxis_zerolinewidth:
2729
+ extracted_layout_style["layout"]["yaxis"]["zerolinewidth"] = yaxis_zerolinewidth
2730
+ if yaxis_tickangle:
2731
+ extracted_layout_style["layout"]["yaxis"]["tickangle"] = yaxis_tickangle
2732
+
2733
+ if legend_font or legend_x or legend_y:
2734
+ extracted_layout_style["layout"]["legend"] = {}
2735
+ if legend_font:
2736
+ extracted_layout_style["layout"]["legend"]["font"] = legend_font
2737
+ if legend_x:
2738
+ extracted_layout_style["layout"]["legend"]["x"] = legend_x
2739
+ if legend_y:
2740
+ extracted_layout_style["layout"]["legend"]["y"] = legend_y
2741
+
2742
+ return extracted_layout_style
2743
+
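+ # Example usage (an illustrative, commented-out sketch):
+ # extracted_layout_style = extract_layout_style_from_plotly_dict(fig_dict)
+ # # Returns a dictionary like {"layout": {"font": {...}, "paper_bgcolor": ..., "xaxis": {...}}} containing only the cosmetic fields that were present.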
2744
+ ## End of Section of Code for Styles and Converting between plotly and matplotlib Fig objects ##
2745
+
2746
+ ### Start of section of code with functions for extracting and updating x and y ranges of data series ###
2747
+
2748
+ def update_implicit_data_series_x_ranges(fig_dict, range_dict):
2749
+ """
2750
+ Updates the x_range_default values for all simulate and equation data series
2751
+ in a given figure dictionary using the provided range dictionary.
2752
+
2753
+ Args:
2754
+ fig_dict (dict): The original figure dictionary containing various data series.
2755
+ range_dict (dict): A dictionary with keys "min_x" and "max_x" providing the
2756
+ global minimum and maximum x values for updates.
2757
+
2758
+ Returns:
2759
+ dict: A new figure dictionary with updated x_range_default values for
2760
+ equation and simulate series, while keeping other data unchanged.
2761
+
2762
+ Notes:
2763
+ - If min_x or max_x in range_dict is None, the function preserves the
2764
+ existing x_range_default values instead of overwriting them.
2765
+ - Uses deepcopy to ensure modifications do not affect the original fig_dict.
2766
+ """
2767
+ import copy # Import inside function to limit scope
2768
+
2769
+ updated_fig_dict = copy.deepcopy(fig_dict) # Deep copy avoids modifying original data
2770
+
2771
+ min_x = range_dict["min_x"]
2772
+ max_x = range_dict["max_x"]
2773
+
2774
+ for data_series in updated_fig_dict.get("data", []):
2775
+ if "equation" in data_series:
2776
+ equation_info = data_series["equation"]
2777
+
2778
+ # Determine valid values before assignment
2779
+ min_x_value = min_x if (min_x is not None) else equation_info.get("x_range_default", [None, None])[0]
2780
+ max_x_value = max_x if (max_x is not None) else equation_info.get("x_range_default", [None, None])[1]
2781
+
2782
+ # Assign updated values
2783
+ equation_info["x_range_default"] = [min_x_value, max_x_value]
2784
+
2785
+ elif "simulate" in data_series:
2786
+ simulate_info = data_series["simulate"]
2787
+
2788
+ # Determine valid values before assignment
2789
+ min_x_value = min_x if (min_x is not None) else simulate_info.get("x_range_default", [None, None])[0]
2790
+ max_x_value = max_x if (max_x is not None) else simulate_info.get("x_range_default", [None, None])[1]
2791
+
2792
+ # Assign updated values
2793
+ simulate_info["x_range_default"] = [min_x_value, max_x_value]
2794
+
2795
+ return updated_fig_dict
2796
+
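+ # Example usage (an illustrative, commented-out sketch; the range values below are hypothetical):
+ # range_dict = {"min_x": 0, "max_x": 100}
+ # updated_fig_dict = update_implicit_data_series_x_ranges(fig_dict, range_dict)
+ # # Every "equation" and "simulate" series in the returned copy now has x_range_default of [0, 100]; other series are untouched.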
2797
+
2798
+
2799
+
2800
+ def get_fig_dict_ranges(fig_dict, skip_equations=False, skip_simulations=False):
2801
+ """
2802
+ Extracts minimum and maximum x/y values from each data_series in a fig_dict, as well as overall min and max for x and y.
2803
+
2804
+ Args:
2805
+ fig_dict (dict): The figure dictionary containing multiple data series.
2806
+ skip_equations (bool): If True, equation-based data series are ignored.
2807
+ skip_simulations (bool): If True, simulation-based data series are ignored.
2808
+
2809
+ Returns:
2810
+ tuple:
2811
+ - fig_dict_ranges (dict): A dictionary containing overall min/max x/y values across all valid series.
2812
+ - data_series_ranges (dict): A dictionary with individual min/max values for each data series.
2813
+
2814
+ Notes:
2815
+ - Equations and simulations have predefined x-range defaults and limits.
2816
+ - If their x-range is absent, individual data series values are used.
2817
+ - Ensures empty lists don't trigger errors when computing min/max values.
2818
+ """
2819
+ # Initialize final range values to None to ensure assignment
2820
+ fig_dict_ranges = {
2821
+ "min_x": None,
2822
+ "max_x": None,
2823
+ "min_y": None,
2824
+ "max_y": None
2825
+ }
2826
+
2827
+ data_series_ranges = {
2828
+ "min_x": [],
2829
+ "max_x": [],
2830
+ "min_y": [],
2831
+ "max_y": []
2832
+ }
2833
+
2834
+ for data_series in fig_dict.get("data", []):
2835
+ min_x, max_x, min_y, max_y = None, None, None, None # Initialize extrema as None
2836
+
2837
+ # Determine if the data series contains either "equation" or "simulate"
2838
+ if "equation" in data_series:
2839
+ if skip_equations:
2840
+ implicit_data_series_to_extract_from = None
2841
+ # Will Skip processing, but still append None values
2842
+ else:
2843
+ implicit_data_series_to_extract_from = data_series["equation"]
2844
+
2845
+ elif "simulate" in data_series:
2846
+ if skip_simulations:
2847
+ implicit_data_series_to_extract_from = None
2848
+ # Will Skip processing, but still append None values
2849
+ else:
2850
+ implicit_data_series_to_extract_from = data_series["simulate"]
2851
+
2852
+ else:
2853
+ implicit_data_series_to_extract_from = None # No equation or simulation, process x and y normally
2854
+
2855
+ if implicit_data_series_to_extract_from:
2856
+ x_range_default = implicit_data_series_to_extract_from.get("x_range_default", [None, None])
2857
+ x_range_limits = implicit_data_series_to_extract_from.get("x_range_limits", [None, None])
2858
+
2859
+ # Assign values, but keep None if missing
2860
+ min_x = (x_range_default[0] if (x_range_default[0] is not None) else x_range_limits[0])
2861
+ max_x = (x_range_default[1] if (x_range_default[1] is not None) else x_range_limits[1])
2862
+
2863
+ # Ensure "x" key exists AND list is not empty before calling min() or max()
2864
+ if (min_x is None) and ("x" in data_series) and (len(data_series["x"]) > 0):
2865
+ valid_x_values = [x for x in data_series["x"] if x is not None] # Filter out None values
2866
+ if valid_x_values: # Ensure list isn't empty after filtering
2867
+ min_x = min(valid_x_values)
2868
+
2869
+ if (max_x is None) and ("x" in data_series) and (len(data_series["x"]) > 0):
2870
+ valid_x_values = [x for x in data_series["x"] if x is not None] # Filter out None values
2871
+ if valid_x_values: # Ensure list isn't empty after filtering
2872
+ max_x = max(valid_x_values)
2873
+
2874
+ # Ensure "y" key exists AND list is not empty before calling min() or max()
2875
+ if (min_y is None) and ("y" in data_series) and (len(data_series["y"]) > 0):
2876
+ valid_y_values = [y for y in data_series["y"] if y is not None] # Filter out None values
2877
+ if valid_y_values: # Ensure list isn't empty after filtering
2878
+ min_y = min(valid_y_values)
2879
+
2880
+ if (max_y is None) and ("y" in data_series) and (len(data_series["y"]) > 0):
2881
+ valid_y_values = [y for y in data_series["y"] if y is not None] # Filter out None values
2882
+ if valid_y_values: # Ensure list isn't empty after filtering
2883
+ max_y = max(valid_y_values)
2884
+
2885
+ # Always add values to the lists, including None if applicable
2886
+ data_series_ranges["min_x"].append(min_x)
2887
+ data_series_ranges["max_x"].append(max_x)
2888
+ data_series_ranges["min_y"].append(min_y)
2889
+ data_series_ranges["max_y"].append(max_y)
2890
+
2891
+ # Filter out None values for overall min/max calculations
2892
+ valid_min_x_values = [x for x in data_series_ranges["min_x"] if x is not None]
2893
+ valid_max_x_values = [x for x in data_series_ranges["max_x"] if x is not None]
2894
+ valid_min_y_values = [y for y in data_series_ranges["min_y"] if y is not None]
2895
+ valid_max_y_values = [y for y in data_series_ranges["max_y"] if y is not None]
2896
+
2897
+ fig_dict_ranges["min_x"] = min(valid_min_x_values) if valid_min_x_values else None
2898
+ fig_dict_ranges["max_x"] = max(valid_max_x_values) if valid_max_x_values else None
2899
+ fig_dict_ranges["min_y"] = min(valid_min_y_values) if valid_min_y_values else None
2900
+ fig_dict_ranges["max_y"] = max(valid_max_y_values) if valid_max_y_values else None
2901
+
2902
+ return fig_dict_ranges, data_series_ranges
2903
+
2904
+
2905
+ # # Example usage
2906
+ # fig_dict = {
2907
+ # "data": [
2908
+ # {"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]},
2909
+ # {"x": [5, 6, 7, 8], "y": [50, 60, 70, 80]},
2910
+ # {"equation": {
2911
+ # "x_range_default": [None, 500],
2912
+ # "x_range_limits": [100, 600]
2913
+ # }},
2914
+ # {"simulate": {
2915
+ # "x_range_default": [None, 700],
2916
+ # "x_range_limits": [300, 900]
2917
+ # }}
2918
+ # ]
2919
+ # }
2920
+
2921
+ # fig_dict_ranges, data_series_ranges = get_fig_dict_ranges(fig_dict, skip_equations=True, skip_simulations=True) # Skips both
2922
+ # print("Data Series Values:", data_series_ranges)
2923
+ # print("Extreme Values:", fig_dict_ranges)
2924
+
2925
+ ### End of section of code with functions for extracting and updating x and y ranges of data series ###
2926
+
2927
+
2928
+ ### Start of section of code with functions for cleaning fig_dicts for plotly compatibility ###
2929
+
2930
+ def update_title_field(fig_dict, depth=1, max_depth=10):
2931
+ """ This function is intended to make JSONGrapher .json files compatible with the newer plotly recommended title field formatting
2932
+ which is necessary to do things like change the font, and also necessary for being able to convert a JSONGrapher json_dict to python plotly figure objects.
2933
+ Recursively checks for 'title' fields and converts them to dictionary format. """
2934
+ if depth > max_depth or not isinstance(fig_dict, dict):
2935
+ return fig_dict
2936
+
2937
+ for key, value in fig_dict.items():
2938
+ if key == "title" and isinstance(value, str):
2939
+ fig_dict[key] = {"text": value}
2940
+ elif isinstance(value, dict): # Nested dictionary
2941
+ fig_dict[key] = update_title_field(value, depth + 1, max_depth)
2942
+ elif isinstance(value, list): # Lists can contain nested dictionaries
2943
+ fig_dict[key] = [update_title_field(item, depth + 1, max_depth) if isinstance(item, dict) else item for item in value]
2944
+ return fig_dict
2945
+
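+ # Example usage (an illustrative, commented-out sketch; the fig_dict below is hypothetical):
+ # fig_dict = {"layout": {"title": "Example Plot", "xaxis": {"title": "X"}}}
+ # fig_dict = update_title_field(fig_dict)
+ # # fig_dict is now {"layout": {"title": {"text": "Example Plot"}, "xaxis": {"title": {"text": "X"}}}}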
2946
+
2947
+
2948
+
2949
+
2950
+ def convert_to_3d_layout(layout):
2951
+ import copy
2952
+ # Create a deep copy to avoid modifying the original layout
2953
+ new_layout = copy.deepcopy(layout)
2954
+
2955
+ # Add the axis fields inside `scene` first
2956
+ new_layout["scene"] = {
2957
+ "xaxis": layout.get("xaxis", {}),
2958
+ "yaxis": layout.get("yaxis", {}),
2959
+ "zaxis": layout.get("zaxis", {})
2960
+ }
2961
+
2962
+ # Remove the original axis fields from the top-level layout
2963
+ new_layout.pop("xaxis", None)
2964
+ new_layout.pop("yaxis", None)
2965
+ new_layout.pop("zaxis", None)
2966
+
2967
+ return new_layout
2968
+
2969
+ #A bubble plot uses z data, but that data is then
2970
+ #moved into the size field and the z field must be removed.
2971
+ def remove_bubble_fields(fig_dict):
2972
+ #This code will modify the data_series inside the fig_dict, directly.
2973
+ bubble_found = False #initialize with false case.
2974
+ for data_series in fig_dict["data"]:
2975
+ if "trace_style" in data_series:
2976
+ if (data_series["trace_style"] == "bubble") or ("max_bubble_size" in data_series):
2977
+ bubble_found = True
2978
+ if bubble_found == True:
2979
+ if "z" in data_series:
2980
+ data_series.pop("z")
2981
+ if "z_points" in data_series:
2982
+ data_series.pop("z_points")
2983
+ if "max_bubble_size" in data_series:
2984
+ data_series.pop("max_bubble_size")
2985
+ if bubble_found == True:
2986
+ if "zaxis" in fig_dict["layout"]:
2987
+ fig_dict["layout"].pop("zaxis")
2988
+ return fig_dict
2989
+
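+ # Illustrative usage sketch (hypothetical values, not from the package source):
+ # a bubble series has already had its z values mapped to marker sizes, so this helper strips the leftover fields.
+ # example_fig_dict = {"data": [{"trace_style": "bubble", "x": [1, 2], "y": [3, 4], "z": [10, 20], "max_bubble_size": 40}], "layout": {"zaxis": {"title": {"text": "size"}}}}
+ # example_fig_dict = remove_bubble_fields(example_fig_dict)
+ # # "z" and "max_bubble_size" are removed from the data series, and "zaxis" is removed from the layout.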
2990
+ def update_3d_axes(fig_dict):
2991
+ if "zaxis" in fig_dict["layout"]:
2992
+ fig_dict['layout'] = convert_to_3d_layout(fig_dict['layout'])
2993
+ for data_series_index, data_series in enumerate(fig_dict["data"]):
2994
+ if data_series["type"] == "scatter3d":
2995
+ if "z_matrix" in data_series: #for this one, we don't want the z_matrix.
2996
+ data_series.pop("z_matrix")
2997
+ if data_series["type"] == "mesh3d":
2998
+ if "z_matrix" in data_series: #for this one, we don't want the z_matrix.
2999
+ data_series.pop("z_matrix")
3000
+ if data_series["type"] == "surface":
3001
+ if "z_matrix" in data_series: #for this one, we want the z_matrix so we pop z if we have the z_matrix..
3002
+ data_series.pop("z")
3003
+ print(" The Surface type of 3D plot has not been implemented yet. It requires replacing z with the z_matrix after the equation has been evaluated.")
3004
+ return fig_dict
3005
+
3006
+ def remove_extra_information_field(fig_dict, depth=1, max_depth=10):
3007
+ """ This function is intended to make JSONGrapher .json files compatible with the current plotly format expectations
3008
+ and also necessary for being able to convert a JSONGrapher json_dict to python plotly figure objects.
3009
+ Recursively checks for 'extraInformation' fields and removes them."""
3010
+ if depth > max_depth or not isinstance(fig_dict, dict):
3011
+ return fig_dict
3012
+
3013
+ # Use a copy of the dictionary keys to safely modify the dictionary during iteration
3014
+ for key in list(fig_dict.keys()):
3015
+ if key == ("extraInformation" or "extra_information"):
3016
+ del fig_dict[key] # Remove the field
3017
+ elif isinstance(fig_dict[key], dict): # Nested dictionary
3018
+ fig_dict[key] = remove_extra_information_field(fig_dict[key], depth + 1, max_depth)
3019
+ elif isinstance(fig_dict[key], list): # Lists can contain nested dictionaries
3020
+ fig_dict[key] = [
3021
+ remove_extra_information_field(item, depth + 1, max_depth) if isinstance(item, dict) else item for item in fig_dict[key]
3022
+ ]
3023
+
3024
+ return fig_dict
3025
+
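+ # Illustrative usage sketch (hypothetical values, not from the package source):
+ # example_fig_dict = {"data": [{"x": [1], "y": [2], "extraInformation": "lab notebook page 7"}]}
+ # example_fig_dict = remove_extra_information_field(example_fig_dict)
+ # # example_fig_dict is now {"data": [{"x": [1], "y": [2]}]}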
3026
+
3027
+ def remove_nested_comments(data, top_level=True):
3028
+ """ This function is intended to make JSONGrapher .json files compatible with the current plotly format expectations
3029
+ and also necessary for being able to convert a JSONGrapher json_dict to python plotly figure objects.
3030
+ Removes 'comments' fields that are not at the top level of the JSON-dict. Starts with 'top_level = True' when dict is first passed in then becomes false after that. """
3031
+ if not isinstance(data, dict):
3032
+ return data
3033
+ # Process nested structures
3034
+ for key in list(data.keys()):
3035
+ if isinstance(data[key], dict): # Nested dictionary
3036
+ data[key] = remove_nested_comments(data[key], top_level=False)
3037
+ elif isinstance(data[key], list): # Lists can contain nested dictionaries
3038
+ data[key] = [
3039
+ remove_nested_comments(item, top_level=False) if isinstance(item, dict) else item for item in data[key]
3040
+ ]
3041
+ # Only remove 'comments' if not at the top level
3042
+ if not top_level:
3043
+ data = {k: v for k, v in data.items() if k != "comments"}
3044
+ return data
3045
+
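+ # Illustrative usage sketch (hypothetical values, not from the package source):
+ # top-level comments are kept, nested comments are removed.
+ # example_record = {"comments": "kept", "layout": {"comments": "dropped", "title": {"text": "T"}}}
+ # example_record = remove_nested_comments(example_record)
+ # # example_record is now {"comments": "kept", "layout": {"title": {"text": "T"}}}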
3046
+ def remove_simulate_field(json_fig_dict):
3047
+ data_dicts_list = json_fig_dict['data']
3048
+ for data_dict in data_dicts_list:
3049
+ data_dict.pop('simulate', None) #Some people recommend using pop over if/del as safer. Both ways should work under normal circumstances.
3050
+ json_fig_dict['data'] = data_dicts_list #this line shouldn't be necessary, but including it for clarity and carefulness.
3051
+ return json_fig_dict
3052
+
3053
+ def remove_equation_field(json_fig_dict):
3054
+ data_dicts_list = json_fig_dict['data']
3055
+ for data_dict in data_dicts_list:
3056
+ data_dict.pop('equation', None) #Some people recommend using pop over if/del as safer. Both ways should work under normal circumstances.
3057
+ json_fig_dict['data'] = data_dicts_list #this line shouldn't be necessary, but including it for clarity and carefulness.
3058
+ return json_fig_dict
3059
+
3060
+ def remove_trace_style_field(json_fig_dict):
3061
+ data_dicts_list = json_fig_dict['data']
3062
+ for data_dict in data_dicts_list:
3063
+ data_dict.pop('trace_style', None) #Some people recommend using pop over if/del as safer. Both ways should work under normal circumstances.
3064
+ data_dict.pop('tracetype', None) #Some people recommend using pop over if/del as safer. Both ways should work under normal circumstances.
3065
+ json_fig_dict['data'] = data_dicts_list #this line shouldn't be necessary, but including it for clarity and carefulness.
3066
+ return json_fig_dict
3067
+
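+ # Illustrative usage sketch (hypothetical values, not from the package source):
+ # the simulate, equation, and trace_style removers all follow the same pop-per-series pattern.
+ # example_fig_dict = {"data": [{"x": [1, 2], "y": [3, 4], "trace_style": "scatter", "simulate": {"model": "local"}}]}
+ # example_fig_dict = remove_simulate_field(example_fig_dict)
+ # example_fig_dict = remove_trace_style_field(example_fig_dict)
+ # # the data series now contains only "x" and "y".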
3068
+ def remove_custom_units_chevrons(json_fig_dict):
3069
+ try:
3070
+ json_fig_dict['layout']['xaxis']['title']['text'] = json_fig_dict['layout']['xaxis']['title']['text'].replace('<','').replace('>','')
3071
+ except KeyError:
3072
+ pass
3073
+ try:
3074
+ json_fig_dict['layout']['yaxis']['title']['text'] = json_fig_dict['layout']['yaxis']['title']['text'].replace('<','').replace('>','')
3075
+ except KeyError:
3076
+ pass
3077
+ try:
3078
+ json_fig_dict['layout']['zaxis']['title']['text'] = json_fig_dict['layout']['zaxis']['title']['text'].replace('<','').replace('>','')
3079
+ except KeyError:
3080
+ pass
3081
+ return json_fig_dict
3082
+
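+ # Illustrative usage sketch (hypothetical labels, not from the package source):
+ # example_fig_dict = {"layout": {"xaxis": {"title": {"text": "Loading (<TOF>)"}}, "yaxis": {"title": {"text": "Rate (1/s)"}}}}
+ # example_fig_dict = remove_custom_units_chevrons(example_fig_dict)
+ # # the xaxis title text becomes "Loading (TOF)"; the missing zaxis is skipped via the KeyError handling.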
3083
+ def clean_json_fig_dict(json_fig_dict, fields_to_update=None):
3084
+ """ This function is intended to make JSONGrapher .json files compatible with the current plotly format expectations
3085
+ and also necessary for being able to convert a JSONGrapher json_dict to python plotly figure objects.
3086
+ fields_to_update should be a list.
3087
+ This function can also remove the 'simulate' field from data series. However, that is not the default behavior
3088
+ because one would not want to do that by mistake before simulation is performed.
3089
+ This function can also remove the 'equation' field from data series. However, that is not the default behavior
3090
+ because one would not want to do that by mistake before the equation is evaluated.
3091
+ """
3092
+ if fields_to_update is None: # mutable objects should not be initialized in the function signature, so it is done here.
3093
+ fields_to_update = ["title_field", "extraInformation", "nested_comments"]
3094
+ fig_dict = json_fig_dict
3095
+ #unmodified_data = copy.deepcopy(data)
3096
+ if "title_field" in fields_to_update:
3097
+ fig_dict = update_title_field(fig_dict)
3098
+ if "extraInformation" in fields_to_update:
3099
+ fig_dict = remove_extra_information_field(fig_dict)
3100
+ if "nested_comments" in fields_to_update:
3101
+ fig_dict = remove_nested_comments(fig_dict)
3102
+ if "simulate" in fields_to_update:
3103
+ fig_dict = remove_simulate_field(fig_dict)
3104
+ if "equation" in fields_to_update:
3105
+ fig_dict = remove_equation_field(fig_dict)
3106
+ if "custom_units_chevrons" in fields_to_update:
3107
+ fig_dict = remove_custom_units_chevrons(fig_dict)
3108
+ if "bubble" in fields_to_update: #must be updated before trace_style is removed.
3109
+ fig_dict = remove_bubble_fields(fig_dict)
3110
+ if "trace_style" in fields_to_update:
3111
+ fig_dict = remove_trace_style_field(fig_dict)
3112
+ if "3d_axes" in fields_to_update: #This is for 3D plots
3113
+ fig_dict = update_3d_axes(fig_dict)
3114
+
3115
+ return fig_dict
3116
+
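+ # Illustrative usage sketch (hypothetical values, not from the package source):
+ # the default pass converts string titles, removes "extraInformation", and drops nested comments.
+ # example_fig_dict = {"layout": {"title": "Example", "comments": "nested, so dropped"}, "data": [{"x": [1], "y": [2], "extraInformation": "dropped"}]}
+ # example_fig_dict = clean_json_fig_dict(example_fig_dict)
+ # # additional cleaning steps can be requested explicitly, for example:
+ # # example_fig_dict = clean_json_fig_dict(example_fig_dict, fields_to_update=["title_field", "extraInformation", "nested_comments", "custom_units_chevrons"])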
3117
+ ### End section of code with functions for cleaning fig_dicts for plotly compatibility ###
3118
+
3119
+ ### Beginning of section of file that has functions for "simulate" and "equation" fields, to evaluate equations and call external javascript simulators, as well as support functions ###
3120
+
3121
+ def run_js_simulation(javascript_simulator_url, simulator_input_json_dict, verbose = False):
3122
+ """
3123
+ Downloads a JavaScript file using its URL, extracts the filename, appends an export statement,
3124
+ executes it with Node.js, and parses the output.
3125
+
3126
+ Parameters:
3127
+ javascript_simulator_url (str): URL of the raw JavaScript file to download and execute. Must have a function named simulate.
3128
+ simulator_input_json_dict (dict): Input parameters for the JavaScript simulator.
3129
+
3130
+ # Example inputs
3131
+ javascript_simulator_url = "https://github.com/AdityaSavara/JSONGrapherExamples/blob/main/ExampleSimulators/Langmuir_Isotherm.js"
3132
+ simulator_input_json_dict = {
3133
+ "simulate": {
3134
+ "K_eq": None,
3135
+ "sigma_max": "1.0267670459667 (mol/kg)",
3136
+ "k_ads": "200 (1/(bar * s))",
3137
+ "k_des": "100 (1/s)"
3138
+ }
3139
+ }
3140
+
3141
+
3142
+ Returns:
3143
+ dict: Parsed JSON output from the JavaScript simulation, or None if an error occurred.
3144
+ """
3145
+ import requests
3146
+ import subprocess
3147
+ #import json
3148
+ import os
3149
+
3150
+ # Convert to raw GitHub URL only if "raw" is not in the original URL
3151
+ # For example, the first link below gets converted to the second one.
3152
+ # https://github.com/AdityaSavara/JSONGrapherExamples/blob/main/ExampleSimulators/Langmuir_Isotherm.js
3153
+ # https://raw.githubusercontent.com/AdityaSavara/JSONGrapherExamples/main/ExampleSimulators/Langmuir_Isotherm.js
3154
+
3155
+ if "raw" not in javascript_simulator_url:
3156
+ javascript_simulator_url = convert_to_raw_github_url(javascript_simulator_url)
3157
+
3158
+ # Extract filename from URL
3159
+ js_filename = os.path.basename(javascript_simulator_url)
3160
+
3161
+ # Download the JavaScript file
3162
+ response = requests.get(javascript_simulator_url, timeout=300)
3163
+
3164
+ if response.status_code == 200:
3165
+ with open(js_filename, "w", encoding="utf-8") as file: # Specify UTF-8 encoding for compatibility
3166
+ file.write(response.text)
3167
+
3168
+ # Append the export statement to the JavaScript file
3169
+ with open(js_filename, "a", encoding="utf-8") as file: # Specify UTF-8 encoding for compatibility
3170
+ file.write("\nmodule.exports = { simulate };")
3171
+
3172
+ # Convert input dictionary to a JSON string
3173
+ input_json_str = json.dumps(simulator_input_json_dict)
3174
+
3175
+ # Prepare JavaScript command for execution
3176
+ js_command = f"""
3177
+ const simulator = require('./{js_filename}');
3178
+ console.log(JSON.stringify(simulator.simulate({input_json_str})));
3179
+ """
3180
+
3181
+ result = subprocess.run(["node", "-e", js_command], capture_output=True, text=True, check=True)
3182
+
3183
+ # Print output and errors if verbose
3184
+ if verbose:
3185
+ print("Raw JavaScript Output:", result.stdout)
3186
+ print("Node.js Errors:", result.stderr)
3187
+
3188
+ # Parse JSON if valid
3189
+ if result.stdout.strip():
3190
+ try:
3191
+ data_dict_with_simulation = json.loads(result.stdout) #This is the normal case.
3192
+ return data_dict_with_simulation
3193
+ except json.JSONDecodeError:
3194
+ print("Error: JavaScript output is not valid JSON.")
3195
+ return None
3196
+ else:
3197
+ print(f"Error: Unable to fetch JavaScript file. Status code {response.status_code}")
3198
+ return None
3199
+
3200
+ def convert_to_raw_github_url(url):
3201
+ """
3202
+ Converts a GitHub file URL to its raw content URL if necessary, preserving the filename.
3203
+ This function is really a support function for run_js_simulation
3204
+ """
3205
+ from urllib.parse import urlparse
3206
+ parsed_url = urlparse(url)
3207
+
3208
+ # If the URL is already a raw GitHub link, return it unchanged
3209
+ if "raw.githubusercontent.com" in parsed_url.netloc:
3210
+ return url
3211
+
3212
+ path_parts = parsed_url.path.strip("/").split("/")
3213
+
3214
+ # Ensure it's a valid GitHub file URL
3215
+ if "github.com" in parsed_url.netloc and len(path_parts) >= 4:
3216
+ if path_parts[2] == "blob":
3217
+ # If the URL contains "blob", adjust extraction
3218
+ user, repo, branch = path_parts[:2] + [path_parts[3]]
3219
+ file_path = "/".join(path_parts[4:]) # Keep full file path including filename
3220
+ else:
3221
+ # Standard GitHub file URL (without "blob")
3222
+ user, repo, branch = path_parts[:3]
3223
+ file_path = "/".join(path_parts[3:]) # Keep full file path including filename
3224
+
3225
+ return f"https://raw.githubusercontent.com/{user}/{repo}/{branch}/{file_path}"
3226
+
3227
+ return url # Return unchanged if not a GitHub file URL
3228
+
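+ # Illustrative usage sketch (URL taken from the docstring example above):
+ # example_blob_url = "https://github.com/AdityaSavara/JSONGrapherExamples/blob/main/ExampleSimulators/Langmuir_Isotherm.js"
+ # print(convert_to_raw_github_url(example_blob_url))
+ # # prints https://raw.githubusercontent.com/AdityaSavara/JSONGrapherExamples/main/ExampleSimulators/Langmuir_Isotherm.js
+ # # an already-raw URL would be returned unchanged.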
3229
+ #This function takes in a data_series_dict object and then
3230
+ #calls an external javascript simulation if needed
3231
+ #Then fills the data_series dict with the simulated data.
3232
+ #This function is not intended to be called by the regular user
3233
+ #because it returns extra fields that need to be parsed out.
3234
+ #and because it does not perform unit conversions after the simulation results are returned.
3235
+ def simulate_data_series(data_series_dict, simulator_link='', verbose=False):
3236
+ if simulator_link == '':
3237
+ simulator_link = data_series_dict["simulate"]["model"]
3238
+ try:
3239
+ simulation_return = run_js_simulation(simulator_link, data_series_dict, verbose=verbose)
3240
+ if isinstance(simulation_return, dict) and "error" in simulation_return: # Check for errors in the returned data
3241
+ print(f"Simulation failed: {simulation_return.get('error_message', 'Unknown error')}")
3242
+ print(simulation_return)
3243
+ return None
3244
+ return simulation_return.get("data", None)
3245
+
3246
+ except Exception as e: # This is so VS code pylint does not flag this line. pylint: disable=broad-except
3247
+ print(f"Exception occurred in simulate_data_series function of JSONRecordCreator.py: {e}")
3248
+ return None
3249
+
3250
+ #Function that goes through a fig_dict data series and simulates each data series as needed.
3251
+ #If the simulated data returned has "x_label" and/or "y_label" with units, those will be used to scale the data, then will be removed.
3252
+ def simulate_as_needed_in_fig_dict(fig_dict, simulator_link='', verbose=False):
3253
+ data_dicts_list = fig_dict['data']
3254
+ for data_dict_index in range(len(data_dicts_list)):
3255
+ fig_dict = simulate_specific_data_series_by_index(fig_dict, data_dict_index, simulator_link=simulator_link, verbose=verbose)
3256
+ return fig_dict
3257
+
3258
+ #Function that takes fig_dict and dataseries index and simulates if needed. Also performs unit conversions as needed.
3259
+ #If the simulated data returned has "x_label" and/or "y_label" with units, those will be used to scale the data, then will be removed.
3260
+ def simulate_specific_data_series_by_index(fig_dict, data_series_index, simulator_link='', verbose=False):
3261
+ data_dicts_list = fig_dict['data']
3262
+ data_dict_index = data_series_index
3263
+ data_dict = data_dicts_list[data_dict_index]
3264
+ if 'simulate' in data_dict:
3265
+ data_dict_filled = simulate_data_series(data_dict, simulator_link=simulator_link, verbose=verbose)
+ if data_dict_filled is None: #If the simulation failed, leave this data series unchanged.
+ return fig_dict
3266
+ # Check if unit scaling is needed
3267
+ if ("x_label" in data_dict_filled) or ("y_label" in data_dict_filled):
3268
+ #first, get the units that are in the layout of fig_dict so we know what to convert to.
3269
+ existing_record_x_label = fig_dict["layout"]["xaxis"]["title"]["text"]
3270
+ existing_record_y_label = fig_dict["layout"]["yaxis"]["title"]["text"]
3271
+ # Extract units from the existing record labels and from the simulation output.
3272
+ existing_record_x_units = separate_label_text_from_units(existing_record_x_label).get("units", "")
3273
+ existing_record_y_units = separate_label_text_from_units(existing_record_y_label).get("units", "")
3274
+ simulated_data_series_x_units = separate_label_text_from_units(data_dict_filled.get('x_label', '')).get("units", "")
3275
+ simulated_data_series_y_units = separate_label_text_from_units(data_dict_filled.get('y_label', '')).get("units", "")
3276
+ # Compute unit scaling ratios
3277
+ x_units_ratio = get_units_scaling_ratio(simulated_data_series_x_units, existing_record_x_units) if simulated_data_series_x_units and existing_record_x_units else 1
3278
+ y_units_ratio = get_units_scaling_ratio(simulated_data_series_y_units, existing_record_y_units) if simulated_data_series_y_units and existing_record_y_units else 1
3279
+ # Apply scaling to the data series
3280
+ scale_dataseries_dict(data_dict_filled, num_to_scale_x_values_by=x_units_ratio, num_to_scale_y_values_by=y_units_ratio)
3281
+ #Verbose logging for debugging
3282
+ if verbose:
3283
+ print(f"Scaling X values by: {x_units_ratio}, Scaling Y values by: {y_units_ratio}")
3284
+ #Now need to remove the "x_label" and "y_label" to be compatible with plotly.
3285
+ data_dict_filled.pop("x_label", None)
3286
+ data_dict_filled.pop("y_label", None)
3287
+ # Update the figure dictionary
3288
+ data_dicts_list[data_dict_index] = data_dict_filled
3289
+ fig_dict['data'] = data_dicts_list
3290
+ return fig_dict
3291
+
3292
+ def evaluate_equations_as_needed_in_fig_dict(fig_dict):
3293
+ data_dicts_list = fig_dict['data']
3294
+ for data_dict_index, data_dict in enumerate(data_dicts_list):
3295
+ if 'equation' in data_dict:
3296
+ fig_dict = evaluate_equation_for_data_series_by_index(fig_dict, data_dict_index)
3297
+ return fig_dict
3298
+
3299
+ #TODO: Should add z units ratio scaling here (just to change units when merging records). Should do the same for the simulate_specific_data_series_by_index function.
3300
+ def evaluate_equation_for_data_series_by_index(fig_dict, data_series_index, verbose="auto"):
3301
+ try:
3302
+ # Attempt to import from the json_equationer package
3303
+ import json_equationer.equation_creator as equation_creator
3304
+ except ImportError:
3305
+ try:
3306
+ # Fallback: attempt local import
3307
+ from . import equation_creator
3308
+ except ImportError as exc:
3309
+ # Log the failure; re-raise below since the equation cannot be evaluated without equation_creator.
3310
+ print(f"Failed to import equation_creator: {exc}")
3311
+ import copy
3312
+ data_dicts_list = fig_dict['data']
3313
+ data_dict = data_dicts_list[data_series_index]
3314
+ if 'equation' in data_dict:
3315
+ equation_object = equation_creator.Equation(data_dict['equation'])
3316
+ if verbose == "auto":
3317
+ equation_dict_evaluated = equation_object.evaluate_equation()
3318
+ else:
3319
+ equation_dict_evaluated = equation_object.evaluate_equation(verbose=verbose)
3320
+ if "graphical_dimensionality" in equation_dict_evaluated:
3321
+ graphical_dimensionality = equation_dict_evaluated["graphical_dimensionality"]
3322
+ else:
3323
+ graphical_dimensionality = 2
3324
+ data_dict_filled = copy.deepcopy(data_dict)
3325
+ data_dict_filled['equation'] = equation_dict_evaluated
3326
+ data_dict_filled['x_label'] = data_dict_filled['equation']['x_variable']
3327
+ data_dict_filled['y_label'] = data_dict_filled['equation']['y_variable']
3328
+ data_dict_filled['x'] = equation_dict_evaluated['x_points']
3329
+ data_dict_filled['y'] = equation_dict_evaluated['y_points']
3330
+ if graphical_dimensionality == 3:
3331
+ data_dict_filled['z_label'] = data_dict_filled['equation']['z_variable']
3332
+ data_dict_filled['z'] = equation_dict_evaluated['z_points']
3333
+ #data_dict_filled may include "x_label" and/or "y_label". If it does, we'll need to check about scaling units.
3334
+ if (("x_label" in data_dict_filled) or ("y_label" in data_dict_filled)) or ("z_label" in data_dict_filled):
3335
+ #first, get the units that are in the layout of fig_dict so we know what to convert to.
3336
+ existing_record_x_label = fig_dict["layout"]["xaxis"]["title"]["text"] #this is a dictionary.
3337
+ existing_record_y_label = fig_dict["layout"]["yaxis"]["title"]["text"] #this is a dictionary.
3338
+ existing_record_x_units = separate_label_text_from_units(existing_record_x_label)["units"]
3339
+ existing_record_y_units = separate_label_text_from_units(existing_record_y_label)["units"]
3340
+ if "z_label" in data_dict_filled:
3341
+ existing_record_z_label = fig_dict["layout"]["zaxis"]["title"]["text"] #this is a dictionary.
3342
+ if (existing_record_x_units == '') and (existing_record_y_units == ''): #skip scaling if there are no units.
3343
+ pass
3344
+ else: #If we will be scaling...
3345
+ #now, get the units from the evaluated equation output.
3346
+ simulated_data_series_x_units = separate_label_text_from_units(data_dict_filled['x_label'])["units"]
3347
+ simulated_data_series_y_units = separate_label_text_from_units(data_dict_filled['y_label'])["units"]
3348
+ x_units_ratio = get_units_scaling_ratio(simulated_data_series_x_units, existing_record_x_units)
3349
+ y_units_ratio = get_units_scaling_ratio(simulated_data_series_y_units, existing_record_y_units)
3350
+ #Scale the data series values by the unit ratios.
3351
+ scale_dataseries_dict(data_dict_filled, num_to_scale_x_values_by = x_units_ratio, num_to_scale_y_values_by = y_units_ratio)
3352
+ #Now need to remove the "x_label" and "y_label" to be compatible with plotly.
3353
+ data_dict_filled.pop("x_label", None)
3354
+ data_dict_filled.pop("y_label", None)
3355
+ if "z_label" in data_dict_filled:
3356
+ data_dict_filled.pop("z_label", None)
3357
+ if "type" not in data_dict:
3358
+ if graphical_dimensionality == 2:
3359
+ data_dict_filled['type'] = 'spline'
3360
+ elif graphical_dimensionality == 3:
3361
+ data_dict_filled['type'] = 'mesh3d'
3362
+ data_dicts_list[data_series_index] = data_dict_filled
3363
+ fig_dict['data'] = data_dicts_list
3364
+ return fig_dict
3365
+
3366
+
3367
+ def update_implicit_data_series_data(target_fig_dict, source_fig_dict, parallel_structure=True, modify_target_directly = False):
3368
+ """
3369
+ Updates the x and y values of implicit data series (equation/simulate) in target_fig_dict
3370
+ using values from the corresponding series in source_fig_dict.
3371
+
3372
+ Args:
3373
+ target_fig_dict (dict): The figure dictionary that needs updated data.
3374
+ source_fig_dict (dict): The figure dictionary that provides x and y values.
3375
+ parallel_structure (bool, optional): If True, assumes both data lists are the same
3376
+ length and updates using zip(). If False,
3377
+ matches by name instead. Default is True.
3378
+
3379
+ Returns:
3380
+ dict: A new figure dictionary with updated x and y values for implicit data series.
3381
+
3382
+ Notes:
3383
+ - If parallel_structure=True and both lists have the same length, updates use zip().
3384
+ - If parallel_structure=False, matching is done by the "name" field.
3385
+ - Only updates data series that contain "simulate" or "equation".
3386
+ - Ensures deep copying to avoid modifying the original structures.
3387
+ """
3388
+ if modify_target_directly == False:
3389
+ import copy # Import inside function to limit scope
3390
+ updated_fig_dict = copy.deepcopy(target_fig_dict) # Deep copy to avoid modifying original
3391
+ else:
3392
+ updated_fig_dict = target_fig_dict
3393
+
3394
+ target_data_series = updated_fig_dict.get("data", [])
3395
+ source_data_series = source_fig_dict.get("data", [])
3396
+
3397
+ if parallel_structure and len(target_data_series) == len(source_data_series):
3398
+ # Use zip() when parallel_structure=True and lengths match
3399
+ for target_series, source_series in zip(target_data_series, source_data_series):
3400
+ if ("equation" in target_series) or ("simulate" in target_series):
3401
+ target_series["x"] = source_series.get("x", []) # Extract and apply "x" values
3402
+ target_series["y"] = source_series.get("y", []) # Extract and apply "y" values
3403
+ if "z" in source_series:
3404
+ target_series["z"] = source_series.get("z", []) # Extract and apply "z" values
3405
+ else:
3406
+ # Match by name when parallel_structure=False or lengths differ
3407
+ source_data_dict = {series["name"]: series for series in source_data_series if "name" in series}
3408
+
3409
+ for target_series in target_data_series:
3410
+ if ("equation" in target_series) or ("simulate" in target_series):
3411
+ target_name = target_series.get("name")
3412
+
3413
+ if target_name in source_data_dict:
3414
+ source_series = source_data_dict[target_name]
3415
+ target_series["x"] = source_series.get("x", []) # Extract and apply "x" values
3416
+ target_series["y"] = source_series.get("y", []) # Extract and apply "y" values
3417
+ if "z" in source_series:
3418
+ target_series["z"] = source_series.get("z", []) # Extract and apply "z" values
3419
+ return updated_fig_dict
3420
+
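+ # Illustrative usage sketch (hypothetical values, not from the package source):
+ # the equation series in the target receives the x and y values computed in the source copy.
+ # example_target = {"data": [{"name": "fit", "equation": {"x_range_default": [0, 2]}}]}
+ # example_source = {"data": [{"name": "fit", "x": [0, 1, 2], "y": [0, 2, 4]}]}
+ # example_updated = update_implicit_data_series_data(example_target, example_source, parallel_structure=True)
+ # # example_updated["data"][0] now carries x = [0, 1, 2] and y = [0, 2, 4] alongside its "equation" field.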
3421
+
3422
+ def execute_implicit_data_series_operations(fig_dict, simulate_all_series=True, evaluate_all_equations=True, adjust_implicit_data_ranges=True):
3423
+ """
3424
+ This function is designed to be called during plotly or matplotlib figure creation.
3425
+ Processes implicit data series (equation/simulate), adjusting ranges, performing simulations,
3426
+ and evaluating equations as needed.
3427
+
3428
+ The important thing is that this function creates a "fresh" fig_dict, does some manipulation, and then gets the data from that
3429
+ and adds it to the original fig_dict.
3430
+ That way the original fig_dict is not changed other than getting the simulated/evaluated data.
3431
+
3432
+ The reason the function works this way is that the x_range_default of the implicit data series (equations and simulations)
3433
+ are adjusted to match the data in the fig_dict, but we don't want to change the x_range_default of our main record.
3434
+ That's why we make a copy for creating simulated/evaluated data from those adjusted ranges, and then put the simulated/evaluated data
3435
+ back into the original dict.
3436
+
3437
+
3438
+
3439
+ Args:
3440
+ fig_dict (dict): The figure dictionary containing data series.
3441
+ simulate_all_series (bool): If True, performs simulations for applicable series.
3442
+ evaluate_all_equations (bool): If True, evaluates all equation-based series.
3443
+ adjust_implicit_data_ranges (bool): If True, modifies ranges for implicit data series.
3444
+
3445
+ Returns:
3446
+ dict: Updated figure dictionary with processed implicit data series.
3447
+
3448
+ Notes:
3449
+ - If adjust_implicit_data_ranges=True, retrieves min/max values from regular data series
3450
+ (those that are not equations and not simulations) and applies them to implicit data.
3451
+ - If simulate_all_series=True, executes simulations for all series that require them
3452
+ and transfers the computed data back to fig_dict without copying ranges.
3453
+ - If evaluate_all_equations=True, solves equations as needed and transfers results
3454
+ back to fig_dict without copying ranges.
3455
+ - Uses deepcopy to avoid modifying the original input dictionary.
3456
+ """
3457
+ import copy # Import inside function for modularity
3458
+
3459
+ # Create a copy for processing implicit series separately
3460
+ fig_dict_for_implicit = copy.deepcopy(fig_dict)
3461
+ #first check if any data_series has an equation or simulate field. If not, we'll skip.
3462
+ #initialize with false:
3463
+ implicit_series_present = False
3464
+
3465
+ for data_series in fig_dict["data"]:
3466
+ if ("equation" in data_series) or ("simulate" in data_series):
3467
+ implicit_series_present = True
3468
+ if implicit_series_present == True:
3469
+ if adjust_implicit_data_ranges:
3470
+ # Retrieve ranges from data series that are not equation-based or simulation-based.
3471
+ fig_dict_ranges, data_series_ranges = get_fig_dict_ranges(fig_dict, skip_equations=True, skip_simulations=True)
3472
+ data_series_ranges # Variable not used. The remainder of this comment is to avoid vs code pylint flagging. pylint: disable=pointless-statement
3473
+ # Apply the extracted ranges to implicit data series before simulation or equation evaluation.
3474
+ fig_dict_for_implicit = update_implicit_data_series_x_ranges(fig_dict, fig_dict_ranges)
3475
+
3476
+ if simulate_all_series:
3477
+ # Perform simulations for applicable series
3478
+ fig_dict_for_implicit = simulate_as_needed_in_fig_dict(fig_dict_for_implicit)
3479
+ # Copy data back to fig_dict, ensuring ranges remain unchanged
3480
+ fig_dict = update_implicit_data_series_data(target_fig_dict=fig_dict, source_fig_dict=fig_dict_for_implicit, parallel_structure=True, modify_target_directly=True)
3481
+
3482
+ if evaluate_all_equations:
3483
+ # Evaluate equations that require computation
3484
+ fig_dict_for_implicit = evaluate_equations_as_needed_in_fig_dict(fig_dict_for_implicit)
3485
+ # Copy results back without overwriting the ranges
3486
+ fig_dict = update_implicit_data_series_data(target_fig_dict=fig_dict, source_fig_dict=fig_dict_for_implicit, parallel_structure=True, modify_target_directly=True)
3487
+
3488
+ return fig_dict
3489
+
3490
+
3491
+
3492
+ ### End of section of file that has functions for "simulate" and "equation" fields, to evaluate equations and call external javascript simulators, as well as support functions ###
3493
+
873
3494
  # Example Usage
874
3495
  if __name__ == "__main__":
875
3496
  # Example of creating a record with optional attributes.
876
- record = JSONGrapherRecord(
3497
+ Record = JSONGrapherRecord(
877
3498
  comments="Here is a description.",
878
- graph_title="Graph Title",
3499
+ graph_title="Here Is The Graph Title Spot",
879
3500
  data_objects_list=[
880
- {"comments": "Initial data series.", "uid": "123", "line": {"shape": "solid"}, "name": "Series A", "type": "line", "x": [1, 2, 3], "y": [4, 5, 6]}
3501
+ {"comments": "Initial data series.", "uid": "123", "name": "Series A", "trace_style": "spline", "x": [1, 2, 3], "y": [4, 5, 8]}
881
3502
  ],
882
3503
  )
3504
+ x_label_including_units = "Time (years)"
3505
+ y_label_including_units = "Height (m)"
3506
+ Record.set_comments("Tree Growth Data collected from the US National Arboretum")
3507
+ Record.set_datatype("Tree_Growth_Curve")
3508
+ Record.set_x_axis_label_including_units(x_label_including_units)
3509
+ Record.set_y_axis_label_including_units(y_label_including_units)
3510
+
3511
+
3512
+ Record.export_to_json_file("test.json")
3513
+
3514
+ print(Record)
883
3515
 
884
3516
  # Example of creating a record from an existing dictionary.
885
- existing_JSONGrapher_record = {
3517
+ example_existing_JSONGrapher_record = {
886
3518
  "comments": "Existing record description.",
887
3519
  "graph_title": "Existing Graph",
888
3520
  "data": [
889
- {"comments": "Data series 1", "uid": "123", "line": {"shape": "solid"}, "name": "Series A", "type": "line", "x": [1, 2, 3], "y": [4, 5, 6]}
3521
+ {"comments": "Data series 1", "uid": "123", "name": "Series A", "type": "spline", "x": [1, 2, 3], "y": [4, 5, 8]}
890
3522
  ],
891
3523
  }
892
- record_from_existing = JSONGrapherRecord(existing_JSONGrapher_record=existing_JSONGrapher_record)
893
- record.export_to_json_file("test.json")
894
- print(record)
3524
+ Record_from_existing = JSONGrapherRecord(existing_JSONGrapher_record=example_existing_JSONGrapher_record)
3525
+ x_label_including_units = "Time (years)"
3526
+ y_label_including_units = "Height (cm)"
3527
+ Record_from_existing.set_comments("Tree Growth Data collected from the US National Arboretum")
3528
+ Record_from_existing.set_datatype("Tree_Growth_Curve")
3529
+ Record_from_existing.set_x_axis_label_including_units(x_label_including_units)
3530
+ Record_from_existing.set_y_axis_label_including_units(y_label_including_units)
3531
+ print(Record_from_existing)
3532
+
3533
+ print("NOW WILL MERGE THE RECORDS, AND USE THE SECOND ONE TWICE (AS A JSONGrapher OBJECT THEN JUST THE FIG_DICT)")
3534
+ print(merge_JSONGrapherRecords([Record, Record_from_existing, Record_from_existing.fig_dict]))
3535
+
3536
+
3537
+