vellum-ai 0.14.48__py3-none-any.whl → 0.14.50__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (35)
  1. vellum/__init__.py +6 -2
  2. vellum/client/core/client_wrapper.py +1 -1
  3. vellum/client/types/__init__.py +6 -2
  4. vellum/client/types/deployment_read.py +1 -1
  5. vellum/client/types/slim_workflow_execution_read.py +2 -2
  6. vellum/client/types/workflow_event_execution_read.py +2 -2
  7. vellum/client/types/{workflow_execution_usage_calculation_fulfilled_body.py → workflow_execution_usage_calculation_error.py} +5 -6
  8. vellum/client/types/workflow_execution_usage_calculation_error_code_enum.py +7 -0
  9. vellum/client/types/workflow_execution_usage_result.py +24 -0
  10. vellum/types/{workflow_execution_usage_calculation_fulfilled_body.py → workflow_execution_usage_calculation_error.py} +1 -1
  11. vellum/types/workflow_execution_usage_calculation_error_code_enum.py +3 -0
  12. vellum/types/workflow_execution_usage_result.py +3 -0
  13. vellum/workflows/nodes/core/map_node/node.py +74 -87
  14. vellum/workflows/nodes/core/map_node/tests/test_node.py +49 -0
  15. vellum/workflows/nodes/displayable/code_execution_node/node.py +6 -5
  16. vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py +112 -1
  17. vellum/workflows/nodes/displayable/code_execution_node/utils.py +23 -2
  18. vellum/workflows/state/encoder.py +4 -0
  19. vellum/workflows/types/code_execution_node_wrappers.py +10 -2
  20. vellum/workflows/workflows/base.py +8 -0
  21. {vellum_ai-0.14.48.dist-info → vellum_ai-0.14.50.dist-info}/METADATA +1 -1
  22. {vellum_ai-0.14.48.dist-info → vellum_ai-0.14.50.dist-info}/RECORD +35 -30
  23. vellum_ee/workflows/display/nodes/base_node_display.py +23 -1
  24. vellum_ee/workflows/display/nodes/get_node_display_class.py +1 -24
  25. vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +2 -2
  26. vellum_ee/workflows/display/nodes/vellum/tests/test_code_execution_node.py +43 -0
  27. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +51 -5
  28. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_serialization.py +177 -0
  29. vellum_ee/workflows/display/utils/expressions.py +22 -3
  30. vellum_ee/workflows/display/utils/vellum.py +10 -4
  31. vellum_ee/workflows/display/workflows/base_workflow_display.py +3 -24
  32. vellum_ee/workflows/display/workflows/tests/test_workflow_display.py +187 -3
  33. {vellum_ai-0.14.48.dist-info → vellum_ai-0.14.50.dist-info}/LICENSE +0 -0
  34. {vellum_ai-0.14.48.dist-info → vellum_ai-0.14.50.dist-info}/WHEEL +0 -0
  35. {vellum_ai-0.14.48.dist-info → vellum_ai-0.14.50.dist-info}/entry_points.txt +0 -0
vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_serialization.py
@@ -0,0 +1,177 @@
+ from deepdiff import DeepDiff
+
+ from vellum_ee.workflows.display.workflows.get_vellum_workflow_display_class import get_workflow_display
+
+ from tests.workflows.basic_tool_calling_node.workflow import BasicToolCallingNodeWorkflow
+
+
+ def test_serialize_workflow():
+ # GIVEN a Workflow that uses a generic node
+ # WHEN we serialize it
+ workflow_display = get_workflow_display(workflow_class=BasicToolCallingNodeWorkflow)
+
+ serialized_workflow: dict = workflow_display.serialize()
+ # THEN we should get a serialized representation of the Workflow
+ assert serialized_workflow.keys() == {
+ "workflow_raw_data",
+ "input_variables",
+ "state_variables",
+ "output_variables",
+ }
+
+ # AND its input variables should be what we expect
+ input_variables = serialized_workflow["input_variables"]
+ assert len(input_variables) == 0
+
+ # AND its output variables should be what we expect
+ output_variables = serialized_workflow["output_variables"]
+ assert len(output_variables) == 2
+ assert not DeepDiff(
+ [
+ {"id": "8e7c0147-930d-4b7f-b6b1-6d79641cd3eb", "key": "text", "type": "STRING"},
+ {"id": "01a07e6d-7269-4f45-8b44-ef0227a2e88d", "key": "chat_history", "type": "CHAT_HISTORY"},
+ ],
+ output_variables,
+ ignore_order=True,
+ )
+
+ # AND its raw data should be what we expect
+ workflow_raw_data = serialized_workflow["workflow_raw_data"]
+ tool_calling_node = workflow_raw_data["nodes"][1]
+ assert tool_calling_node == {
+ "id": "21f29cac-da87-495f-bba1-093d423f4e46",
+ "label": "GetCurrentWeatherNode",
+ "type": "GENERIC",
+ "display_data": {
+ "position": {"x": 0.0, "y": 0.0},
+ "comment": {"value": "\n A tool calling node that calls the get_current_weather function.\n "},
+ },
+ "base": {
+ "name": "ToolCallingNode",
+ "module": ["vellum", "workflows", "nodes", "experimental", "tool_calling_node", "node"],
+ },
+ "definition": {
+ "name": "GetCurrentWeatherNode",
+ "module": ["tests", "workflows", "basic_tool_calling_node", "workflow"],
+ },
+ "trigger": {"id": "2414743b-b1dd-4552-8abf-9b7481df9762", "merge_behavior": "AWAIT_ATTRIBUTES"},
+ "ports": [{"id": "3cd6d78c-9dad-42aa-ad38-31f67057c379", "name": "default", "type": "DEFAULT"}],
+ "adornments": None,
+ "attributes": [
+ {
+ "id": "44420e39-966f-4c59-bdf8-6365a61c5d2a",
+ "name": "ml_model",
+ "value": {"type": "CONSTANT_VALUE", "value": {"type": "STRING", "value": "gpt-4o-mini"}},
+ },
+ {
+ "id": "669cfb4b-8c25-460e-8952-b63d91302cbc",
+ "name": "blocks",
+ "value": {
+ "type": "CONSTANT_VALUE",
+ "value": {
+ "type": "JSON",
+ "value": [
+ {
+ "block_type": "CHAT_MESSAGE",
+ "state": None,
+ "cache_config": None,
+ "chat_role": "SYSTEM",
+ "chat_source": None,
+ "chat_message_unterminated": None,
+ "blocks": [
+ {
+ "block_type": "RICH_TEXT",
+ "state": None,
+ "cache_config": None,
+ "blocks": [
+ {
+ "block_type": "PLAIN_TEXT",
+ "state": None,
+ "cache_config": None,
+ "text": "You are a weather expert",
+ }
+ ],
+ }
+ ],
+ },
+ {
+ "block_type": "CHAT_MESSAGE",
+ "state": None,
+ "cache_config": None,
+ "chat_role": "USER",
+ "chat_source": None,
+ "chat_message_unterminated": None,
+ "blocks": [
+ {
+ "block_type": "RICH_TEXT",
+ "state": None,
+ "cache_config": None,
+ "blocks": [
+ {
+ "block_type": "VARIABLE",
+ "state": None,
+ "cache_config": None,
+ "input_variable": "question",
+ }
+ ],
+ }
+ ],
+ },
+ ],
+ },
+ },
+ },
+ {
+ "id": "78324739-ff89-47a5-902b-10da0cb95c6d",
+ "name": "functions",
+ "value": {
+ "type": "CONSTANT_VALUE",
+ "value": {
+ "type": "JSON",
+ "value": [
+ {
+ "state": None,
+ "cache_config": None,
+ "name": "get_current_weather",
+ "description": None,
+ "parameters": {
+ "type": "object",
+ "properties": {"location": {"type": "string"}, "unit": {"type": "string"}},
+ "required": ["location", "unit"],
+ },
+ "forced": None,
+ "strict": None,
+ }
+ ],
+ },
+ },
+ },
+ {
+ "id": "0f6dc102-3460-4963-91fa-7ba85d65ef7a",
+ "name": "prompt_inputs",
+ "value": {
+ "type": "CONSTANT_VALUE",
+ "value": {"type": "JSON", "value": {"question": "What's the weather like in San Francisco?"}},
+ },
+ },
+ {
+ "id": "5c041b7d-732c-4773-a93a-32211f2af0b3",
+ "name": "max_tool_calls",
+ "value": {"type": "CONSTANT_VALUE", "value": {"type": "NUMBER", "value": 1.0}},
+ },
+ ],
+ "outputs": [
+ {
+ "id": "e62bc785-a914-4066-b79e-8c89a5d0ec6c",
+ "name": "text",
+ "type": "STRING",
+ "value": {"type": "CONSTANT_VALUE", "value": {"type": "STRING", "value": ""}},
+ },
+ {
+ "id": "4674f1d9-e3af-411f-8a55-40a3a3ab5394",
+ "name": "chat_history",
+ "type": "CHAT_HISTORY",
+ "value": {"type": "CONSTANT_VALUE", "value": {"type": "JSON", "value": []}},
+ },
+ ],
+ }
vellum_ee/workflows/display/utils/expressions.py
@@ -253,7 +253,7 @@ def serialize_value(display_context: "WorkflowDisplayContext", value: Any) -> Js
  "type": "CONSTANT_VALUE",
  "value": {
  "type": "JSON",
- "items": constant_values,
+ "value": constant_values,
  },
  }
  else:
@@ -262,8 +262,27 @@ def serialize_value(display_context: "WorkflowDisplayContext", value: Any) -> Js
  "items": cast(JsonArray, serialized_items), # list[JsonObject] -> JsonArray
  }
 
- if isinstance(value, dict) and any(isinstance(v, BaseDescriptor) for v in value.values()):
- raise ValueError("Nested references are not supported.")
+ if isinstance(value, dict):
+ serialized_entries = [
+ {"key": key, "value": serialize_value(display_context, val)} for key, val in value.items()
+ ]
+
+ # Check if all entries have constant values
+ if all(entry["value"]["type"] == "CONSTANT_VALUE" for entry in serialized_entries):
+ constant_entries = {}
+ for entry in serialized_entries:
+ entry_value = entry["value"]["value"]
+ constant_entries[entry["key"]] = entry_value["value"]
+
+ return {
+ "type": "CONSTANT_VALUE",
+ "value": {
+ "type": "JSON",
+ "value": constant_entries,
+ },
+ }
+ else:
+ return {"type": "DICTIONARY_REFERENCE", "entries": cast(JsonArray, serialized_entries)}
 
  if not isinstance(value, BaseDescriptor):
  vellum_value = primitive_to_vellum_value(value)
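
In short, the hunk above replaces the old "nested references are not supported" error with real dict handling in serialize_value: a dict whose entries all serialize to CONSTANT_VALUE collapses into a single JSON constant, while anything else is emitted entry by entry as a DICTIONARY_REFERENCE. The standalone sketch below illustrates that dispatch only; it is not the library's code, and the function name and the injected serialize_entry callable are hypothetical.

from typing import Any, Callable, Dict

def serialize_dict_sketch(value: Dict[str, Any], serialize_entry: Callable[[Any], dict]) -> dict:
    # Serialize each key/value pair with the caller-supplied entry serializer.
    entries = [{"key": key, "value": serialize_entry(val)} for key, val in value.items()]

    # If every entry is a constant, fold the whole dict into one JSON constant value.
    if all(entry["value"]["type"] == "CONSTANT_VALUE" for entry in entries):
        constants = {entry["key"]: entry["value"]["value"]["value"] for entry in entries}
        return {"type": "CONSTANT_VALUE", "value": {"type": "JSON", "value": constants}}

    # Otherwise keep the per-entry serializations so node references survive.
    return {"type": "DICTIONARY_REFERENCE", "entries": entries}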
vellum_ee/workflows/display/utils/vellum.py
@@ -1,5 +1,6 @@
  from typing import TYPE_CHECKING, Any, Literal, Optional, Union
 
+ from vellum.client.core.api_error import ApiError
  from vellum.client.core.pydantic_utilities import UniversalBaseModel
  from vellum.client.types.array_vellum_value import ArrayVellumValue
  from vellum.client.types.vellum_value import VellumValue
@@ -117,13 +118,18 @@ def create_node_input_value_pointer_rule(
  workflow_input_display = display_context.global_workflow_input_displays[value]
  return InputVariablePointer(data=InputVariableData(input_variable_id=str(workflow_input_display.id)))
  if isinstance(value, VellumSecretReference):
- workspace_secret = display_context.client.workspace_secrets.retrieve(
- id=value.name,
- )
+ try:
+ workspace_secret = display_context.client.workspace_secrets.retrieve(
+ id=value.name,
+ )
+ workspace_secret_id: Optional[str] = str(workspace_secret.id)
+ except ApiError:
+ workspace_secret_id = None
+
  return WorkspaceSecretPointer(
  data=WorkspaceSecretData(
  type="STRING",
- workspace_secret_id=str(workspace_secret.id),
+ workspace_secret_id=workspace_secret_id,
  ),
  )
  if isinstance(value, ExecutionCountReference):
vellum_ee/workflows/display/workflows/base_workflow_display.py
@@ -153,20 +153,8 @@ class BaseWorkflowDisplay(Generic[WorkflowType]):
  "definition": None,
  }
 
- # Add all the nodes in the workflow
- for node in self._workflow.get_nodes():
- node_display = self.display_context.node_displays[node]
-
- try:
- serialized_node = node_display.serialize(self.display_context)
- except NotImplementedError as e:
- self.add_error(e)
- continue
-
- serialized_nodes[node_display.node_id] = serialized_node
-
- # Add all unused nodes in the workflow
- for node in self._workflow.get_unused_nodes():
+ # Add all the nodes in the workflows
+ for node in self._workflow.get_all_nodes():
  node_display = self.display_context.node_displays[node]
  try:
 
@@ -417,16 +405,7 @@ class BaseWorkflowDisplay(Generic[WorkflowType]):
 
  port_displays: PortDisplays = {}
 
- for node in self._workflow.get_nodes():
- self._enrich_node_displays(
- node=node,
- node_displays=node_displays,
- global_node_displays=global_node_displays,
- global_node_output_displays=global_node_output_displays,
- port_displays=port_displays,
- )
-
- for node in self._workflow.get_unused_nodes():
+ for node in self._workflow.get_all_nodes():
  self._enrich_node_displays(
  node=node,
  node_displays=node_displays,
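
Both base_workflow_display.py hunks fold the separate passes over get_nodes() and get_unused_nodes() into a single get_all_nodes() iteration; the workflows/base.py change listed above (+8 -0) presumably introduces that helper. A minimal sketch of what such a helper could look like, assuming it simply chains the two existing iterators (this is an assumption, not the actual source):

from itertools import chain

def get_all_nodes(workflow):
    # Hypothetical sketch: yield the graph's nodes followed by its unused nodes
    # in one pass, matching how the display code now consumes them.
    return chain(workflow.get_nodes(), workflow.get_unused_nodes())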
vellum_ee/workflows/display/workflows/tests/test_workflow_display.py
@@ -466,7 +466,7 @@ def test_serialize_workflow__array_values():
  assert "value" in array_output
  assert array_output["value"] == {
  "type": "CONSTANT_VALUE",
- "value": {"type": "JSON", "items": ["item1", "item2", "item3"]},
+ "value": {"type": "JSON", "value": ["item1", "item2", "item3"]},
  }
 
  nested_array_outputs = [val for val in outputs if isinstance(val, dict) and val["name"] == "nested_array_value"]
@@ -477,7 +477,7 @@ def test_serialize_workflow__array_values():
  assert "value" in nested_array_output
  assert nested_array_output["value"] == {
  "type": "CONSTANT_VALUE",
- "value": {"type": "JSON", "items": [["item1", "item2", "item3"], ["item4", "item5", "item6"]]},
+ "value": {"type": "JSON", "value": [["item1", "item2", "item3"], ["item4", "item5", "item6"]]},
  }
 
  mixed_array_outputs = [val for val in outputs if isinstance(val, dict) and val["name"] == "mixed_array_value"]
@@ -488,7 +488,7 @@ def test_serialize_workflow__array_values():
  assert "value" in mixed_array_output
  assert mixed_array_output["value"] == {
  "type": "CONSTANT_VALUE",
- "value": {"type": "JSON", "items": [["item1"], "item2", "item3"]},
+ "value": {"type": "JSON", "value": [["item1"], "item2", "item3"]},
  }
 
 
@@ -587,3 +587,187 @@ def test_serialize_workflow__array_reference():
  },
  ],
  }
+
+
+ def test_serialize_workflow__dict_values():
+ # GIVEN a node with a dictionary value
+ class MyNode(BaseNode):
+ class Outputs(BaseNode.Outputs):
+ dict_value = {"key1": "value1", "key2": "value2"}
+ nested_dict_value = {
+ "key1": {"nested_key1": "value1", "nested_key2": "value2"},
+ "key2": {"nested_key1": "value1", "nested_key2": "value2"},
+ }
+ mixed_dict_value = {"key1": "value1", "key2": {"key3": "value3", "key4": "value4"}}
+
+ # AND a workflow that uses these outputs
+ class MyWorkflow(BaseWorkflow):
+ graph = MyNode
+
+ class Outputs(BaseWorkflow.Outputs):
+ dict_output = MyNode.Outputs.dict_value
+
+ # WHEN we serialize it
+ workflow_display = get_workflow_display(workflow_class=MyWorkflow)
+ data = workflow_display.serialize()
+
+ # THEN it should serialize as a CONSTANT_VALUE
+ assert isinstance(data["workflow_raw_data"], dict)
+ assert isinstance(data["workflow_raw_data"]["nodes"], list)
+ my_node = next(
+ node for node in data["workflow_raw_data"]["nodes"] if isinstance(node, dict) and node["type"] == "GENERIC"
+ )
+
+ assert isinstance(my_node["outputs"], list)
+ outputs = my_node["outputs"]
+
+ dict_output = next(val for val in outputs if isinstance(val, dict) and val["name"] == "dict_value")
+ assert isinstance(dict_output, dict)
+ assert "value" in dict_output
+ assert dict_output["value"] == {
+ "type": "CONSTANT_VALUE",
+ "value": {"type": "JSON", "value": {"key1": "value1", "key2": "value2"}},
+ }
+
+ nested_dict_output = next(val for val in outputs if isinstance(val, dict) and val["name"] == "nested_dict_value")
+ assert isinstance(nested_dict_output, dict)
+ assert "value" in nested_dict_output
+ assert nested_dict_output["value"] == {
+ "type": "CONSTANT_VALUE",
+ "value": {
+ "type": "JSON",
+ "value": {
+ "key1": {"nested_key1": "value1", "nested_key2": "value2"},
+ "key2": {"nested_key1": "value1", "nested_key2": "value2"},
+ },
+ },
+ }
+
+ mixed_dict_output = next(val for val in outputs if isinstance(val, dict) and val["name"] == "mixed_dict_value")
+ assert isinstance(mixed_dict_output, dict)
+ assert "value" in mixed_dict_output
+ assert mixed_dict_output["value"] == {
+ "type": "CONSTANT_VALUE",
+ "value": {"type": "JSON", "value": {"key1": "value1", "key2": {"key3": "value3", "key4": "value4"}}},
+ }
+
+
+ def test_serialize_workflow__dict_reference():
+ # GIVEN a node with a dictionary containing non-constant values (node references)
+ class FirstNode(BaseNode):
+ class Outputs(BaseNode.Outputs):
+ value1: str
+
+ class SecondNode(BaseNode):
+ class Outputs(BaseNode.Outputs):
+ # Dictionary containing a mix of constants and node references
+ mixed_dict = {
+ "key1": "constant1",
+ "key2": FirstNode.Outputs.value1,
+ "key3": "constant2",
+ "key4": FirstNode.Outputs.value1,
+ }
+ mixed_nested_dict = {
+ "key1": {"key1": "constant1", "key2": FirstNode.Outputs.value1},
+ "key2": {"key1": "constant2", "key2": FirstNode.Outputs.value1},
+ }
+
+ # AND a workflow that uses these outputs
+ class MyWorkflow(BaseWorkflow):
+ graph = FirstNode >> SecondNode
+
+ class Outputs(BaseWorkflow.Outputs):
+ mixed_dict_output = SecondNode.Outputs.mixed_dict
+ mixed_nested_dict_output = SecondNode.Outputs.mixed_nested_dict
+
+ # WHEN we serialize it
+ workflow_display = get_workflow_display(workflow_class=MyWorkflow)
+ data = workflow_display.serialize()
+
+ # THEN it should serialize as a CONSTANT_VALUE
+ assert isinstance(data["workflow_raw_data"], dict)
+ assert isinstance(data["workflow_raw_data"]["nodes"], list)
+ second_node = data["workflow_raw_data"]["nodes"][2]
+
+ assert isinstance(second_node, dict)
+ assert "outputs" in second_node
+ assert isinstance(second_node["outputs"], list)
+
+ outputs = second_node["outputs"]
+ mixed_dict_output = next(val for val in outputs if isinstance(val, dict) and val["name"] == "mixed_dict")
+ assert isinstance(mixed_dict_output, dict)
+ assert "value" in mixed_dict_output
+ assert mixed_dict_output["value"] == {
+ "type": "DICTIONARY_REFERENCE",
+ "entries": [
+ {"key": "key1", "value": {"type": "CONSTANT_VALUE", "value": {"type": "STRING", "value": "constant1"}}},
+ {
+ "key": "key2",
+ "value": {
+ "type": "NODE_OUTPUT",
+ "node_id": "13b4f5c0-e6aa-4ef9-9a1a-79476bc32500",
+ "node_output_id": "50a6bc11-afb3-49f2-879c-b28f5e16d974",
+ },
+ },
+ {"key": "key3", "value": {"type": "CONSTANT_VALUE", "value": {"type": "STRING", "value": "constant2"}}},
+ {
+ "key": "key4",
+ "value": {
+ "type": "NODE_OUTPUT",
+ "node_id": "13b4f5c0-e6aa-4ef9-9a1a-79476bc32500",
+ "node_output_id": "50a6bc11-afb3-49f2-879c-b28f5e16d974",
+ },
+ },
+ ],
+ }
+
+ mixed_nested_dict_output = next(
+ val for val in outputs if isinstance(val, dict) and val["name"] == "mixed_nested_dict"
+ )
+ assert isinstance(mixed_nested_dict_output, dict)
+ assert "value" in mixed_nested_dict_output
+ assert mixed_nested_dict_output["value"] == {
+ "type": "DICTIONARY_REFERENCE",
+ "entries": [
+ {
+ "key": "key1",
+ "value": {
+ "type": "DICTIONARY_REFERENCE",
+ "entries": [
+ {
+ "key": "key1",
+ "value": {"type": "CONSTANT_VALUE", "value": {"type": "STRING", "value": "constant1"}},
+ },
+ {
+ "key": "key2",
+ "value": {
+ "type": "NODE_OUTPUT",
+ "node_id": "13b4f5c0-e6aa-4ef9-9a1a-79476bc32500",
+ "node_output_id": "50a6bc11-afb3-49f2-879c-b28f5e16d974",
+ },
+ },
+ ],
+ },
+ },
+ {
+ "key": "key2",
+ "value": {
+ "type": "DICTIONARY_REFERENCE",
+ "entries": [
+ {
+ "key": "key1",
+ "value": {"type": "CONSTANT_VALUE", "value": {"type": "STRING", "value": "constant2"}},
+ },
+ {
+ "key": "key2",
+ "value": {
+ "type": "NODE_OUTPUT",
+ "node_id": "13b4f5c0-e6aa-4ef9-9a1a-79476bc32500",
+ "node_output_id": "50a6bc11-afb3-49f2-879c-b28f5e16d974",
+ },
+ },
+ ],
+ },
+ },
+ ],
+ }