infrahub-server 1.5.1__py3-none-any.whl → 1.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -119,6 +119,12 @@ class ComputedAttrJinja2TriggerDefinition(TriggerBranchDefinition):
     type: TriggerType = TriggerType.COMPUTED_ATTR_JINJA2
     computed_attribute: ComputedAttributeTarget
     template_hash: str
+    trigger_kind: str
+
+    @property
+    def targets_self(self) -> bool:
+        """Determine if the specific trigger definition targets the actual node kind of the computed attribute."""
+        return self.trigger_kind == self.computed_attribute.kind
 
     def get_description(self) -> str:
         return f"{super().get_description()} | hash:{self.template_hash}"
@@ -190,6 +196,7 @@ class ComputedAttrJinja2TriggerDefinition(TriggerBranchDefinition):
         definition = cls(
             name=f"{computed_attribute.key_name}{NAME_SEPARATOR}kind{NAME_SEPARATOR}{trigger_node.kind}",
             template_hash=template_hash,
+            trigger_kind=trigger_node.kind,
             branch=branch,
             computed_attribute=computed_attribute,
             trigger=event_trigger,
@@ -29,6 +29,7 @@ from .gather import gather_trigger_computed_attribute_jinja2, gather_trigger_com
 from .models import (
     ComputedAttrJinja2GraphQL,
     ComputedAttrJinja2GraphQLResponse,
+    ComputedAttrJinja2TriggerDefinition,
     PythonTransformTarget,
 )
 
@@ -312,21 +313,46 @@ async def computed_attribute_setup_jinja2(
     ) # type: ignore[misc]
     # Configure all ComputedAttrJinja2Trigger in Prefect
 
+    all_triggers = report.triggers_with_type(trigger_type=ComputedAttrJinja2TriggerDefinition)
+
     # Since we can have multiple trigger per NodeKind
-    # we need to extract the list of unique node that should be processed
-    unique_nodes: set[tuple[str, str, str]] = {
-        (trigger.branch, trigger.computed_attribute.kind, trigger.computed_attribute.attribute.name) # type: ignore[attr-defined]
-        for trigger in report.updated + report.created
-    }
-    for branch, kind, attribute_name in unique_nodes:
-        if event_name != BranchDeletedEvent.event_name and branch == branch_name:
+    # we need to extract the list of unique node that should be processed, this is done by filtering the triggers that targets_self
+    modified_triggers = [
+        trigger
+        for trigger in report.modified_triggers_with_type(trigger_type=ComputedAttrJinja2TriggerDefinition)
+        if trigger.targets_self
+    ]
+
+    for modified_trigger in modified_triggers:
+        if event_name != BranchDeletedEvent.event_name and modified_trigger.branch == branch_name:
+            if branch_name != registry.default_branch:
+                default_branch_triggers = [
+                    trigger
+                    for trigger in all_triggers
+                    if trigger.branch == registry.default_branch
+                    and trigger.targets_self
+                    and trigger.computed_attribute.kind == modified_trigger.computed_attribute.kind
+                    and trigger.computed_attribute.attribute.name
+                    == modified_trigger.computed_attribute.attribute.name
+                ]
+                if (
+                    default_branch_triggers
+                    and len(default_branch_triggers) == 1
+                    and default_branch_triggers[0].template_hash == modified_trigger.template_hash
+                ):
+                    log.debug(
+                        f"Skipping computed attribute updates for {modified_trigger.computed_attribute.kind}."
+                        f"{modified_trigger.computed_attribute.attribute.name} [{branch_name}], schema is identical to default branch"
+                    )
+                    continue
+
             await get_workflow().submit_workflow(
                 workflow=TRIGGER_UPDATE_JINJA_COMPUTED_ATTRIBUTES,
                 context=context,
                 parameters={
-                    "branch_name": branch,
-                    "computed_attribute_name": attribute_name,
-                    "computed_attribute_kind": kind,
+                    "branch_name": modified_trigger.branch,
+                    "computed_attribute_name": modified_trigger.computed_attribute.attribute.name,
+                    "computed_attribute_kind": modified_trigger.computed_attribute.kind,
                 },
             )
 
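The practical effect of the block added above: when a non-default branch has exactly one matching trigger on the default branch and their template hashes are equal, the update workflow is not resubmitted for that branch. Reduced to plain values, the decision looks roughly like this (the function and variable names are illustrative only):

def should_skip(branch_template_hash: str, default_branch_hashes: list[str]) -> bool:
    # Skip when the default branch has exactly one matching trigger with an identical template hash.
    return len(default_branch_hashes) == 1 and default_branch_hashes[0] == branch_template_hash

print(should_skip("abc123", ["abc123"]))  # True  -> schema identical to default branch, skip
print(should_skip("abc123", ["def456"]))  # False -> template differs, resubmit the workflow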
infrahub/config.py CHANGED
@@ -402,6 +402,11 @@ class WorkflowSettings(BaseSettings):
     worker_polling_interval: int = Field(
         default=2, ge=1, le=30, description="Specify how often the worker should poll the server for tasks (sec)"
     )
+    flow_run_count_cache_threshold: int = Field(
+        default=100_000,
+        ge=0,
+        description="Threshold for caching flow run counts (0 to always cache, higher values to disable)",
+    )
 
     @property
     def api_endpoint(self) -> str:
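A quick sketch of the new WorkflowSettings field and its ge=0 lower bound, assuming the class can be instantiated directly with keyword overrides like a plain pydantic settings object; the import path comes from the infrahub/config.py header above, and nothing beyond the field definition is confirmed by this diff:

from infrahub.config import WorkflowSettings

settings = WorkflowSettings(flow_run_count_cache_threshold=50_000)
print(settings.flow_run_count_cache_threshold)  # 50000

try:
    WorkflowSettings(flow_run_count_cache_threshold=-1)  # violates ge=0
except Exception as exc:  # pydantic raises a ValidationError here
    print(type(exc).__name__)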
@@ -336,7 +336,7 @@ class BaseAttribute(FlagPropertyMixin, NodePropertyMixin):
 
         delete_at = Timestamp(at)
 
-        query = await AttributeGetQuery.init(db=db, attr=self)
+        query = await AttributeGetQuery.init(db=db, attr=self, at=delete_at)
         await query.execute(db=db)
         results = query.get_results()
 
@@ -141,8 +141,9 @@ class Branch(StandardNode):
         """
 
         params: dict[str, Any] = {"name": name}
+        params["ignore_statuses"] = []
         if ignore_deleting:
-            params["ignore_statuses"] = [BranchStatus.DELETING.value]
+            params["ignore_statuses"].append(BranchStatus.DELETING.value)
 
         results = await db.execute_query(query=query, params=params, name="branch_get_by_name", type=QueryType.READ)
 
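With this change ignore_statuses is always present in the query parameters, as an empty list unless ignore_deleting is set. A minimal sketch of the resulting values, using a literal "deleting" as a stand-in for BranchStatus.DELETING.value (the enum's actual value is not shown in this diff):

def build_params(name: str, ignore_deleting: bool) -> dict:
    params = {"name": name, "ignore_statuses": []}
    if ignore_deleting:
        params["ignore_statuses"].append("deleting")  # stand-in for BranchStatus.DELETING.value
    return params

print(build_params("main", ignore_deleting=False))  # {'name': 'main', 'ignore_statuses': []}
print(build_params("main", ignore_deleting=True))   # {'name': 'main', 'ignore_statuses': ['deleting']}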
@@ -338,7 +339,7 @@ class Branch(StandardNode):
         at = Timestamp(at)
         at_str = at.to_string()
         if branch_agnostic:
-            filter_str = f"{variable_name}.from <= ${pp}time1 AND ({variable_name}.to IS NULL or {variable_name}.to >= ${pp}time1)"
+            filter_str = f"{variable_name}.from < ${pp}time1 AND ({variable_name}.to IS NULL or {variable_name}.to >= ${pp}time1)"
             params[f"{pp}time1"] = at_str
             return filter_str, params
 
@@ -351,10 +352,13 @@ class Branch(StandardNode):
         filters = []
         for idx in range(len(branches_times)):
             filters.append(
-                f"({variable_name}.branch IN ${pp}branch{idx} AND {variable_name}.from <= ${pp}time{idx} AND {variable_name}.to IS NULL)"
+                f"({variable_name}.branch IN ${pp}branch{idx} "
+                f"AND {variable_name}.from < ${pp}time{idx} AND {variable_name}.to IS NULL)"
             )
             filters.append(
-                f"({variable_name}.branch IN ${pp}branch{idx} AND {variable_name}.from <= ${pp}time{idx} AND {variable_name}.to >= ${pp}time{idx})"
+                f"({variable_name}.branch IN ${pp}branch{idx} "
+                f"AND {variable_name}.from < ${pp}time{idx} "
+                f"AND {variable_name}.to >= ${pp}time{idx})"
             )
 
         filter_str = "(" + "\n OR ".join(filters) + ")"
@@ -502,9 +502,13 @@ async def post_process_branch_merge(source_branch: str, target_branch: str, cont
         parameters={"branch": target_branch, "source": GeneratorDefinitionRunSource.MERGE},
     )
 
+    active_branches = await Branch.get_list(db=db)
+    active_branch_names = {branch.name for branch in active_branches}
+
     for diff_root in branch_diff_roots:
         if (
             diff_root.base_branch_name != diff_root.diff_branch_name
+            and diff_root.diff_branch_name in active_branch_names
             and diff_root.tracking_id
             and isinstance(diff_root.tracking_id, BranchTrackingId)
         ):
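Merge post-processing now also checks that the diff branch still exists before scheduling follow-up work. Stripped of the Infrahub types, the added guard is a set-membership test, sketched here with hypothetical branch names:

active_branch_names = {"main", "feature-a"}
diff_roots = [("main", "feature-a"), ("main", "already-deleted")]  # (base_branch_name, diff_branch_name)
kept = [
    (base, diff) for base, diff in diff_roots
    if base != diff and diff in active_branch_names
]
print(kept)  # [('main', 'feature-a')]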
@@ -8,6 +8,7 @@ from infrahub.core.diff.coordinator import DiffCoordinator
 from infrahub.core.diff.models import RequestDiffUpdate # noqa: TC001 needed for prefect flow
 from infrahub.core.diff.repository.repository import DiffRepository
 from infrahub.dependencies.registry import get_component_registry
+from infrahub.exceptions import BranchNotFoundError
 from infrahub.log import get_logger
 from infrahub.workers.dependencies import get_database, get_workflow
 from infrahub.workflows.catalogue import DIFF_REFRESH
@@ -24,7 +25,11 @@ async def update_diff(model: RequestDiffUpdate) -> None:
     async with database.start_session(read_only=False) as db:
         component_registry = get_component_registry()
         base_branch = await registry.get_branch(db=db, branch=registry.default_branch)
-        diff_branch = await registry.get_branch(db=db, branch=model.branch_name)
+        try:
+            diff_branch = await registry.get_branch(db=db, branch=model.branch_name)
+        except BranchNotFoundError:
+            log.warn(f"Branch {model.branch_name} not found, skipping diff update")
+            return
 
         diff_coordinator = await component_registry.get_component(DiffCoordinator, db=db, branch=diff_branch)
 
@@ -1 +1 @@
-GRAPH_VERSION = 44
+GRAPH_VERSION = 46
@@ -46,6 +46,8 @@ from .m041_deleted_dup_edges import Migration041
 from .m042_profile_attrs_in_db import Migration042
 from .m043_create_hfid_display_label_in_db import Migration043
 from .m044_backfill_hfid_display_label_in_db import Migration044
+from .m045_backfill_hfid_display_label_in_db_profile_template import Migration045
+from .m046_fill_agnostic_hfid_display_labels import Migration046
 
 if TYPE_CHECKING:
     from ..shared import MigrationTypes
@@ -96,6 +98,8 @@ MIGRATIONS: list[type[MigrationTypes]] = [
     Migration042,
     Migration043,
     Migration044,
+    Migration045,
+    Migration046,
 ]
 
 
@@ -6,7 +6,7 @@ from rich.progress import Progress
 
 from infrahub.core import registry
 from infrahub.core.branch import Branch
-from infrahub.core.constants import SchemaPathType
+from infrahub.core.constants import BranchSupportType, SchemaPathType
 from infrahub.core.initialization import get_root_node
 from infrahub.core.migrations.schema.node_attribute_add import NodeAttributeAddMigration
 from infrahub.core.migrations.shared import MigrationRequiringRebase, MigrationResult, get_migration_console
@@ -78,6 +78,8 @@ class Migration043(MigrationRequiringRebase):
 
         for node_schema_kind in main_schema_branch.node_names:
             schema = main_schema_branch.get(name=node_schema_kind, duplicate=False)
+            if schema.branch is not BranchSupportType.AWARE:
+                continue
             migrations.extend(
                 [
                     NodeAttributeAddMigration(
@@ -123,6 +125,8 @@ class Migration043(MigrationRequiringRebase):
 
         for node_kind, node_ids in node_ids_by_kind.items():
             schema = schema_branch.get(name=node_kind, duplicate=False)
+            if schema.branch not in (BranchSupportType.AWARE, BranchSupportType.LOCAL):
+                continue
             migrations.extend(
                 [
                     NodeAttributeAddMigration(
@@ -13,13 +13,15 @@ from infrahub.core.constants import GLOBAL_BRANCH_NAME, BranchSupportType, Relat
 from infrahub.core.initialization import get_root_node
 from infrahub.core.migrations.shared import MigrationResult, get_migration_console
 from infrahub.core.query import Query, QueryType
+from infrahub.core.schema import NodeSchema
+from infrahub.exceptions import SchemaNotFoundError
 from infrahub.types import is_large_attribute_type
 
 from ..shared import MigrationRequiringRebase
 from .load_schema_branch import get_or_load_schema_branch
 
 if TYPE_CHECKING:
-    from infrahub.core.schema import AttributeSchema, NodeSchema
+    from infrahub.core.schema import AttributeSchema, NodeSchema, ProfileSchema, TemplateSchema
     from infrahub.core.schema.basenode_schema import SchemaAttributePath
     from infrahub.core.schema.schema_branch import SchemaBranch
     from infrahub.database import InfrahubDatabase
@@ -38,18 +40,23 @@ class DefaultBranchNodeCount(Query):
     name = "get_branch_node_count"
     type = QueryType.READ
 
-    def __init__(self, kinds_to_skip: list[str], **kwargs: Any) -> None:
+    def __init__(
+        self, kinds_to_skip: list[str] | None = None, kinds_to_include: list[str] | None = None, **kwargs: Any
+    ) -> None:
         super().__init__(**kwargs)
-        self.kinds_to_skip = kinds_to_skip
+        self.kinds_to_skip = kinds_to_skip or []
+        self.kinds_to_include = kinds_to_include
 
     async def query_init(self, db: InfrahubDatabase, **kwargs: dict[str, Any]) -> None: # noqa: ARG002
         self.params = {
             "branch_names": [registry.default_branch, GLOBAL_BRANCH_NAME],
             "kinds_to_skip": self.kinds_to_skip,
+            "kinds_to_include": self.kinds_to_include,
         }
         query = """
         MATCH (n:Node)-[e:IS_PART_OF]->(:Root)
         WHERE NOT n.kind IN $kinds_to_skip
+        AND ($kinds_to_include IS NULL OR n.kind IN $kinds_to_include)
         AND e.branch IN $branch_names
         AND e.status = "active"
         AND e.to IS NULL
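The count query can now be limited to an explicit list of kinds in addition to excluding kinds. A hedged usage sketch, to be called from an async context that already has an InfrahubDatabase handle; the init, execute, and get_num_nodes calls are taken from this diff (Migration045 below uses them), while the kind names are purely illustrative:

from infrahub.core.migrations.graph.m044_backfill_hfid_display_label_in_db import DefaultBranchNodeCount

async def count_selected_kinds(db) -> int:
    # Count default-branch nodes for two example kinds only.
    query = await DefaultBranchNodeCount.init(db=db, kinds_to_include=["CoreAccount", "BuiltinTag"])
    await query.execute(db=db)
    return query.get_num_nodes()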
@@ -621,7 +628,7 @@ class Migration044(MigrationRequiringRebase):
         self,
         db: InfrahubDatabase,
         branch: Branch,
-        schema: NodeSchema,
+        schema: NodeSchema | ProfileSchema | TemplateSchema,
         schema_branch: SchemaBranch,
         attribute_schema_map: dict[AttributeSchema, AttributeSchema],
         progress: Progress | None = None,
@@ -729,6 +736,10 @@ class Migration044(MigrationRequiringRebase):
                         continue
 
                     node_schema = main_schema_branch.get_node(name=node_schema_name, duplicate=False)
+
+                    if node_schema.branch is not BranchSupportType.AWARE:
+                        continue
+
                     attribute_schema_map = {}
                     if node_schema.display_labels:
                         attribute_schema_map[display_labels_attribute_schema] = display_label_attribute_schema
@@ -755,7 +766,7 @@ class Migration044(MigrationRequiringRebase):
         self,
         db: InfrahubDatabase,
         branch: Branch,
-        schema: NodeSchema,
+        schema: NodeSchema | ProfileSchema | TemplateSchema,
         schema_branch: SchemaBranch,
         source_attribute_schema: AttributeSchema,
         destination_attribute_schema: AttributeSchema,
@@ -824,18 +835,32 @@ class Migration044(MigrationRequiringRebase):
                     continue
 
                 node_schema = schema_branch.get_node(name=node_schema_name, duplicate=False)
-                default_node_schema = main_schema_branch.get_node(name=node_schema_name, duplicate=False)
+                if node_schema.branch not in (BranchSupportType.AWARE, BranchSupportType.LOCAL):
+                    continue
+                try:
+                    default_node_schema = main_schema_branch.get_node(name=node_schema_name, duplicate=False)
+                except SchemaNotFoundError:
+                    default_node_schema = None
                 schemas_for_universal_update_map = {}
                 schemas_for_targeted_update_map = {}
-                if default_node_schema.display_label != node_schema.display_label:
-                    schemas_for_universal_update_map[display_labels_attribute_schema] = display_label_attribute_schema
-                elif node_schema.display_labels:
-                    schemas_for_targeted_update_map[display_labels_attribute_schema] = display_label_attribute_schema
-
-                if default_node_schema.human_friendly_id != node_schema.human_friendly_id:
-                    schemas_for_universal_update_map[hfid_attribute_schema] = hfid_attribute_schema
-                elif node_schema.human_friendly_id:
-                    schemas_for_targeted_update_map[hfid_attribute_schema] = hfid_attribute_schema
+                if node_schema.display_label:
+                    if default_node_schema is None or default_node_schema.display_label != node_schema.display_label:
+                        schemas_for_universal_update_map[display_labels_attribute_schema] = (
+                            display_label_attribute_schema
+                        )
+                    else:
+                        schemas_for_targeted_update_map[display_labels_attribute_schema] = (
+                            display_label_attribute_schema
+                        )
+
+                if node_schema.human_friendly_id:
+                    if (
+                        default_node_schema is None
+                        or default_node_schema.human_friendly_id != node_schema.human_friendly_id
+                    ):
+                        schemas_for_universal_update_map[hfid_attribute_schema] = hfid_attribute_schema
+                    else:
+                        schemas_for_targeted_update_map[hfid_attribute_schema] = hfid_attribute_schema
 
                 if schemas_for_universal_update_map:
                     await self._do_one_schema_all(
@@ -0,0 +1,163 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from rich.progress import Progress
+
+from infrahub.core import registry
+from infrahub.core.branch import Branch
+from infrahub.core.initialization import get_root_node
+from infrahub.core.migrations.graph.m044_backfill_hfid_display_label_in_db import DefaultBranchNodeCount, Migration044
+from infrahub.core.migrations.shared import MigrationResult, get_migration_console
+from infrahub.exceptions import SchemaNotFoundError
+
+from .load_schema_branch import get_or_load_schema_branch
+
+if TYPE_CHECKING:
+    from infrahub.core.schema import ProfileSchema, TemplateSchema
+    from infrahub.database import InfrahubDatabase
+
+
+console = get_migration_console()
+
+
+class Migration045(Migration044):
+    """
+    Backfill `human_friendly_id` and `display_label` attributes for profile and template nodes with schemas that define them.
+    """
+
+    name: str = "045_backfill_hfid_display_label_in_db_profile_template"
+    minimum_version: int = 44
+    update_batch_size: int = 1000
+
+    async def execute(self, db: InfrahubDatabase) -> MigrationResult:
+        root_node = await get_root_node(db=db, initialize=False)
+        default_branch_name = root_node.default_branch
+        default_branch = await Branch.get_by_name(db=db, name=default_branch_name)
+
+        main_schema_branch = await get_or_load_schema_branch(db=db, branch=default_branch)
+        kinds_to_skip = self.kinds_to_skip + main_schema_branch.node_names
+
+        total_nodes_query = await DefaultBranchNodeCount.init(db=db, kinds_to_skip=kinds_to_skip)
+        await total_nodes_query.execute(db=db)
+        total_nodes_count = total_nodes_query.get_num_nodes()
+
+        base_node_schema = main_schema_branch.get("SchemaNode", duplicate=False)
+        display_label_attribute_schema = base_node_schema.get_attribute("display_label")
+        display_labels_attribute_schema = base_node_schema.get_attribute("display_labels")
+        hfid_attribute_schema = base_node_schema.get_attribute("human_friendly_id")
+
+        try:
+            with Progress(console=console) as progress:
+                update_task = progress.add_task(
+                    f"Set display_label and human_friendly_id for {total_nodes_count} nodes on default branch",
+                    total=total_nodes_count,
+                )
+                for node_schema_name in main_schema_branch.profile_names + main_schema_branch.template_names:
+                    if node_schema_name in self.kinds_to_skip:
+                        continue
+
+                    node_schema: ProfileSchema | TemplateSchema
+                    if node_schema_name in main_schema_branch.profile_names:
+                        node_schema = main_schema_branch.get_profile(name=node_schema_name, duplicate=False)
+                    else:
+                        node_schema = main_schema_branch.get_template(name=node_schema_name, duplicate=False)
+
+                    attribute_schema_map = {}
+                    if node_schema.display_labels:
+                        attribute_schema_map[display_labels_attribute_schema] = display_label_attribute_schema
+                    if node_schema.human_friendly_id:
+                        attribute_schema_map[hfid_attribute_schema] = hfid_attribute_schema
+                    if not attribute_schema_map:
+                        continue
+
+                    await self._do_one_schema_all(
+                        db=db,
+                        branch=default_branch,
+                        schema=node_schema,
+                        schema_branch=main_schema_branch,
+                        attribute_schema_map=attribute_schema_map,
+                        progress=progress,
+                        update_task=update_task,
+                    )
+
+        except Exception as exc:
+            return MigrationResult(errors=[str(exc)])
+        return MigrationResult()
+
+    async def execute_against_branch(self, db: InfrahubDatabase, branch: Branch) -> MigrationResult:
+        default_branch = await Branch.get_by_name(db=db, name=registry.default_branch)
+        main_schema_branch = await get_or_load_schema_branch(db=db, branch=default_branch)
+        schema_branch = await get_or_load_schema_branch(db=db, branch=branch)
+
+        base_node_schema = schema_branch.get("SchemaNode", duplicate=False)
+        display_label_attribute_schema = base_node_schema.get_attribute("display_label")
+        display_labels_attribute_schema = base_node_schema.get_attribute("display_labels")
+        hfid_attribute_schema = base_node_schema.get_attribute("human_friendly_id")
+
+        try:
+            for node_schema_name in schema_branch.profile_names + schema_branch.template_names:
+                if node_schema_name in self.kinds_to_skip:
+                    continue
+
+                node_schema: ProfileSchema | TemplateSchema
+                default_node_schema: ProfileSchema | TemplateSchema | None
+                if node_schema_name in schema_branch.profile_names:
+                    node_schema = schema_branch.get_profile(name=node_schema_name, duplicate=False)
+                    try:
+                        default_node_schema = main_schema_branch.get_profile(name=node_schema_name, duplicate=False)
+                    except SchemaNotFoundError:
+                        default_node_schema = None
+                else:
+                    node_schema = schema_branch.get_template(name=node_schema_name, duplicate=False)
+                    try:
+                        default_node_schema = main_schema_branch.get_template(name=node_schema_name, duplicate=False)
+                    except SchemaNotFoundError:
+                        default_node_schema = None
+
+                schemas_for_universal_update_map = {}
+                schemas_for_targeted_update_map = {}
+                if node_schema.display_labels:
+                    if default_node_schema is None or default_node_schema.display_label != node_schema.display_label:
+                        schemas_for_universal_update_map[display_labels_attribute_schema] = (
+                            display_label_attribute_schema
+                        )
+                    else:
+                        schemas_for_targeted_update_map[display_labels_attribute_schema] = (
+                            display_label_attribute_schema
+                        )
+
+                if node_schema.human_friendly_id:
+                    if (
+                        default_node_schema is None
+                        or default_node_schema.human_friendly_id != node_schema.human_friendly_id
+                    ):
+                        schemas_for_universal_update_map[hfid_attribute_schema] = hfid_attribute_schema
+                    else:
+                        schemas_for_targeted_update_map[hfid_attribute_schema] = hfid_attribute_schema
+
+                if schemas_for_universal_update_map:
+                    await self._do_one_schema_all(
+                        db=db,
+                        branch=branch,
+                        schema=node_schema,
+                        schema_branch=schema_branch,
+                        attribute_schema_map=schemas_for_universal_update_map,
+                    )
+
+                if not schemas_for_targeted_update_map:
+                    continue
+
+                for source_attribute_schema, destination_attribute_schema in schemas_for_targeted_update_map.items():
+                    await self._do_one_schema_branch(
+                        db=db,
+                        branch=branch,
+                        schema=node_schema,
+                        schema_branch=schema_branch,
+                        source_attribute_schema=source_attribute_schema,
+                        destination_attribute_schema=destination_attribute_schema,
+                    )
+
+        except Exception as exc:
+            return MigrationResult(errors=[str(exc)])
+        return MigrationResult()