tinybird 0.0.1.dev1__py3-none-any.whl → 0.0.1.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tinybird/__cli__.py CHANGED
@@ -4,5 +4,5 @@ __description__ = 'Tinybird Command Line Tool'
 __url__ = 'https://www.tinybird.co/docs/cli/introduction.html'
 __author__ = 'Tinybird'
 __author_email__ = 'support@tinybird.co'
-__version__ = '0.0.1.dev1'
-__revision__ = 'dfec803'
+__version__ = '0.0.1.dev3'
+__revision__ = '50bec29'
tinybird/client.py CHANGED
@@ -248,10 +248,10 @@ class TinyB:
         url = url + "?" + scopes_url
         return await self._req(url, method="PUT", data="")

-    async def datasources(self, branch: Optional[str] = None, used_by: bool = False):
-        params = {
-            "attrs": "used_by" if used_by else "",
-        }
+    async def datasources(self, branch: Optional[str] = None, used_by: bool = False) -> List[Dict[str, Any]]:
+        params = {}
+        if used_by:
+            params["attrs"] = "used_by"
         response = await self._req(f"/v0/datasources?{urlencode(params)}")
         ds = response["datasources"]

@@ -498,6 +498,9 @@ class TinyB:
         payload = {"datasource_a": datasource_a, "datasource_b": datasource_b}
         return await self._req("/v0/datasources/exchange", method="POST", data=payload)

+    async def datasource_events(self, datasource_name: str, data: Dict[str, Any]):
+        return await self._req(f"/v0/events?name={datasource_name}", method="POST", data=data)
+
     async def analyze_pipe_node(
         self, pipe_name: str, node: Dict[str, Any], dry_run: str = "false", datasource_name: Optional[str] = None
     ):
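Note: `datasources()` now declares a `List[Dict[str, Any]]` return type and only sends the `attrs` parameter when `used_by=True`, and the new `datasource_events()` helper posts rows to the Events API at `/v0/events`. A minimal usage sketch; the `TinyB` constructor arguments shown here are illustrative, not part of this diff:

    import asyncio

    from tinybird.client import TinyB

    async def main() -> None:
        # Illustrative construction; the CLI normally reads token/host from its config.
        client = TinyB(token="<TB_TOKEN>", host="https://api.tinybird.co")

        # Lists Data Sources; "attrs=used_by" is only sent when used_by=True.
        for ds in await client.datasources(used_by=True):
            print(ds["name"], ds.get("used_by", []))

        # Sends one event to /v0/events?name=<datasource_name>.
        await client.datasource_events("events_ds", {"timestamp": "2024-01-01 00:00:00", "value": 1})

    asyncio.run(main())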
@@ -532,7 +535,7 @@ class TinyB:
         )
         return response

-    async def pipes(self, branch=None, dependencies: bool = False, node_attrs=None, attrs=None):
+    async def pipes(self, branch=None, dependencies: bool = False, node_attrs=None, attrs=None) -> List[Dict[str, Any]]:
         params = {
             "dependencies": "true" if dependencies else "false",
             "attrs": attrs if attrs else "",
tinybird/config.py CHANGED
@@ -20,7 +20,7 @@ CURRENT_VERSION = f"{__cli__.__version__}"
 VERSION = f"{__cli__.__version__} (rev {__revision__})"
 DEFAULT_UI_HOST = "https://app.tinybird.co"
 SUPPORTED_CONNECTORS = ["bigquery", "snowflake"]
-PROJECT_PATHS = ["datasources", "datasources/fixtures", "endpoints", "pipes", "tests", "scripts", "deploy"]
+PROJECT_PATHS = ["datasources", "pipes", "tests"]
 DEPRECATED_PROJECT_PATHS = ["endpoints"]
 MIN_WORKSPACE_ID_LENGTH = 36
 LEGACY_HOSTS = {
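`PROJECT_PATHS` is trimmed from seven folders to three, with `endpoints` already tracked separately in `DEPRECATED_PROJECT_PATHS`. For illustration, a hypothetical scaffolding loop over the new value (not code from this package):

    import os

    PROJECT_PATHS = ["datasources", "pipes", "tests"]

    # Create each expected project folder if it is missing.
    for path in PROJECT_PATHS:
        os.makedirs(path, exist_ok=True)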
tinybird/datafile.py CHANGED
@@ -3066,7 +3066,7 @@ async def new_pipe(
                 )
             )

-        click.echo(FeedbackManager.success_test_endpoint_no_token(host=host, pipe=p["name"]))
+        click.echo(FeedbackManager.success_test_endpoint(host=host, pipe=p["name"], token="$TB_TOKEN"))

    if copy_node:
        pipe_id = data["id"]
@@ -3126,7 +3126,7 @@ async def new_pipe(
        raise click.ClickException(FeedbackManager.error_creating_pipe(error=e))

    if data.get("type") == "endpoint":
-        click.echo(FeedbackManager.success_test_endpoint_no_token(host=host, pipe=p["name"]))
+        click.echo(FeedbackManager.success_test_endpoint(host=host, pipe=p["name"], token=t["token"]))


async def share_and_unshare_datasource(
@@ -3217,6 +3217,7 @@ async def new_ds(
    fork_downstream: Optional[bool] = False,
    fork: Optional[bool] = False,
    git_release: Optional[bool] = False,
+    build: Optional[bool] = False,
):
    ds_name = ds["params"]["name"]

@@ -3239,9 +3240,14 @@
            scopes.append(sc)
        await client.alter_tokens(token_name, scopes)

+    datasource_exists = False
    try:
        existing_ds = await client.get_datasource(ds_name)
-        datasource_exists = True
+        if build:
+            await client.datasource_delete(ds_name, force=True)
+        else:
+            datasource_exists = True
+
    except DoesNotExistException:
        datasource_exists = False

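In build mode, an existing Data Source is now dropped and recreated rather than treated as already deployed. The same control flow as the hunk above, shown in isolation as plain code:

    # Sketch of the build-mode branch in new_ds (names as in the diff).
    datasource_exists = False
    try:
        existing_ds = await client.get_datasource(ds_name)
        if build:
            # Drop the existing Data Source so the subsequent create starts clean.
            await client.datasource_delete(ds_name, force=True)
        else:
            datasource_exists = True
    except DoesNotExistException:
        datasource_exists = False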
@@ -3574,6 +3580,7 @@ async def exec_file(
    fork_downstream: Optional[bool] = False,
    fork: Optional[bool] = False,
    git_release: Optional[bool] = False,
+    build: Optional[bool] = False,
):
    if debug:
        click.echo(FeedbackManager.debug_running_file(file=pp.pformat(r)))
@@ -3616,6 +3623,7 @@ async def exec_file(
            fork_downstream=fork_downstream,
            fork=fork,
            git_release=git_release,
+            build=build,
        )
        await update_tags_in_resource(r, "datasource", tb_client)
    elif r["resource"] == "tokens":
@@ -3900,6 +3908,7 @@ async def build_graph(
    only_changes: bool = False,
    fork_downstream: Optional[bool] = False,
    is_internal: Optional[bool] = False,
+    build: Optional[bool] = False,
) -> GraphDependencies:
    """
    This method will generate a dependency graph for the given files. It will also return a map of all the resources that are going to be deployed.
@@ -3936,6 +3945,7 @@ async def build_graph(
        changed=None,
        only_changes=False,
        is_internal=is_internal,
+        build=build,
    )
    all_dep_map = all_dependencies_graph.dep_map
    all_resources = all_dependencies_graph.to_run
@@ -4122,8 +4132,10 @@ def get_project_filenames(folder: str, with_vendor=False) -> List[str]:
        f"{folder}/*.pipe",
        f"{folder}/pipes/*.pipe",
        f"{folder}/endpoints/*.pipe",
-        f"{folder}/*.token",
-        f"{folder}/tokens/*.token",
+        f"{folder}/materializations/*.pipe",
+        f"{folder}/sinks/*.pipe",
+        f"{folder}/copies/*.pipe",
+        f"{folder}/playgrounds/*.pipe",
    ]
    if with_vendor:
        folders.append(f"{folder}/vendor/**/**/*.datasource")
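Project discovery now picks up `.pipe` files from the materializations, sinks, copies, and playgrounds folders, and no longer collects `.token` files. A self-contained sketch of the equivalent expansion using plain `glob` (the helper name is hypothetical):

    import glob
    from typing import List

    def collect_pipe_files(folder: str) -> List[str]:
        # Mirrors the .pipe patterns that get_project_filenames now builds.
        patterns = [
            f"{folder}/*.pipe",
            f"{folder}/pipes/*.pipe",
            f"{folder}/endpoints/*.pipe",
            f"{folder}/materializations/*.pipe",
            f"{folder}/sinks/*.pipe",
            f"{folder}/copies/*.pipe",
            f"{folder}/playgrounds/*.pipe",
        ]
        return [path for pattern in patterns for path in glob.glob(pattern)]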
@@ -4348,6 +4360,7 @@ async def folder_push(
        dry_run: bool,
        fork_downstream: Optional[bool] = False,
        fork: Optional[bool] = False,
+        build: Optional[bool] = False,
    ):
        if name in to_run:
            resource = to_run[name]["resource"]
@@ -4397,6 +4410,7 @@ async def folder_push(
                            fork_downstream,
                            fork,
                            git_release,
+                            build,
                        )
                        if not run_tests:
                            click.echo(
@@ -4446,7 +4460,13 @@ async def folder_push(
                        version = ""
                        if name in latest_datasource_versions:
                            version = f"(v{latest_datasource_versions[name]})"
-                        click.echo(FeedbackManager.info_dry_processing_new_resource(name=name, version=version))
+                        if build:
+                            extension = "pipe" if resource == "pipes" else "datasource"
+                            click.echo(
+                                FeedbackManager.info_building_resource(name=f"{name}.{extension}", version=version)
+                            )
+                        else:
+                            click.echo(FeedbackManager.info_dry_processing_new_resource(name=name, version=version))
                    else:
                        click.echo(
                            FeedbackManager.info_dry_processing_resource(
@@ -5587,3 +5607,517 @@ def is_file_a_datasource(filename: str) -> bool:
         return True

     return False
+
+
+async def folder_build(
+    tb_client: TinyB,
+    workspaces: List[Dict[str, Any]],
+    datasources: List[Dict[str, Any]],
+    pipes: List[Dict[str, Any]],
+    filenames: Optional[List[str]] = None,
+    folder: str = ".",
+    ignore_sql_errors: bool = False,
+    is_internal: bool = False,
+    only_pipes: bool = False,
+):
+    if only_pipes:
+        filenames = [f for f in filenames if f.endswith(".pipe")]
+
+    config = CLIConfig.get_project_config()
+    build = True
+    dry_run = False
+    force = True
+    push_deps = False
+    only_changes = False
+    debug = False
+    check = True
+    populate = False
+    populate_subset = None
+    populate_condition = None
+    tests_to_run = 0
+    user_token = None
+    tests_failfast = True
+    override_datasource = True
+    tests_check_requests_from_branch = False
+    skip_confirmation = True
+    wait = False
+    unlink_on_populate_error = False
+    upload_fixtures = False
+    only_response_times = False
+    workspace_map: Dict[str, Any] = {}
+    tests_sample_by_params = 1
+    tests_ignore_order = False
+    tests_validate_processed_bytes = False
+    run_tests = False
+    verbose = False
+    as_standard = False
+    raise_on_exists = False
+    fork_downstream = False
+    fork = False
+    release_created = False
+    auto_promote = False
+    hide_folders = False
+    tests_relative_change = 0.01
+    tests_sample_by_params = 0
+    tests_filter_by = None
+    tests_failfast = False
+    tests_ignore_order = False
+    tests_validate_processed_bytes = False
+    tests_check_requests_from_branch = False
+    git_release = False
+    workspace_lib_paths = []
+    current_ws: Dict[str, Any] = next(
+        (workspace for workspace in workspaces if config and workspace.get("id", ".") == config.get("id", "..")), {}
+    )
+
+    workspace_lib_paths = list(workspace_lib_paths)
+    # include vendor libs without overriding user ones
+    existing_workspaces = set(x[1] for x in workspace_lib_paths)
+    vendor_path = Path("vendor")
+    if vendor_path.exists():
+        for x in vendor_path.iterdir():
+            if x.is_dir() and x.name not in existing_workspaces:
+                workspace_lib_paths.append((x.name, x))
+
+    datasources: List[Dict[str, Any]] = await tb_client.datasources()
+    pipes: List[Dict[str, Any]] = await tb_client.pipes(dependencies=True)
+
+    existing_resources: List[str] = [x["name"] for x in datasources] + [x["name"] for x in pipes]
+    # replace workspace mapping names
+    for old_ws, new_ws in workspace_map.items():
+        existing_resources = [re.sub(f"^{old_ws}\.", f"{new_ws}.", x) for x in existing_resources]
+
+    remote_resource_names = [get_remote_resource_name_without_version(x) for x in existing_resources]
+
+    # replace workspace mapping names
+    for old_ws, new_ws in workspace_map.items():
+        remote_resource_names = [re.sub(f"^{old_ws}\.", f"{new_ws}.", x) for x in remote_resource_names]
+
+    if not filenames:
+        filenames = get_project_filenames(folder)
+
+    changed = None
+
+    # build the graph to get new versions for all the files involved in the query
+    # dependencies need to be processed always to get the versions
+    dependencies_graph = await build_graph(
+        filenames,
+        tb_client,
+        dir_path=folder,
+        process_dependencies=True,
+        workspace_map=workspace_map,
+        skip_connectors=True,
+        workspace_lib_paths=workspace_lib_paths,
+        current_ws=current_ws,
+        changed=changed,
+        only_changes=only_changes,
+        fork_downstream=fork_downstream,
+        is_internal=is_internal,
+        build=build,
+    )
+
+    resource_versions = {}
+    latest_datasource_versions = {}
+
+    # If we have datasources using VERSION, let's try to get the latest version
+    dependencies_graph = await build_graph(
+        filenames,
+        tb_client,
+        dir_path=folder,
+        resource_versions=latest_datasource_versions,
+        workspace_map=workspace_map,
+        process_dependencies=push_deps,
+        verbose=verbose,
+        workspace_lib_paths=workspace_lib_paths,
+        current_ws=current_ws,
+        changed=changed,
+        only_changes=only_changes,
+        fork_downstream=fork_downstream,
+        is_internal=is_internal,
+        build=build,
+    )
+
+    if debug:
+        pp.pprint(dependencies_graph.to_run)
+
+    def should_push_file(
+        name: str,
+        remote_resource_names: List[str],
+        latest_datasource_versions: Dict[str, Any],
+        force: bool,
+        run_tests: bool,
+    ) -> bool:
+        """
+        Decide whether we need to run a file or not
+        """
+        if name not in remote_resource_names:
+            return True
+        # Also push a file when its versioned name doesn't exist yet or differs from the existing one
+        resource_full_name = (
+            f"{name}__v{latest_datasource_versions.get(name)}" if name in latest_datasource_versions else name
+        )
+        if resource_full_name not in existing_resources:
+            return True
+        if force or run_tests:
+            return True
+        return False
+
+    async def push(
+        name: str,
+        to_run: Dict[str, Dict[str, Any]],
+        resource_versions: Dict[str, Any],
+        latest_datasource_versions: Dict[str, Any],
+        dry_run: bool,
+        fork_downstream: Optional[bool] = False,
+        fork: Optional[bool] = False,
+    ):
+        if name in to_run:
+            resource = to_run[name]["resource"]
+            if not dry_run:
+                if should_push_file(name, remote_resource_names, latest_datasource_versions, force, run_tests):
+                    if name not in resource_versions:
+                        version = ""
+                        if name in latest_datasource_versions:
+                            version = f"(v{latest_datasource_versions[name]})"
+                        click.echo(FeedbackManager.info_processing_new_resource(name=name, version=version))
+                    else:
+                        click.echo(
+                            FeedbackManager.info_processing_resource(
+                                name=name,
+                                version=latest_datasource_versions[name],
+                                latest_version=resource_versions.get(name),
+                            )
+                        )
+                    try:
+                        await exec_file(
+                            to_run[name],
+                            tb_client,
+                            force,
+                            check,
+                            debug and verbose,
+                            populate,
+                            populate_subset,
+                            populate_condition,
+                            unlink_on_populate_error,
+                            wait,
+                            user_token,
+                            override_datasource,
+                            ignore_sql_errors,
+                            skip_confirmation,
+                            only_response_times,
+                            run_tests,
+                            as_standard,
+                            tests_to_run,
+                            tests_relative_change,
+                            tests_sample_by_params,
+                            tests_filter_by,
+                            tests_failfast,
+                            tests_ignore_order,
+                            tests_validate_processed_bytes,
+                            tests_check_requests_from_branch,
+                            current_ws,
+                            fork_downstream,
+                            fork,
+                            git_release,
+                            build,
+                        )
+                        if not run_tests:
+                            click.echo(
+                                FeedbackManager.success_create(
+                                    name=(
+                                        name
+                                        if to_run[name]["version"] is None
+                                        else f'{name}__v{to_run[name]["version"]}'
+                                    )
+                                )
+                            )
+                    except Exception as e:
+                        filename = (
+                            os.path.basename(to_run[name]["filename"]) if hide_folders else to_run[name]["filename"]
+                        )
+                        exception = FeedbackManager.error_push_file_exception(
+                            filename=filename,
+                            error=e,
+                        )
+                        raise click.ClickException(exception)
+                else:
+                    if raise_on_exists:
+                        raise AlreadyExistsException(
+                            FeedbackManager.warning_name_already_exists(
+                                name=name if to_run[name]["version"] is None else f'{name}__v{to_run[name]["version"]}'
+                            )
+                        )
+                    else:
+                        if await name_matches_existing_resource(resource, name, tb_client):
+                            if resource == "pipes":
+                                click.echo(FeedbackManager.error_pipe_cannot_be_pushed(name=name))
+                            else:
+                                click.echo(FeedbackManager.error_datasource_cannot_be_pushed(name=name))
+                        else:
+                            click.echo(
+                                FeedbackManager.warning_name_already_exists(
+                                    name=(
+                                        name
+                                        if to_run[name]["version"] is None
+                                        else f'{name}__v{to_run[name]["version"]}'
+                                    )
+                                )
+                            )
+            else:
+                if should_push_file(name, remote_resource_names, latest_datasource_versions, force, run_tests):
+                    if name not in resource_versions:
+                        version = ""
+                        if name in latest_datasource_versions:
+                            version = f"(v{latest_datasource_versions[name]})"
+                        if build:
+                            extension = "pipe" if resource == "pipes" else "datasource"
+                            click.echo(
+                                FeedbackManager.info_building_resource(name=f"{name}.{extension}", version=version)
+                            )
+                        else:
+                            click.echo(FeedbackManager.info_dry_processing_new_resource(name=name, version=version))
+                    else:
+                        click.echo(
+                            FeedbackManager.info_dry_processing_resource(
+                                name=name,
+                                version=latest_datasource_versions[name],
+                                latest_version=resource_versions.get(name),
+                            )
+                        )
+                else:
+                    if await name_matches_existing_resource(resource, name, tb_client):
+                        if resource == "pipes":
+                            click.echo(FeedbackManager.warning_pipe_cannot_be_pushed(name=name))
+                        else:
+                            click.echo(FeedbackManager.warning_datasource_cannot_be_pushed(name=name))
+                    else:
+                        click.echo(FeedbackManager.warning_dry_name_already_exists(name=name))
+
+    async def push_files(
+        dependency_graph: GraphDependencies,
+        dry_run: bool = False,
+        check_backfill_required: bool = False,
+    ):
+        endpoints_dep_map = dict()
+        processed = set()
+
+        dependencies_graph = dependency_graph.dep_map
+        resources_to_run = dependency_graph.to_run
+
+        if not fork_downstream:
+            # First, deploy all the resources following the dependency graph, except for the endpoints
+            groups = [group for group in toposort(dependencies_graph)]
+            for group in groups:
+                for name in group:
+                    if name in processed:
+                        continue
+
+                    if is_endpoint_with_no_dependencies(
+                        resources_to_run.get(name, {}),
+                        dependencies_graph,
+                        resources_to_run,
+                    ):
+                        endpoints_dep_map[name] = dependencies_graph[name]
+                        continue
+
+                    await push(
+                        name,
+                        resources_to_run,
+                        resource_versions,
+                        latest_datasource_versions,
+                        dry_run,
+                        fork_downstream,
+                        fork,
+                    )
+                    processed.add(name)
+
+            # Then, deploy the endpoints that are on the dependency graph
+            groups = [group for group in toposort(endpoints_dep_map)]
+            for group in groups:
+                for name in group:
+                    if name not in processed:
+                        await push(
+                            name,
+                            resources_to_run,
+                            resource_versions,
+                            latest_datasource_versions,
+                            dry_run,
+                            fork_downstream,
+                            fork,
+                        )
+                        processed.add(name)
+        else:
+            # This will generate the graph from right to left and will fill the gaps of the dependencies
+            # If we have a graph like this:
+            # A -> B -> C
+            # and we only modify A, the normal dependencies graph will only contain a node like _{A => B}
+            # But we need a graph that contains A, B and C and the dependencies between them to deploy them in the right order
+            dependencies_graph_fork_downstream, resources_to_run_fork_downstream = generate_forkdownstream_graph(
+                dependency_graph.all_dep_map,
+                dependency_graph.all_resources,
+                resources_to_run,
+                list(dependency_graph.dep_map.keys()),
+            )
+
+            # First, deploy the datasources that need to be deployed.
+            # We need to deploy the datasources from left to right, as some datasources might have a MV that depends on the column types of previous datasources. Ex: the `test_change_column_type_landing_datasource` test
+            groups = [group for group in toposort(dependencies_graph_fork_downstream)]
+            groups.reverse()
+            for group in groups:
+                for name in group:
+                    if name in processed or not is_datasource(resources_to_run_fork_downstream[name]):
+                        continue
+
+                    # If the resource is new, we will use the normal resource information to deploy it
+                    # This is mostly used for datasources with connections.
+                    # At the moment, `resources_to_run_fork_downstream` is generated by `all_resources` and this is generated using the parameter `skip_connectors=True`
+                    # TODO: Should the `resources_to_run_fork_downstream` be generated using the `skip_connectors` parameter?
+                    if is_new(name, changed, dependencies_graph_fork_downstream, dependencies_graph_fork_downstream):
+                        await push(
+                            name,
+                            resources_to_run,
+                            resource_versions,
+                            latest_datasource_versions,
+                            dry_run,
+                            fork_downstream,
+                            fork,
+                        )
+                    else:
+                        # If we are trying to modify a Kafka or CDK datasource, we need to inform the user that the resource needs to be post-released
+                        kafka_connection_name = (
+                            resources_to_run_fork_downstream[name].get("params", {}).get("kafka_connection_name")
+                        )
+                        service = resources_to_run_fork_downstream[name].get("params", {}).get("import_service")
+                        if release_created and (kafka_connection_name or service):
+                            connector = "Kafka" if kafka_connection_name else service
+                            error_msg = FeedbackManager.error_connector_require_post_release(connector=connector)
+                            raise click.ClickException(error_msg)
+
+                        # If we are pushing a modified datasource, inform about the backfill
+                        if check_backfill_required and auto_promote and release_created:
+                            error_msg = FeedbackManager.error_check_backfill_required(resource_name=name)
+                            raise click.ClickException(error_msg)
+
+                        await push(
+                            name,
+                            resources_to_run_fork_downstream,
+                            resource_versions,
+                            latest_datasource_versions,
+                            dry_run,
+                            fork_downstream,
+                            fork,
+                        )
+                    processed.add(name)
+
+            # Now, build a map of all the endpoints and their dependencies
+            # We use the forkdownstream graph to get the dependencies of the endpoints, as the normal dependencies graph only contains the resources that are going to be deployed
+            # but does not include the missing gaps
+            # If we have ENDPOINT_A ----> MV_PIPE_B -----> DATASOURCE_B ------> ENDPOINT_C
+            # where endpoint A is used in MV_PIPE_B and we only modify endpoint A,
+            # the dependencies graph will only contain ENDPOINT_A and MV_PIPE_B, but not DATASOURCE_B and ENDPOINT_C
+            groups = [group for group in toposort(dependencies_graph_fork_downstream)]
+            for group in groups:
+                for name in group:
+                    if name in processed or not is_endpoint(resources_to_run_fork_downstream[name]):
+                        continue
+
+                    endpoints_dep_map[name] = dependencies_graph_fork_downstream[name]
+
+            # Now that we have the dependencies of the endpoints, we need to check that the resources have not been deployed yet and only care about the endpoints that depend on endpoints
+            groups = [group for group in toposort(endpoints_dep_map)]
+
+            # As we have used the forkdownstream graph to get the dependencies of the endpoints, we have all of them
+            # But we need to deploy the endpoints and their dependencies from left to right
+            # So we need to reverse the groups
+            groups.reverse()
+            for group in groups:
+                for name in group:
+                    if name in processed or not is_endpoint(resources_to_run_fork_downstream[name]):
+                        continue
+
+                    await push(
+                        name,
+                        resources_to_run_fork_downstream,
+                        resource_versions,
+                        latest_datasource_versions,
+                        dry_run,
+                        fork_downstream,
+                        fork,
+                    )
+                    processed.add(name)
+
+            # Now that the endpoints and datasources are deployed, we can deploy the rest of the pipes (copy & sinks)
+            # We need to rely on the forkdownstream graph as it contains all the modified pipes as well as their dependencies
+            # In this case, we don't need to generate a new graph as we did for the endpoints, as the pipes are not going to be used as dependencies and the datasources are already deployed
+            groups = [group for group in toposort(dependencies_graph_fork_downstream)]
+            for group in groups:
+                for name in group:
+                    if name in processed or is_materialized(resources_to_run_fork_downstream.get(name)):
+                        continue
+
+                    await push(
+                        name,
+                        resources_to_run_fork_downstream,
+                        resource_versions,
+                        latest_datasource_versions,
+                        dry_run,
+                        fork_downstream,
+                        fork,
+                    )
+                    processed.add(name)
+
+            # Finally, deploy the materialized views from right to left.
+            # We need to rely on the forkdownstream graph as it contains all the modified materialized views as well as their dependencies
+            # In this case, we don't need to generate a new graph as we did for the endpoints, as the pipes are not going to be used as dependencies and the datasources are already deployed
+            groups = [group for group in toposort(dependencies_graph_fork_downstream)]
+            for group in groups:
+                for name in group:
+                    if name in processed or not is_materialized(resources_to_run_fork_downstream.get(name)):
+                        continue
+
+                    await push(
+                        name,
+                        resources_to_run_fork_downstream,
+                        resource_versions,
+                        latest_datasource_versions,
+                        dry_run,
+                        fork_downstream,
+                        fork,
+                    )
+                    processed.add(name)
+
+    await push_files(dependencies_graph, dry_run)
+
+    if not dry_run and not run_tests:
+        if upload_fixtures:
+            click.echo(FeedbackManager.info_pushing_fixtures())
+
+            processed = set()
+            for group in toposort(dependencies_graph.dep_map):
+                for f in group:
+                    name = os.path.basename(f)
+                    if name not in processed and name in dependencies_graph.to_run:
+                        await check_fixtures_data(
+                            tb_client,
+                            dependencies_graph.to_run[name],
+                            debug,
+                            folder,
+                            force,
+                            mode="replace",
+                        )
+                        processed.add(name)
+            for f in dependencies_graph.to_run:
+                if f not in processed:
+                    await check_fixtures_data(
+                        tb_client,
+                        dependencies_graph.to_run[f],
+                        debug,
+                        folder,
+                        force,
+                        mode="replace",
+                    )
+        else:
+            if verbose:
+                click.echo(FeedbackManager.info_not_pushing_fixtures())
+
+    return dependencies_graph.to_run
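`folder_build` is effectively `folder_push` with its options pinned for a local build: `force=True`, `check=True`, `dry_run=False`, and `build=True` threaded through `build_graph` and `exec_file`, with datasources and pipes re-fetched from the workspace before the graph is built. A hedged invocation sketch; the client construction and the empty `workspaces` list are illustrative:

    import asyncio

    from tinybird.client import TinyB
    from tinybird.datafile import folder_build

    async def main() -> None:
        # Illustrative client; real callers obtain token/host via CLIConfig.
        client = TinyB(token="<TB_TOKEN>", host="https://api.tinybird.co")

        to_run = await folder_build(
            client,
            workspaces=[],   # normally the workspace list fetched from the API
            datasources=[],  # re-fetched inside via tb_client.datasources()
            pipes=[],        # re-fetched inside via tb_client.pipes(dependencies=True)
            folder=".",
        )
        print(f"processed {len(to_run)} resources")

    asyncio.run(main())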