cumulusci-plus 5.0.23__py3-none-any.whl → 5.0.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cumulusci-plus might be problematic.
- cumulusci/__about__.py +1 -1
- cumulusci/cli/task.py +17 -0
- cumulusci/cli/tests/test_flow.py +279 -2
- cumulusci/cli/tests/test_task.py +88 -2
- cumulusci/core/flowrunner.py +86 -6
- cumulusci/cumulusci.yml +24 -0
- cumulusci/tasks/create_package_version.py +14 -6
- cumulusci/tasks/salesforce/SfPackageCommands.py +363 -0
- cumulusci/tasks/salesforce/getPackageVersion.py +89 -0
- cumulusci/tasks/salesforce/tests/test_SfPackageCommands.py +554 -0
- cumulusci/tasks/salesforce/tests/test_getPackageVersion.py +651 -0
- cumulusci/tasks/salesforce/tests/test_update_external_credential.py +912 -0
- cumulusci/tasks/salesforce/tests/test_update_named_credential.py +1042 -0
- cumulusci/tasks/salesforce/update_external_credential.py +562 -0
- cumulusci/tasks/salesforce/update_named_credential.py +441 -0
- cumulusci/tasks/salesforce/users/permsets.py +63 -2
- cumulusci/tasks/salesforce/users/tests/test_permsets.py +184 -0
- cumulusci/tasks/sfdmu/__init__.py +0 -0
- cumulusci/tasks/sfdmu/sfdmu.py +256 -0
- cumulusci/tasks/sfdmu/tests/__init__.py +1 -0
- cumulusci/tasks/sfdmu/tests/test_runner.py +212 -0
- cumulusci/tasks/sfdmu/tests/test_sfdmu.py +443 -0
- cumulusci/utils/__init__.py +24 -2
- {cumulusci_plus-5.0.23.dist-info → cumulusci_plus-5.0.25.dist-info}/METADATA +7 -5
- {cumulusci_plus-5.0.23.dist-info → cumulusci_plus-5.0.25.dist-info}/RECORD +29 -16
- {cumulusci_plus-5.0.23.dist-info → cumulusci_plus-5.0.25.dist-info}/WHEEL +0 -0
- {cumulusci_plus-5.0.23.dist-info → cumulusci_plus-5.0.25.dist-info}/entry_points.txt +0 -0
- {cumulusci_plus-5.0.23.dist-info → cumulusci_plus-5.0.25.dist-info}/licenses/AUTHORS.rst +0 -0
- {cumulusci_plus-5.0.23.dist-info → cumulusci_plus-5.0.25.dist-info}/licenses/LICENSE +0 -0
cumulusci/__about__.py
CHANGED
@@ -1 +1 @@
-__version__ = "5.0.23"
+__version__ = "5.0.25"
cumulusci/cli/task.py
CHANGED
@@ -2,6 +2,7 @@ import json
 from pathlib import Path
 
 import click
+from dotenv import load_dotenv
 from rich.console import Console
 from rst2ansi import rst2ansi
 
@@ -126,6 +127,10 @@ class RunTaskCommand(click.MultiCommand):
             "help": "Drops into the Python debugger at task completion.",
             "is_flag": True,
         },
+        "loadenv": {
+            "help": "Loads environment variables from the .env file.",
+            "is_flag": True,
+        },
     }
 
     def list_commands(self, ctx):
@@ -151,6 +156,17 @@ class RunTaskCommand(click.MultiCommand):
 
         def run_task(*args, **kwargs):
             """Callback function that executes when the command fires."""
+            # Load environment variables FIRST, before any task processing
+            if kwargs.get("loadenv", None):
+                # Load .env file from the project root directory
+                env_path = (
+                    Path(runtime.project_config.repo_root) / ".env"
+                    if runtime.project_config
+                    else None
+                )
+                if env_path:
+                    load_dotenv(env_path)
+
             org, org_config = runtime.get_org(
                 kwargs.pop("org", None), fail_if_missing=False
             )
@@ -168,6 +184,7 @@ class RunTaskCommand(click.MultiCommand):
             task_config.config["options"].update(options)
 
             try:
+
                 task = task_class(
                     task_config.project_config, task_config, org_config=org_config
                 )
cumulusci/cli/tests/test_flow.py
CHANGED
@@ -4,9 +4,10 @@ import click
 import pytest
 
 from cumulusci.cli.runtime import CliRuntime
-from cumulusci.core.config import FlowConfig
+from cumulusci.core.config import FlowConfig, OrgConfig
 from cumulusci.core.exceptions import CumulusCIException, FlowNotFoundError
-from cumulusci.core.flowrunner import FlowCoordinator
+from cumulusci.core.flowrunner import FlowCoordinator, FlowStepSpec, StepSpec
+from cumulusci.tests.util import create_project_config
 
 from .. import flow
 from .utils import DummyTask, run_click_command
@@ -274,3 +275,279 @@ def test_flow_run__org_delete_error(echo):
     echo.assert_any_call(
         "Scratch org deletion failed. Ignoring the error below to complete the flow:"
     )
+
+
+# Tests for new FlowStepSpec and flow skipping functionality
+
+
+class TestFlowStepSpec:
+    """Test the FlowStepSpec class functionality."""
+
+    def test_flowstep_spec_creation(self):
+        """Test that FlowStepSpec can be created with proper inheritance."""
+        project_config = create_project_config("TestOwner", "TestRepo")
+
+        flow_step = FlowStepSpec(
+            task_config={"test": "value"},
+            step_num="1.0",
+            task_name="test_flow",
+            task_class=None,
+            project_config=project_config,
+            allow_failure=False,
+            when="org_config.username == 'test@example.com'",
+        )
+
+        assert isinstance(flow_step, StepSpec)
+        assert isinstance(flow_step, FlowStepSpec)
+        assert flow_step.task_name == "test_flow"
+        assert flow_step.when == "org_config.username == 'test@example.com'"
+        assert flow_step.task_config == {"test": "value"}
+
+
+class TestEvaluationMethods:
+    """Test the evaluation methods for flow and task skipping."""
+
+    def setup_method(self):
+        """Set up test fixtures."""
+        self.project_config = create_project_config("TestOwner", "TestRepo")
+        self.org_config = OrgConfig(
+            {"username": "test@example.com"}, "test", mock.Mock()
+        )
+        self.org_config.refresh_oauth_token = mock.Mock()
+
+    def test_evaluate_flow_step_with_true_condition(self):
+        """Test _evaluate_flow_step with a condition that evaluates to True."""
+        flow_config = FlowConfig({"description": "Test Flow", "steps": {}})
+        flow_config.project_config = self.project_config
+        coordinator = FlowCoordinator(self.project_config, flow_config)
+        coordinator.org_config = self.org_config
+
+        step = FlowStepSpec(
+            task_config={},
+            step_num="1.0",
+            task_name="test_flow",
+            task_class=None,
+            project_config=self.project_config,
+            allow_failure=False,
+            when="org_config.username == 'test@example.com'",
+        )
+
+        result = coordinator._evaluate_flow_step(step)
+        assert result is True
+
+    def test_evaluate_flow_step_with_false_condition(self):
+        """Test _evaluate_flow_step with a condition that evaluates to False."""
+        flow_config = FlowConfig({"description": "Test Flow", "steps": {}})
+        flow_config.project_config = self.project_config
+        coordinator = FlowCoordinator(self.project_config, flow_config)
+        coordinator.org_config = self.org_config
+
+        step = FlowStepSpec(
+            task_config={},
+            step_num="1.0",
+            task_name="test_flow",
+            task_class=None,
+            project_config=self.project_config,
+            allow_failure=False,
+            when="org_config.username == 'wrong@example.com'",
+        )
+
+        result = coordinator._evaluate_flow_step(step)
+        assert result is False
+
+    def test_evaluate_flow_step_without_when_condition(self):
+        """Test _evaluate_flow_step without a when condition."""
+        flow_config = FlowConfig({"description": "Test Flow", "steps": {}})
+        flow_config.project_config = self.project_config
+        coordinator = FlowCoordinator(self.project_config, flow_config)
+        coordinator.org_config = self.org_config
+
+        step = FlowStepSpec(
+            task_config={},
+            step_num="1.0",
+            task_name="test_flow",
+            task_class=None,
+            project_config=self.project_config,
+            allow_failure=False,
+            when=None,
+        )
+
+        result = coordinator._evaluate_flow_step(step)
+        assert result is True
+
+    def test_is_task_in_skipped_flow_true(self):
+        """Test _is_task_in_skipped_flow returns True when task is in skipped flow."""
+        flow_config = FlowConfig({"description": "Test Flow", "steps": {}})
+        flow_config.project_config = self.project_config
+        coordinator = FlowCoordinator(self.project_config, flow_config)
+        coordinator.org_config = self.org_config
+
+        skipped_flows_set = {"skipped_flow", "another_flow"}
+        task_path = "skipped_flow.sub_task"
+
+        result = coordinator._is_task_in_skipped_flow(task_path, skipped_flows_set)
+        assert result is True
+
+    def test_is_task_in_skipped_flow_false(self):
+        """Test _is_task_in_skipped_flow returns False when task is not in skipped flow."""
+        flow_config = FlowConfig({"description": "Test Flow", "steps": {}})
+        flow_config.project_config = self.project_config
+        coordinator = FlowCoordinator(self.project_config, flow_config)
+        coordinator.org_config = self.org_config
+
+        skipped_flows_set = {"skipped_flow", "another_flow"}
+        task_path = "normal_flow.sub_task"
+
+        result = coordinator._is_task_in_skipped_flow(task_path, skipped_flows_set)
+        assert result is False
+
+    def test_is_task_in_skipped_flow_empty_set(self):
+        """Test _is_task_in_skipped_flow with empty skipped flows set."""
+        flow_config = FlowConfig({"description": "Test Flow", "steps": {}})
+        flow_config.project_config = self.project_config
+        coordinator = FlowCoordinator(self.project_config, flow_config)
+        coordinator.org_config = self.org_config
+
+        skipped_flows_set = set()
+        task_path = "any_flow.sub_task"
+
+        result = coordinator._is_task_in_skipped_flow(task_path, skipped_flows_set)
+        assert result is False
+
+
+class TestExpressionCaching:
+    """Test Jinja2 expression caching functionality."""
+
+    def setup_method(self):
+        """Set up test fixtures."""
+        self.project_config = create_project_config("TestOwner", "TestRepo")
+        self.org_config = OrgConfig(
+            {"username": "test@example.com"}, "test", mock.Mock()
+        )
+        self.org_config.refresh_oauth_token = mock.Mock()
+
+    def test_expression_caching_reuse(self):
+        """Test that compiled expressions are cached and reused."""
+        flow_config = FlowConfig({"description": "Test Flow", "steps": {}})
+        flow_config.project_config = self.project_config
+        coordinator = FlowCoordinator(self.project_config, flow_config)
+        coordinator.org_config = self.org_config
+
+        # Clear any existing cache
+        coordinator._expression_cache = {}
+
+        step1 = FlowStepSpec(
+            task_config={},
+            step_num="1.0",
+            task_name="test_flow1",
+            task_class=None,
+            project_config=self.project_config,
+            allow_failure=False,
+            when="org_config.username == 'test@example.com'",
+        )
+
+        step2 = FlowStepSpec(
+            task_config={},
+            step_num="2.0",
+            task_name="test_flow2",
+            task_class=None,
+            project_config=self.project_config,
+            allow_failure=False,
+            when="org_config.username == 'test@example.com'",
+        )
+
+        # First evaluation should compile and cache the expression
+        result1 = coordinator._evaluate_flow_step(step1)
+        assert result1 is True
+        assert len(coordinator._expression_cache) == 1
+
+        # Second evaluation should use cached expression
+        result2 = coordinator._evaluate_flow_step(step2)
+        assert result2 is True
+        assert (
+            len(coordinator._expression_cache) == 1
+        )  # Still only one cached expression
+
+    def test_expression_caching_different_expressions(self):
+        """Test that different expressions are cached separately."""
+        flow_config = FlowConfig({"description": "Test Flow", "steps": {}})
+        flow_config.project_config = self.project_config
+        coordinator = FlowCoordinator(self.project_config, flow_config)
+        coordinator.org_config = self.org_config
+
+        # Clear any existing cache
+        coordinator._expression_cache = {}
+
+        step1 = FlowStepSpec(
+            task_config={},
+            step_num="1.0",
+            task_name="test_flow1",
+            task_class=None,
+            project_config=self.project_config,
+            allow_failure=False,
+            when="org_config.username == 'test@example.com'",
+        )
+
+        step2 = FlowStepSpec(
+            task_config={},
+            step_num="2.0",
+            task_name="test_flow2",
+            task_class=None,
+            project_config=self.project_config,
+            allow_failure=False,
+            when="org_config.username == 'wrong@example.com'",
+        )
+
+        # Evaluate both steps
+        coordinator._evaluate_flow_step(step1)
+        coordinator._evaluate_flow_step(step2)
+
+        # Should have two different cached expressions
+        assert len(coordinator._expression_cache) == 2
+
+
+class TestPerformanceImprovements:
+    """Test that performance improvements work correctly."""
+
+    def setup_method(self):
+        """Set up test fixtures."""
+        self.project_config = create_project_config("TestOwner", "TestRepo")
+        self.org_config = OrgConfig(
+            {"username": "test@example.com"}, "test", mock.Mock()
+        )
+        self.org_config.refresh_oauth_token = mock.Mock()
+
+    def test_context_reuse(self):
+        """Test that Jinja2 context is reused when possible."""
+        flow_config = FlowConfig({"description": "Test Flow", "steps": {}})
+        flow_config.project_config = self.project_config
+        coordinator = FlowCoordinator(self.project_config, flow_config)
+        coordinator.org_config = self.org_config
+
+        # Clear any existing context
+        coordinator._jinja2_context = None
+        coordinator._context_project_config = None
+        coordinator._context_org_config = None
+
+        step = FlowStepSpec(
+            task_config={},
+            step_num="1.0",
+            task_name="test_flow",
+            task_class=None,
+            project_config=self.project_config,
+            allow_failure=False,
+            when="org_config.username == 'test@example.com'",
+        )
+
+        # First evaluation should create context
+        result1 = coordinator._evaluate_flow_step(step)
+        assert result1 is True
+        assert coordinator._jinja2_context is not None
+        assert coordinator._context_project_config == self.project_config
+        assert coordinator._context_org_config == self.org_config
+
+        # Second evaluation should reuse context
+        original_context = coordinator._jinja2_context
+        result2 = coordinator._evaluate_flow_step(step)
+        assert result2 is True
+        assert coordinator._jinja2_context is original_context  # Same object reused
cumulusci/cli/tests/test_task.py
CHANGED
@@ -1,6 +1,8 @@
 import contextlib
 import io
 import json
+import tempfile
+from pathlib import Path
 from unittest.mock import Mock, patch
 
 import click
@@ -126,10 +128,10 @@ def test_format_help(runtime):
 
 def test_get_default_command_options():
     opts = task.RunTaskCommand()._get_default_command_options(is_salesforce_task=False)
-    assert len(opts) ==
+    assert len(opts) == 5
 
     opts = task.RunTaskCommand()._get_default_command_options(is_salesforce_task=True)
-    assert len(opts) ==
+    assert len(opts) == 6
     assert any([o.name == "org" for o in opts])
 
 
@@ -264,3 +266,87 @@ class SetTrace(Exception):
 class DummyDerivedTask(DummyTask):
     def _run_task(self):
         click.echo(f"<{self.__class__}>\n\tcolor: {self.options['color']}")
+
+
+@patch("cumulusci.cli.task.load_dotenv")
+def test_task_run__loadenv_with_project_root(load_dotenv, runtime):
+    """Test that loadenv loads .env file from project root when project exists."""
+    DummyTask._run_task = Mock()
+
+    # Create a temporary directory for the test
+    with tempfile.TemporaryDirectory() as temp_dir:
+        runtime.project_config._repo_info = {"root": temp_dir}
+
+        multi_cmd = task.RunTaskCommand()
+        with click.Context(multi_cmd, obj=runtime) as ctx:
+            cmd = multi_cmd.get_command(ctx, "dummy-task")
+            cmd.callback(runtime, "dummy-task", color="blue", loadenv=True)
+
+        # Verify load_dotenv was called with the correct path
+        expected_path = Path(temp_dir) / ".env"
+        load_dotenv.assert_called_once_with(expected_path)
+        DummyTask._run_task.assert_called_once()
+
+
+@patch("cumulusci.cli.task.load_dotenv")
+def test_task_run__loadenv_false(load_dotenv, runtime):
+    """Test that loadenv does not call load_dotenv when loadenv=False."""
+    DummyTask._run_task = Mock()
+
+    multi_cmd = task.RunTaskCommand()
+    with click.Context(multi_cmd, obj=runtime) as ctx:
+        cmd = multi_cmd.get_command(ctx, "dummy-task")
+        cmd.callback(runtime, "dummy-task", color="blue", loadenv=False)
+
+    # Verify load_dotenv was not called
+    load_dotenv.assert_not_called()
+    DummyTask._run_task.assert_called_once()
+
+
+@patch("cumulusci.cli.task.load_dotenv")
+def test_task_run__loadenv_not_provided(load_dotenv, runtime):
+    """Test that loadenv does not call load_dotenv when loadenv is not provided."""
+    DummyTask._run_task = Mock()
+
+    multi_cmd = task.RunTaskCommand()
+    with click.Context(multi_cmd, obj=runtime) as ctx:
+        cmd = multi_cmd.get_command(ctx, "dummy-task")
+        cmd.callback(runtime, "dummy-task", color="blue")
+
+    # Verify load_dotenv was not called
+    load_dotenv.assert_not_called()
+    DummyTask._run_task.assert_called_once()
+
+
+@patch("cumulusci.cli.task.load_dotenv")
+def test_task_run__loadenv_none_value(load_dotenv, runtime):
+    """Test that loadenv does not call load_dotenv when loadenv=None."""
+    DummyTask._run_task = Mock()
+
+    multi_cmd = task.RunTaskCommand()
+    with click.Context(multi_cmd, obj=runtime) as ctx:
+        cmd = multi_cmd.get_command(ctx, "dummy-task")
+        cmd.callback(runtime, "dummy-task", color="blue", loadenv=None)
+
+    # Verify load_dotenv was not called
+    load_dotenv.assert_not_called()
+    DummyTask._run_task.assert_called_once()
+
+
+def test_get_default_command_options_includes_loadenv():
+    """Test that the loadenv option is included in default command options."""
+    opts = task.RunTaskCommand()._get_default_command_options(is_salesforce_task=False)
+
+    # Should have 5 global options including loadenv
+    assert len(opts) == 5
+
+    # Find the loadenv option
+    loadenv_opt = None
+    for opt in opts:
+        if hasattr(opt, "name") and opt.name == "loadenv":
+            loadenv_opt = opt
+            break
+
+    assert loadenv_opt is not None
+    assert loadenv_opt.is_flag is True
+    assert "Loads environment variables from the .env file" in loadenv_opt.help
cumulusci/core/flowrunner.py
CHANGED
@@ -164,6 +164,11 @@ class StepSpec:
         )
 
 
+class FlowStepSpec(StepSpec):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+
 class StepResult(NamedTuple):
     step_num: StepVersion
     task_name: str
@@ -356,6 +361,10 @@ class FlowCoordinator:
 
         self.logger = self._init_logger()
         self.steps = self._init_steps()
+        self._expression_cache = {}
+        self._jinja2_context = None
+        self._context_project_config = None
+        self._context_org_config = None
 
     @classmethod
     def from_steps(
@@ -403,6 +412,9 @@ class FlowCoordinator:
         previous_parts = []
         previous_source = None
         for step in self.steps:
+            if isinstance(step, FlowStepSpec):
+                continue
+
             parts = step.path.split(".")
             steps = str(step.step_num).split("/")
             if len(parts) > len(steps):
@@ -491,7 +503,27 @@ class FlowCoordinator:
         self._rule(new_line=True)
 
         try:
+            # Pre-evaluate all flow conditions
+            skipped_flows_set = set()
             for step in self.steps:
+                if isinstance(step, FlowStepSpec):
+                    if not self._evaluate_flow_step(step):
+                        skipped_flows_set.add(step.path)
+
+            # Main execution loop with optimized path checking
+            for step in self.steps:
+                if isinstance(step, FlowStepSpec):
+                    self.logger.info(
+                        f"Skipping Flow {step.task_name} (skipped unless {step.when})"
+                    )
+                    continue
+
+                if self._is_task_in_skipped_flow(step.path, skipped_flows_set):
+                    self.logger.info(
+                        f"Skipping Task {step.task_name} in flow {step.path} (parent flow is skipped)"
+                    )
+                    continue
+
                 self._run_step(step)
             flow_name = f"'{self.name}' " if self.name else ""
             self.logger.info(
@@ -500,6 +532,45 @@ class FlowCoordinator:
         finally:
             self.callbacks.post_flow(self)
 
+    def _get_jinja2_context(self, project_config, org_config):
+        """Get or create jinja2 context, reusing when possible."""
+        if (
+            self._jinja2_context is None
+            or self._context_project_config != project_config
+            or self._context_org_config != org_config
+        ):
+
+            self._jinja2_context = {
+                "project_config": project_config,
+                "org_config": org_config,
+            }
+            self._context_project_config = project_config
+            self._context_org_config = org_config
+
+        return self._jinja2_context
+
+    def _evaluate_flow_step(self, step: StepSpec) -> bool:
+        if not step.when:
+            return True
+
+        # Check cache first
+        if step.when in self._expression_cache:
+            expr = self._expression_cache[step.when]
+        else:
+            expr = jinja2_env.compile_expression(step.when)
+            self._expression_cache[step.when] = expr
+
+        jinja2_context = self._get_jinja2_context(step.project_config, self.org_config)
+
+        return expr(**jinja2_context)
+
+    def _is_task_in_skipped_flow(self, task_path: str, skipped_flows_set: set) -> bool:
+        """Check if task belongs to any skipped flow using O(1) set lookup."""
+        for skipped_path in skipped_flows_set:
+            if task_path.startswith(skipped_path + "."):
+                return True
+        return False
+
     def _run_step(self, step: StepSpec):
         if step.skip:
             self._rule(fill="*")
@@ -508,12 +579,7 @@ class FlowCoordinator:
             return
 
         if step.when:
-            jinja2_context = {
-                "project_config": step.project_config,
-                "org_config": self.org_config,
-            }
-            expr = jinja2_env.compile_expression(step.when)
-            value = expr(**jinja2_context)
+            value = self._evaluate_flow_step(step)
             if not value:
                 self.logger.info(
                     f"Skipping task {step.task_name} (skipped unless {step.when})"
@@ -685,6 +751,20 @@ class FlowCoordinator:
            step_options.update(parent_task_options)
            step_ui_options = step_config.get("ui_options", {})
            flow_config = project_config.get_flow(name)
+
+           if step_config.get("when"):
+               visited_steps.append(
+                   FlowStepSpec(
+                       task_config={},
+                       step_num=step_number,
+                       task_name=path,
+                       task_class=None,
+                       project_config=flow_config.project_config,
+                       allow_failure=step_config.get("ignore_failure", False),
+                       when=step_config.get("when"),
+                   )
+               )
+
            for sub_number, sub_stepconf in flow_config.steps.items():
                # append the flow number to the child number, since its a LooseVersion.
                # e.g. if we're in step 2.3 which references a flow with steps 1-5, it
cumulusci/cumulusci.yml
CHANGED
@@ -795,6 +795,10 @@ tasks:
         description: Load Custom Settings specified in a YAML file to the target org
         class_path: cumulusci.tasks.salesforce.LoadCustomSettings
         group: "Data Operations"
+    sfdmu:
+        description: Execute SFDmu data migration with namespace injection support
+        class_path: cumulusci.tasks.sfdmu.sfdmu.SfdmuTask
+        group: "Data Operations"
     remove_metadata_xml_elements:
         description: Remove specified XML elements from one or more metadata files
         class_path: cumulusci.tasks.metadata.modify.RemoveElementsXPath
@@ -831,6 +835,26 @@ tasks:
         class_path: cumulusci.tasks.salesforce.SfDataCommands.DataCreateRecordTask
         description: "Executes the `sf data create` command against an org"
         group: SalesforceDX Data Commands
+    update_package_version:
+        class_path: cumulusci.tasks.salesforce.SfPackageCommands.PackageVersionUpdateTask
+        description: "Executes the `sf package version update` command against an org"
+        group: Salesforce Packages
+    get_package_version:
+        description: Get package version id from package name and version
+        class_path: cumulusci.tasks.salesforce.getPackageVersion.GetPackageVersion
+        group: Salesforce Packages
+        options:
+            package_name: $project_config.project__package__name
+            fail_on_error: False
+    update_named_credential:
+        class_path: cumulusci.tasks.salesforce.update_named_credential.UpdateNamedCredential
+        description: Update named credential parameters
+        group: Metadata Transformations
+    update_external_credential:
+        class_path: cumulusci.tasks.salesforce.update_external_credential.UpdateExternalCredential
+        description: Update external credential parameters
+        group: Metadata Transformations
+
 flows:
     ci_beta:
         group: Continuous Integration
cumulusci/tasks/create_package_version.py
CHANGED
@@ -29,7 +29,7 @@ from cumulusci.core.exceptions import (
     VcsException,
 )
 from cumulusci.core.sfdx import convert_sfdx_source
-from cumulusci.core.utils import process_bool_arg
+from cumulusci.core.utils import process_bool_arg, process_list_arg
 from cumulusci.core.versions import PackageType, PackageVersionNumber, VersionTypeEnum
 from cumulusci.salesforce_api.package_zip import (
     BasePackageZipBuilder,
@@ -163,10 +163,9 @@ class CreatePackageVersion(BaseSalesforceApiTask):
            "Defaults to False."
        },
        "dependencies": {
-           "description": "The dependencies to use when creating the package version. Defaults to None."
-           "Ensure that the dependencies are in the correct format for Package2VersionCreateRequest."
+           "description": "The list of dependencies to use when creating the package version. Defaults to None."
            "If not provided, the dependencies will be resolved using the resolution_strategy."
-           "The format should be a
+           "The format should be a pcakge version Ids i.e '04t...,04t...'"
        },
    }
 
@@ -219,6 +218,14 @@ class CreatePackageVersion(BaseSalesforceApiTask):
            if self.options.get("version_number")
            else None
        )
+       self.options["dependencies"] = (
+           [
+               {"subscriberPackageVersionId": x}
+               for x in process_list_arg(self.options.get("dependencies"))
+           ]
+           if self.options.get("dependencies")
+           else None
+       )
 
    def _init_task(self):
        self.tooling = get_simple_salesforce_connection(
@@ -465,8 +472,9 @@ class CreatePackageVersion(BaseSalesforceApiTask):
            and not is_dependency
        ):
            self.logger.info("Determining dependencies for package")
-           dependencies =
-
+           dependencies = (
+               self.options.get("dependencies") or self._get_dependencies()
+           )
            if dependencies:
                package_descriptor["dependencies"] = dependencies
 
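
Note: the new `dependencies` option accepts a comma-separated string of 04t subscriber package version ids and converts it to the list-of-dicts shape placed in the package descriptor (the format the old option help referred to as Package2VersionCreateRequest). A minimal sketch of that normalization, with a plain str.split standing in for cumulusci.core.utils.process_list_arg and made-up ids:

def normalize_dependencies(raw):
    """Turn '04t...,04t...' into the dependency dicts used in the package descriptor."""
    if not raw:
        return None
    ids = [part.strip() for part in raw.split(",") if part.strip()]
    return [{"subscriberPackageVersionId": x} for x in ids]


print(normalize_dependencies("04t000000000001AAA,04t000000000002AAA"))
# [{'subscriberPackageVersionId': '04t000000000001AAA'},
#  {'subscriberPackageVersionId': '04t000000000002AAA'}]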