pulumi-newrelic 5.23.0a1712988017-py3-none-any.whl → 5.23.0a1713561620-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- pulumi_newrelic/account_management.py +6 -2
- pulumi_newrelic/alert_channel.py +48 -36
- pulumi_newrelic/alert_condition.py +20 -16
- pulumi_newrelic/alert_muting_rule.py +12 -10
- pulumi_newrelic/alert_policy.py +26 -10
- pulumi_newrelic/alert_policy_channel.py +12 -4
- pulumi_newrelic/cloud/aws_govcloud_link_account.py +4 -2
- pulumi_newrelic/cloud/aws_integrations.py +6 -4
- pulumi_newrelic/cloud/aws_link_account.py +4 -2
- pulumi_newrelic/cloud/azure_integrations.py +10 -8
- pulumi_newrelic/cloud/azure_link_account.py +4 -2
- pulumi_newrelic/cloud/gcp_integrations.py +6 -2
- pulumi_newrelic/cloud/gcp_link_account.py +4 -2
- pulumi_newrelic/entity_tags.py +6 -6
- pulumi_newrelic/events_to_metrics_rule.py +2 -0
- pulumi_newrelic/get_application.py +8 -6
- pulumi_newrelic/get_authentication_domain.py +4 -4
- pulumi_newrelic/get_entity.py +28 -16
- pulumi_newrelic/get_group.py +46 -4
- pulumi_newrelic/get_key_transaction.py +8 -6
- pulumi_newrelic/get_obfuscation_expression.py +2 -0
- pulumi_newrelic/get_service_level_alert_helper.py +22 -16
- pulumi_newrelic/get_test_grok_pattern.py +4 -2
- pulumi_newrelic/group.py +40 -30
- pulumi_newrelic/infra_alert_condition.py +28 -18
- pulumi_newrelic/insights/event.py +8 -8
- pulumi_newrelic/log_parsing_rule.py +6 -2
- pulumi_newrelic/monitor_downtime.py +104 -92
- pulumi_newrelic/notification_channel.py +124 -106
- pulumi_newrelic/nrql_alert_condition.py +38 -28
- pulumi_newrelic/nrql_drop_rule.py +6 -6
- pulumi_newrelic/obfuscation_expression.py +2 -0
- pulumi_newrelic/obfuscation_rule.py +4 -0
- pulumi_newrelic/one_dashboard_raw.py +86 -82
- pulumi_newrelic/plugins/application_settings.py +6 -4
- pulumi_newrelic/plugins/workload.py +138 -0
- pulumi_newrelic/service_level.py +38 -32
- pulumi_newrelic/synthetics/alert_condition.py +20 -14
- pulumi_newrelic/synthetics/broken_links_monitor.py +18 -12
- pulumi_newrelic/synthetics/cert_check_monitor.py +16 -10
- pulumi_newrelic/synthetics/monitor.py +48 -36
- pulumi_newrelic/synthetics/multi_location_alert_condition.py +20 -12
- pulumi_newrelic/synthetics/private_location.py +6 -2
- pulumi_newrelic/synthetics/script_monitor.py +34 -22
- pulumi_newrelic/synthetics/secure_credential.py +4 -4
- pulumi_newrelic/synthetics/step_monitor.py +14 -8
- pulumi_newrelic/user.py +8 -6
- pulumi_newrelic/workflow.py +20 -8
- {pulumi_newrelic-5.23.0a1712988017.dist-info → pulumi_newrelic-5.23.0a1713561620.dist-info}/METADATA +1 -1
- pulumi_newrelic-5.23.0a1713561620.dist-info/RECORD +89 -0
- pulumi_newrelic-5.23.0a1712988017.dist-info/RECORD +0 -89
- {pulumi_newrelic-5.23.0a1712988017.dist-info → pulumi_newrelic-5.23.0a1713561620.dist-info}/WHEEL +0 -0
- {pulumi_newrelic-5.23.0a1712988017.dist-info → pulumi_newrelic-5.23.0a1713561620.dist-info}/top_level.txt +0 -0
pulumi_newrelic/infra_alert_condition.py

@@ -604,9 +604,10 @@ class InfraAlertCondition(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-foo = newrelic.AlertPolicy("foo")
-high_disk_usage = newrelic.InfraAlertCondition("
+foo = newrelic.AlertPolicy("foo", name="foo")
+high_disk_usage = newrelic.InfraAlertCondition("high_disk_usage",
     policy_id=foo.id,
+    name="High disk usage",
     description="Warning if disk usage goes above 80% and critical alert if goes above 90%",
     type="infra_metric",
     event="StorageSample",
@@ -623,8 +624,9 @@ class InfraAlertCondition(pulumi.CustomResource):
         value=80,
         time_function="all",
     ))
-high_db_conn_count = newrelic.InfraAlertCondition("
+high_db_conn_count = newrelic.InfraAlertCondition("high_db_conn_count",
     policy_id=foo.id,
+    name="High database connection count",
     description="Critical alert when the number of database connections goes above 90",
     type="infra_metric",
     event="DatastoreSample",
@@ -637,8 +639,9 @@ class InfraAlertCondition(pulumi.CustomResource):
         value=90,
         time_function="all",
     ))
-process_not_running = newrelic.InfraAlertCondition("
+process_not_running = newrelic.InfraAlertCondition("process_not_running",
     policy_id=foo.id,
+    name="Process not running (/usr/bin/ruby)",
     description="Critical alert when ruby isn't running",
     type="infra_process_running",
     comparison="equal",
@@ -648,8 +651,9 @@ class InfraAlertCondition(pulumi.CustomResource):
         duration=5,
         value=0,
     ))
-host_not_reporting = newrelic.InfraAlertCondition("
+host_not_reporting = newrelic.InfraAlertCondition("host_not_reporting",
     policy_id=foo.id,
+    name="Host not reporting",
     description="Critical alert when the host is not reporting",
     type="infra_host_not_reporting",
     where="(hostname LIKE '%frontend%')",
@@ -676,9 +680,10 @@ class InfraAlertCondition(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-
-foo_infra_alert_condition = newrelic.InfraAlertCondition("
-    policy_id=
+foo = newrelic.AlertPolicy("foo", name="foo policy")
+foo_infra_alert_condition = newrelic.InfraAlertCondition("foo",
+    policy_id=foo.id,
+    name="foo infra condition",
     description="Warning if disk usage goes above 80% and critical alert if goes above 90%",
     type="infra_metric",
     event="StorageSample",
@@ -695,7 +700,7 @@ class InfraAlertCondition(pulumi.CustomResource):
         value=80,
         time_function="all",
     ))
-my_condition_entity_tags = newrelic.EntityTags("
+my_condition_entity_tags = newrelic.EntityTags("my_condition_entity_tags",
     guid=foo_infra_alert_condition.entity_guid,
     tags=[
         newrelic.EntityTagsTagArgs(
@@ -761,9 +766,10 @@ class InfraAlertCondition(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-foo = newrelic.AlertPolicy("foo")
-high_disk_usage = newrelic.InfraAlertCondition("
+foo = newrelic.AlertPolicy("foo", name="foo")
+high_disk_usage = newrelic.InfraAlertCondition("high_disk_usage",
     policy_id=foo.id,
+    name="High disk usage",
     description="Warning if disk usage goes above 80% and critical alert if goes above 90%",
     type="infra_metric",
     event="StorageSample",
@@ -780,8 +786,9 @@ class InfraAlertCondition(pulumi.CustomResource):
         value=80,
         time_function="all",
     ))
-high_db_conn_count = newrelic.InfraAlertCondition("
+high_db_conn_count = newrelic.InfraAlertCondition("high_db_conn_count",
     policy_id=foo.id,
+    name="High database connection count",
     description="Critical alert when the number of database connections goes above 90",
     type="infra_metric",
     event="DatastoreSample",
@@ -794,8 +801,9 @@ class InfraAlertCondition(pulumi.CustomResource):
         value=90,
         time_function="all",
     ))
-process_not_running = newrelic.InfraAlertCondition("
+process_not_running = newrelic.InfraAlertCondition("process_not_running",
     policy_id=foo.id,
+    name="Process not running (/usr/bin/ruby)",
     description="Critical alert when ruby isn't running",
     type="infra_process_running",
     comparison="equal",
@@ -805,8 +813,9 @@ class InfraAlertCondition(pulumi.CustomResource):
         duration=5,
         value=0,
     ))
-host_not_reporting = newrelic.InfraAlertCondition("
+host_not_reporting = newrelic.InfraAlertCondition("host_not_reporting",
     policy_id=foo.id,
+    name="Host not reporting",
     description="Critical alert when the host is not reporting",
     type="infra_host_not_reporting",
     where="(hostname LIKE '%frontend%')",
@@ -833,9 +842,10 @@ class InfraAlertCondition(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-
-foo_infra_alert_condition = newrelic.InfraAlertCondition("
-    policy_id=
+foo = newrelic.AlertPolicy("foo", name="foo policy")
+foo_infra_alert_condition = newrelic.InfraAlertCondition("foo",
+    policy_id=foo.id,
+    name="foo infra condition",
     description="Warning if disk usage goes above 80% and critical alert if goes above 90%",
     type="infra_metric",
     event="StorageSample",
@@ -852,7 +862,7 @@ class InfraAlertCondition(pulumi.CustomResource):
         value=80,
         time_function="all",
     ))
-my_condition_entity_tags = newrelic.EntityTags("
+my_condition_entity_tags = newrelic.EntityTags("my_condition_entity_tags",
     guid=foo_infra_alert_condition.entity_guid,
     tags=[
         newrelic.EntityTagsTagArgs(
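For readability, the updated `infra_alert_condition.py` docstring example can be assembled from the hunks above into a single program. This is a sketch, not a verbatim copy: the `comparison`/`select` settings and the `critical` block's `duration` fall outside the changed lines and are assumptions, while `value` and `time_function` come from the unchanged context lines.

```python
import pulumi_newrelic as newrelic

# The regenerated examples pass the New Relic display name explicitly
# via `name` instead of deriving it from the Pulumi resource name.
foo = newrelic.AlertPolicy("foo", name="foo")

high_disk_usage = newrelic.InfraAlertCondition("high_disk_usage",
    policy_id=foo.id,
    name="High disk usage",
    description="Warning if disk usage goes above 80% and critical alert if goes above 90%",
    type="infra_metric",
    event="StorageSample",
    comparison="above",        # assumed; not visible in the hunks
    select="diskUsedPercent",  # assumed; not visible in the hunks
    critical=newrelic.InfraAlertConditionCriticalArgs(
        duration=25,           # assumed; only value/time_function appear in the hunks
        value=80,
        time_function="all",
    ))
```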
pulumi_newrelic/insights/event.py

@@ -78,6 +78,8 @@ class Event(pulumi.CustomResource):
 import pulumi_newrelic as newrelic
 
 foo = newrelic.insights.Event("foo", events=[newrelic.insights.EventEventArgs(
+    type="MyEvent",
+    timestamp=1232471100,
     attributes=[
         newrelic.insights.EventEventAttributeArgs(
             key="a_string_attribute",
@@ -85,17 +87,15 @@ class Event(pulumi.CustomResource):
         ),
         newrelic.insights.EventEventAttributeArgs(
             key="an_integer_attribute",
-            type="int",
             value="42",
+            type="int",
         ),
         newrelic.insights.EventEventAttributeArgs(
             key="a_float_attribute",
-            type="float",
             value="101.1",
+            type="float",
         ),
     ],
-    timestamp=1232471100,
-    type="MyEvent",
 )])
 ```
 <!--End PulumiCodeChooser -->
@@ -137,6 +137,8 @@ class Event(pulumi.CustomResource):
 import pulumi_newrelic as newrelic
 
 foo = newrelic.insights.Event("foo", events=[newrelic.insights.EventEventArgs(
+    type="MyEvent",
+    timestamp=1232471100,
     attributes=[
         newrelic.insights.EventEventAttributeArgs(
             key="a_string_attribute",
@@ -144,17 +146,15 @@ class Event(pulumi.CustomResource):
         ),
         newrelic.insights.EventEventAttributeArgs(
             key="an_integer_attribute",
-            type="int",
             value="42",
+            type="int",
         ),
         newrelic.insights.EventEventAttributeArgs(
             key="a_float_attribute",
-            type="float",
             value="101.1",
+            type="float",
         ),
     ],
-    timestamp=1232471100,
-    type="MyEvent",
 )])
 ```
 <!--End PulumiCodeChooser -->
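Assembled from the two identical `insights/event.py` hunks, the reordered example reads as below. Only the first attribute's `value` falls outside the hunk context and is a placeholder.

```python
import pulumi_newrelic as newrelic

# `type` and `timestamp` now lead the event block, and each attribute's
# `type` follows its `value`; the data itself is unchanged.
foo = newrelic.insights.Event("foo", events=[newrelic.insights.EventEventArgs(
    type="MyEvent",
    timestamp=1232471100,
    attributes=[
        newrelic.insights.EventEventAttributeArgs(
            key="a_string_attribute",
            value="a string",  # placeholder; this line is outside the hunk context
        ),
        newrelic.insights.EventEventAttributeArgs(
            key="an_integer_attribute",
            value="42",
            type="int",
        ),
        newrelic.insights.EventEventAttributeArgs(
            key="a_float_attribute",
            value="101.1",
            type="float",
        ),
    ],
)])
```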
pulumi_newrelic/log_parsing_rule.py

@@ -321,9 +321,10 @@ class LogParsingRule(pulumi.CustomResource):
 import pulumi_newrelic as newrelic
 
 foo = newrelic.LogParsingRule("foo",
+    name="log_parse_rule",
     attribute="message",
     enabled=True,
-    grok="sampleattribute='
+    grok="sampleattribute='%{NUMBER:test:int}'",
     lucene="logtype:linux_messages",
     nrql="SELECT * FROM Log WHERE logtype = 'linux_messages'")
 ```
@@ -341,6 +342,7 @@ class LogParsingRule(pulumi.CustomResource):
 grok = newrelic.get_test_grok_pattern(grok="%{IP:host_ip}",
     log_lines=["host_ip: 43.3.120.2"])
 foo = newrelic.LogParsingRule("foo",
+    name="log_parse_rule",
     attribute="message",
     enabled=True,
     grok=grok.grok,
@@ -389,9 +391,10 @@ class LogParsingRule(pulumi.CustomResource):
 import pulumi_newrelic as newrelic
 
 foo = newrelic.LogParsingRule("foo",
+    name="log_parse_rule",
     attribute="message",
     enabled=True,
-    grok="sampleattribute='
+    grok="sampleattribute='%{NUMBER:test:int}'",
     lucene="logtype:linux_messages",
     nrql="SELECT * FROM Log WHERE logtype = 'linux_messages'")
 ```
@@ -409,6 +412,7 @@ class LogParsingRule(pulumi.CustomResource):
 grok = newrelic.get_test_grok_pattern(grok="%{IP:host_ip}",
     log_lines=["host_ip: 43.3.120.2"])
 foo = newrelic.LogParsingRule("foo",
+    name="log_parse_rule",
     attribute="message",
     enabled=True,
     grok=grok.grok,
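The updated `log_parsing_rule.py` example, assembled directly from the hunks above (every line here is visible in the diff), adds the explicit `name` and restores the full Grok pattern that the old, truncated line obscured.

```python
import pulumi_newrelic as newrelic

# Static Grok pattern variant; the second example in the hunks builds the
# pattern with newrelic.get_test_grok_pattern() and passes grok=grok.grok.
foo = newrelic.LogParsingRule("foo",
    name="log_parse_rule",
    attribute="message",
    enabled=True,
    grok="sampleattribute='%{NUMBER:test:int}'",
    lucene="logtype:linux_messages",
    nrql="SELECT * FROM Log WHERE logtype = 'linux_messages'")
```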
pulumi_newrelic/monitor_downtime.py

@@ -380,21 +380,22 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi_newrelic as newrelic
 
 foo = newrelic.MonitorDowntime("foo",
+    name="Sample Monitor Downtime",
+    monitor_guids=[
+        "<GUID-1>",
+        "<GUID-2>",
+    ],
+    mode="WEEKLY",
+    start_time="2023-11-30T10:30:00",
+    end_time="2023-12-10T02:45:30",
+    time_zone="Asia/Kolkata",
     end_repeat=newrelic.MonitorDowntimeEndRepeatArgs(
         on_date="2023-12-20",
     ),
-    end_time="2023-12-10T02:45:30",
     maintenance_days=[
         "FRIDAY",
         "SATURDAY",
-    ]
-    mode="WEEKLY",
-    monitor_guids=[
-        "<GUID-1>",
-        "<GUID-2>",
-    ],
-    start_time="2023-11-30T10:30:00",
-    time_zone="Asia/Kolkata")
+    ])
 ```
 <!--End PulumiCodeChooser -->
 Monitor Downtimes are of four types; **one-time**, **daily**, **weekly** and **monthly**. For more details on each type and the right arguments that go with them, check out the argument reference and examples sections below.
@@ -410,14 +411,15 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_one_time_newrelic_monitor_downtime = newrelic.MonitorDowntime("
-
-    mode="ONE_TIME",
+sample_one_time_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_one_time_newrelic_monitor_downtime",
+    name="Sample One Time Monitor Downtime",
     monitor_guids=[
         "<GUID-1>",
         "<GUID-2>",
     ],
+    mode="ONE_TIME",
     start_time="2023-12-04T10:15:00",
+    end_time="2024-01-04T16:24:30",
     time_zone="America/Los_Angeles")
 ```
 <!--End PulumiCodeChooser -->
@@ -433,17 +435,18 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_daily_newrelic_monitor_downtime = newrelic.MonitorDowntime("
-
-        on_date="2023-12-25",
-    ),
-    end_time="2024-01-04T07:15:00",
-    mode="DAILY",
+sample_daily_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_daily_newrelic_monitor_downtime",
+    name="Sample Daily Monitor Downtime",
     monitor_guids=[
         "<GUID-1>",
         "<GUID-2>",
     ],
+    mode="DAILY",
     start_time="2023-12-04T18:15:00",
+    end_time="2024-01-04T07:15:00",
+    end_repeat=newrelic.MonitorDowntimeEndRepeatArgs(
+        on_date="2023-12-25",
+    ),
     time_zone="Asia/Kolkata")
 ```
 <!--End PulumiCodeChooser -->
@@ -459,19 +462,20 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_weekly_newrelic_monitor_downtime = newrelic.MonitorDowntime("
-
-    maintenance_days=[
-        "SATURDAY",
-        "SUNDAY",
-    ],
-    mode="WEEKLY",
+sample_weekly_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_weekly_newrelic_monitor_downtime",
+    name="Sample Weekly Monitor Downtime",
     monitor_guids=[
         "<GUID-1>",
         "<GUID-2>",
     ],
+    mode="WEEKLY",
     start_time="2023-12-04T14:15:00",
-
+    end_time="2024-01-04T23:55:00",
+    time_zone="US/Hawaii",
+    maintenance_days=[
+        "SATURDAY",
+        "SUNDAY",
+    ])
 ```
 <!--End PulumiCodeChooser -->
 
@@ -486,24 +490,25 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_monthly_newrelic_monitor_downtime = newrelic.MonitorDowntime("
+sample_monthly_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_monthly_newrelic_monitor_downtime",
+    name="Sample Monthly Monitor Downtime",
+    monitor_guids=[
+        "<GUID-1>",
+        "<GUID-2>",
+    ],
+    mode="MONTHLY",
+    start_time="2023-12-04T07:15:00",
+    end_time="2024-01-04T19:15:00",
     end_repeat=newrelic.MonitorDowntimeEndRepeatArgs(
         on_repeat=6,
     ),
-
+    time_zone="Europe/Dublin",
     frequency=newrelic.MonitorDowntimeFrequencyArgs(
         days_of_week=newrelic.MonitorDowntimeFrequencyDaysOfWeekArgs(
             ordinal_day_of_month="SECOND",
             week_day="SATURDAY",
         ),
-    )
-    mode="MONTHLY",
-    monitor_guids=[
-        "<GUID-1>",
-        "<GUID-2>",
-    ],
-    start_time="2023-12-04T07:15:00",
-    time_zone="Europe/Dublin")
+    ))
 ```
 <!--End PulumiCodeChooser -->
 However, the `frequency` block in monthly monitor downtimes may also be specified with its other nested argument, `days_of_month`, as shown in the example below - though both `days_of_month` and `days_of_week` cannot be specified together, as they are mutually exclusive.
@@ -512,11 +517,19 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_monthly_newrelic_monitor_downtime = newrelic.MonitorDowntime("
+sample_monthly_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_monthly_newrelic_monitor_downtime",
+    name="Sample Monthly Monitor Downtime",
+    monitor_guids=[
+        "<GUID-1>",
+        "<GUID-2>",
+    ],
+    mode="MONTHLY",
+    start_time="2023-12-04T07:15:00",
+    end_time="2024-01-04T19:15:00",
     end_repeat=newrelic.MonitorDowntimeEndRepeatArgs(
         on_repeat=6,
     ),
-
+    time_zone="Europe/Dublin",
     frequency=newrelic.MonitorDowntimeFrequencyArgs(
         days_of_months=[
             3,
@@ -524,14 +537,7 @@ class MonitorDowntime(pulumi.CustomResource):
             14,
             23,
         ],
-    )
-    mode="MONTHLY",
-    monitor_guids=[
-        "<GUID-1>",
-        "<GUID-2>",
-    ],
-    start_time="2023-12-04T07:15:00",
-    time_zone="Europe/Dublin")
+    ))
 ```
 <!--End PulumiCodeChooser -->
 
@@ -577,21 +583,22 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi_newrelic as newrelic
 
 foo = newrelic.MonitorDowntime("foo",
+    name="Sample Monitor Downtime",
+    monitor_guids=[
+        "<GUID-1>",
+        "<GUID-2>",
+    ],
+    mode="WEEKLY",
+    start_time="2023-11-30T10:30:00",
+    end_time="2023-12-10T02:45:30",
+    time_zone="Asia/Kolkata",
     end_repeat=newrelic.MonitorDowntimeEndRepeatArgs(
         on_date="2023-12-20",
     ),
-    end_time="2023-12-10T02:45:30",
     maintenance_days=[
         "FRIDAY",
         "SATURDAY",
-    ]
-    mode="WEEKLY",
-    monitor_guids=[
-        "<GUID-1>",
-        "<GUID-2>",
-    ],
-    start_time="2023-11-30T10:30:00",
-    time_zone="Asia/Kolkata")
+    ])
 ```
 <!--End PulumiCodeChooser -->
 Monitor Downtimes are of four types; **one-time**, **daily**, **weekly** and **monthly**. For more details on each type and the right arguments that go with them, check out the argument reference and examples sections below.
@@ -607,14 +614,15 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_one_time_newrelic_monitor_downtime = newrelic.MonitorDowntime("
-
-    mode="ONE_TIME",
+sample_one_time_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_one_time_newrelic_monitor_downtime",
+    name="Sample One Time Monitor Downtime",
     monitor_guids=[
         "<GUID-1>",
         "<GUID-2>",
     ],
+    mode="ONE_TIME",
     start_time="2023-12-04T10:15:00",
+    end_time="2024-01-04T16:24:30",
     time_zone="America/Los_Angeles")
 ```
 <!--End PulumiCodeChooser -->
@@ -630,17 +638,18 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_daily_newrelic_monitor_downtime = newrelic.MonitorDowntime("
-
-        on_date="2023-12-25",
-    ),
-    end_time="2024-01-04T07:15:00",
-    mode="DAILY",
+sample_daily_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_daily_newrelic_monitor_downtime",
+    name="Sample Daily Monitor Downtime",
     monitor_guids=[
         "<GUID-1>",
         "<GUID-2>",
     ],
+    mode="DAILY",
     start_time="2023-12-04T18:15:00",
+    end_time="2024-01-04T07:15:00",
+    end_repeat=newrelic.MonitorDowntimeEndRepeatArgs(
+        on_date="2023-12-25",
+    ),
     time_zone="Asia/Kolkata")
 ```
 <!--End PulumiCodeChooser -->
@@ -656,19 +665,20 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_weekly_newrelic_monitor_downtime = newrelic.MonitorDowntime("
-
-    maintenance_days=[
-        "SATURDAY",
-        "SUNDAY",
-    ],
-    mode="WEEKLY",
+sample_weekly_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_weekly_newrelic_monitor_downtime",
+    name="Sample Weekly Monitor Downtime",
     monitor_guids=[
         "<GUID-1>",
         "<GUID-2>",
     ],
+    mode="WEEKLY",
     start_time="2023-12-04T14:15:00",
-
+    end_time="2024-01-04T23:55:00",
+    time_zone="US/Hawaii",
+    maintenance_days=[
+        "SATURDAY",
+        "SUNDAY",
+    ])
 ```
 <!--End PulumiCodeChooser -->
 
@@ -683,24 +693,25 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_monthly_newrelic_monitor_downtime = newrelic.MonitorDowntime("
+sample_monthly_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_monthly_newrelic_monitor_downtime",
+    name="Sample Monthly Monitor Downtime",
+    monitor_guids=[
+        "<GUID-1>",
+        "<GUID-2>",
+    ],
+    mode="MONTHLY",
+    start_time="2023-12-04T07:15:00",
+    end_time="2024-01-04T19:15:00",
     end_repeat=newrelic.MonitorDowntimeEndRepeatArgs(
         on_repeat=6,
     ),
-
+    time_zone="Europe/Dublin",
     frequency=newrelic.MonitorDowntimeFrequencyArgs(
         days_of_week=newrelic.MonitorDowntimeFrequencyDaysOfWeekArgs(
             ordinal_day_of_month="SECOND",
             week_day="SATURDAY",
         ),
-    )
-    mode="MONTHLY",
-    monitor_guids=[
-        "<GUID-1>",
-        "<GUID-2>",
-    ],
-    start_time="2023-12-04T07:15:00",
-    time_zone="Europe/Dublin")
+    ))
 ```
 <!--End PulumiCodeChooser -->
 However, the `frequency` block in monthly monitor downtimes may also be specified with its other nested argument, `days_of_month`, as shown in the example below - though both `days_of_month` and `days_of_week` cannot be specified together, as they are mutually exclusive.
@@ -709,11 +720,19 @@ class MonitorDowntime(pulumi.CustomResource):
 import pulumi
 import pulumi_newrelic as newrelic
 
-sample_monthly_newrelic_monitor_downtime = newrelic.MonitorDowntime("
+sample_monthly_newrelic_monitor_downtime = newrelic.MonitorDowntime("sample_monthly_newrelic_monitor_downtime",
+    name="Sample Monthly Monitor Downtime",
+    monitor_guids=[
+        "<GUID-1>",
+        "<GUID-2>",
+    ],
+    mode="MONTHLY",
+    start_time="2023-12-04T07:15:00",
+    end_time="2024-01-04T19:15:00",
     end_repeat=newrelic.MonitorDowntimeEndRepeatArgs(
         on_repeat=6,
     ),
-
+    time_zone="Europe/Dublin",
     frequency=newrelic.MonitorDowntimeFrequencyArgs(
         days_of_months=[
             3,
@@ -721,14 +740,7 @@ class MonitorDowntime(pulumi.CustomResource):
             14,
             23,
         ],
-    )
-    mode="MONTHLY",
-    monitor_guids=[
-        "<GUID-1>",
-        "<GUID-2>",
-    ],
-    start_time="2023-12-04T07:15:00",
-    time_zone="Europe/Dublin")
+    ))
 ```
 <!--End PulumiCodeChooser -->
 
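As the docstring note in the hunks above says, a monthly downtime's `frequency` block takes either `days_of_week` or `days_of_months`, never both. Below is a minimal sketch of the `days_of_months` variant assembled from the hunks; the day list is an illustrative subset, since the diff context shows only part of it.

```python
import pulumi_newrelic as newrelic

# Monthly downtime recurring on specific days of the month; mutually
# exclusive with the days_of_week nested block shown earlier.
sample_monthly = newrelic.MonitorDowntime("sample_monthly_newrelic_monitor_downtime",
    name="Sample Monthly Monitor Downtime",
    monitor_guids=[
        "<GUID-1>",
        "<GUID-2>",
    ],
    mode="MONTHLY",
    start_time="2023-12-04T07:15:00",
    end_time="2024-01-04T19:15:00",
    end_repeat=newrelic.MonitorDowntimeEndRepeatArgs(
        on_repeat=6,
    ),
    time_zone="Europe/Dublin",
    frequency=newrelic.MonitorDowntimeFrequencyArgs(
        days_of_months=[3, 14, 23],  # illustrative subset of the list in the diff
    ))
```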