qontract-reconcile 0.10.1rc1030__py3-none-any.whl → 0.10.1rc1032__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {qontract_reconcile-0.10.1rc1030.dist-info → qontract_reconcile-0.10.1rc1032.dist-info}/METADATA +1 -1
- {qontract_reconcile-0.10.1rc1030.dist-info → qontract_reconcile-0.10.1rc1032.dist-info}/RECORD +46 -46
- reconcile/aws_support_cases_sos.py +1 -1
- reconcile/cli.py +1 -1
- reconcile/dynatrace_token_provider/integration.py +2 -2
- reconcile/endpoints_discovery/merge_request.py +10 -6
- reconcile/github_org.py +1 -1
- reconcile/gitlab_permissions.py +22 -130
- reconcile/ocm_aws_infrastructure_access.py +2 -2
- reconcile/openshift_base.py +2 -2
- reconcile/openshift_cluster_bots.py +1 -1
- reconcile/openshift_namespaces.py +1 -1
- reconcile/openshift_resources_base.py +2 -2
- reconcile/openshift_saas_deploy_change_tester.py +1 -1
- reconcile/openshift_saas_deploy_trigger_base.py +1 -1
- reconcile/query_validator.py +2 -2
- reconcile/terraform_aws_route53.py +2 -2
- reconcile/test/test_closedbox_endpoint_monitoring.py +5 -5
- reconcile/test/test_gitlab_permissions.py +9 -97
- reconcile/test/test_openshift_resource.py +2 -2
- reconcile/test/test_openshift_resources_base.py +7 -7
- reconcile/test/test_openshift_serviceaccount_tokens.py +7 -5
- reconcile/test/test_terraform_vpc_peerings.py +2 -2
- reconcile/utils/acs/base.py +1 -1
- reconcile/utils/acs/notifiers.py +1 -1
- reconcile/utils/aws_api.py +1 -1
- reconcile/utils/config.py +2 -4
- reconcile/utils/external_resources.py +1 -1
- reconcile/utils/gitlab_api.py +43 -7
- reconcile/utils/gql.py +1 -1
- reconcile/utils/jinja2/utils.py +1 -1
- reconcile/utils/jobcontroller/controller.py +3 -3
- reconcile/utils/mr/__init__.py +8 -8
- reconcile/utils/ocm/__init__.py +2 -2
- reconcile/utils/saasherder/__init__.py +1 -1
- reconcile/utils/saasherder/saasherder.py +5 -5
- reconcile/utils/state.py +2 -2
- reconcile/utils/terraform_client.py +1 -1
- reconcile/utils/terrascript_aws_client.py +1 -1
- reconcile/utils/unleash/__init__.py +2 -2
- reconcile/utils/vcs.py +2 -2
- tools/app_interface_reporter.py +7 -7
- tools/qontract_cli.py +21 -45
- {qontract_reconcile-0.10.1rc1030.dist-info → qontract_reconcile-0.10.1rc1032.dist-info}/WHEEL +0 -0
- {qontract_reconcile-0.10.1rc1030.dist-info → qontract_reconcile-0.10.1rc1032.dist-info}/entry_points.txt +0 -0
- {qontract_reconcile-0.10.1rc1030.dist-info → qontract_reconcile-0.10.1rc1032.dist-info}/top_level.txt +0 -0
reconcile/openshift_saas_deploy_change_tester.py CHANGED
@@ -147,7 +147,7 @@ def collect_compare_diffs(
             # that changes another saas file was merged but is not yet
             # reflected in the baseline graphql endpoint.
             # https://issues.redhat.com/browse/APPSRE-3029
-            logging.debug(f"Diff not found in changed paths, skipping: {
+            logging.debug(f"Diff not found in changed paths, skipping: {d!s}")
             continue
         for c in current_state:
             if d.saas_file_name != c.saas_file_name:
reconcile/query_validator.py CHANGED
@@ -42,13 +42,13 @@ def run(dry_run):
             gqlapi.query(gql.get_resource(q["path"])["content"])
         except (gql.GqlGetResourceError, gql.GqlApiError) as e:
             error = True
-            logging.error(f"query validation error in {qv_name}: {
+            logging.error(f"query validation error in {qv_name}: {e!s}")
         for r in qv.get("resources") or []:
             try:
                 fetch_openshift_resource(r, qv, settings=settings, skip_validation=True)
             except Exception as e:
                 error = True
-                logging.error(f"query validation error in {qv_name}: {
+                logging.error(f"query validation error in {qv_name}: {e!s}")

     if error:
         sys.exit(ExitCodes.ERROR)
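Most of the one-line changes in this release follow the same pattern: an explicit str() call inside an f-string is replaced by the equivalent !s conversion specifier. A minimal sketch of the equivalence (the assumption that this came from a linter autofix is ours, not stated in the diff):

e = ValueError("boom")
# The two spellings render identically: "!s" asks the f-string to apply
# str() to the value before interpolating it.
assert f"error: {str(e)}" == "error: boom"
assert f"error: {e!s}" == "error: boom"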
reconcile/terraform_aws_route53.py CHANGED
@@ -147,9 +147,9 @@ def build_desired_state(
             sys.exit(ExitCodes.ERROR)
         tf_zone_spec = tf_zone_specs[0]
         tf_zone_account_name = tf_zone_spec.provisioner_name
-        zone_account =
+        zone_account = next(
             a for a in all_accounts if a["name"] == tf_zone_account_name
-
+        )
         tf_zone_region = (
             tf_zone_spec.resource.get("region")
             or zone_account["resourcesDefaultRegion"]
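The route53 change swaps a "build a list, take element zero" construct for next() over a generator expression. A small sketch with made-up account data:

all_accounts = [{"name": "dev"}, {"name": "prod"}]  # made-up account records

# New style: stops at the first match, no intermediate list.
zone_account = next(a for a in all_accounts if a["name"] == "prod")

# Old-style equivalent: materializes every match before taking element zero.
assert zone_account == [a for a in all_accounts if a["name"] == "prod"][0]
# Caveat: with no match, next() raises StopIteration where indexing raises IndexError.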
reconcile/test/test_closedbox_endpoint_monitoring.py CHANGED
@@ -43,7 +43,7 @@ def test_blackbox_exporter_endpoint_loading(mocker):
     assert endpoints is not None
     assert len(endpoints) == 1

-    provider =
+    provider = next(iter(endpoints.keys()))
     assert provider.provider == BLACKBOX_EXPORTER_PROVIDER
     provider_endpoints = endpoints.get(provider)
     assert provider_endpoints is not None
@@ -74,7 +74,7 @@ def test_blackbox_exporter_probe_building(mocker):
     endpoints = get_endpoints(BLACKBOX_EXPORTER_PROVIDER)
     assert len(endpoints) == 1

-    provider =
+    provider = next(iter(endpoints.keys()))
     provider_endpoints = endpoints.get(provider)
     assert provider_endpoints is not None
     probe_resource = blackbox_exporter_probe_builder(provider, provider_endpoints)
@@ -108,7 +108,7 @@ def test_signalfx_probe_building(mocker):
     endpoints = get_endpoints(SIGNALFX_PROVIDER)
     assert len(endpoints) == 1

-    provider =
+    provider = next(iter(endpoints.keys()))
     provider_endpoints = endpoints.get(provider)
     assert provider_endpoints is not None
     probe_resource = signalfx_probe_builder(provider, provider_endpoints)
@@ -157,7 +157,7 @@ def test_blackbox_exporter_filling_desired_state(mocker):
     add_desired_mock = mocker.patch.object(ResourceInventory, "add_desired")

     endpoints = get_endpoints(BLACKBOX_EXPORTER_PROVIDER)
-    provider =
+    provider = next(iter(endpoints.keys()))
     probe = blackbox_exporter_probe_builder(provider, endpoints[provider])
     assert probe is not None
     fill_desired_state(provider, probe, ResourceInventory())
@@ -178,7 +178,7 @@ def test_signalfx_filling_desired_state(mocker):
     add_desired_mock = mocker.patch.object(ResourceInventory, "add_desired")

     endpoints = get_endpoints(SIGNALFX_PROVIDER)
-    provider =
+    provider = next(iter(endpoints.keys()))
     probe = signalfx_probe_builder(provider, endpoints[provider])
     assert probe is not None
     fill_desired_state(provider, probe, ResourceInventory())
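The five test changes above all fetch the single key of a one-element mapping. A minimal sketch of the idiom (the endpoint data is made up):

endpoints = {"blackbox-exporter": ["endpoint-1"]}  # made-up one-element mapping

provider = next(iter(endpoints.keys()))  # same as next(iter(endpoints))
assert provider == "blackbox-exporter"
# Unlike list(endpoints.keys())[0], no throwaway list is built.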
reconcile/test/test_gitlab_permissions.py CHANGED
@@ -1,17 +1,7 @@
 from unittest.mock import MagicMock, create_autospec

 import pytest
-from gitlab.v4.objects import
-    CurrentUser,
-    Group,
-    GroupMember,
-    GroupProjectManager,
-    Project,
-    ProjectMember,
-    ProjectMemberAllManager,
-    SharedProject,
-    SharedProjectManager,
-)
+from gitlab.v4.objects import CurrentUser, GroupMember
 from pytest_mock import MockerFixture

 from reconcile import gitlab_permissions
@@ -33,7 +23,6 @@ def mocked_gl() -> MagicMock:
     gl.server = "test_server"
     gl.user = create_autospec(CurrentUser)
     gl.user.username = "test_name"
-    gl.user.id = 1234
     return gl


@@ -60,25 +49,13 @@ def test_run_share_with_group(
     mocker.patch(
         "reconcile.gitlab_permissions.get_feature_toggle_state"
     ).return_value = True
-
-
-
-    group.shared_projects = create_autospec(SharedProjectManager)
-    mocked_gl.get_items.side_effect = [
-        [],
-        [],
-    ]
-    mocked_gl.get_group.return_value = group
-    mocked_gl.get_access_level.return_value = 40
-    project = create_autospec(Project, web_url="https://test.com")
-    project.members_all = create_autospec(ProjectMemberAllManager)
-    project.members_all.get.return_value = create_autospec(
-        ProjectMember, id=mocked_gl.user.id, access_level=40
+    mocked_gl.get_group_id_and_shared_projects.return_value = (
+        1234,
+        {"https://test.com": {"group_access_level": 30}},
     )
-    mocked_gl.get_project.return_value = project
     gitlab_permissions.run(False, thread_pool_size=1)
     mocked_gl.share_project_with_group.assert_called_once_with(
-
+        repo_url="https://test-gitlab.com", group_id=1234, dry_run=False
     )


@@ -89,76 +66,11 @@ def test_run_reshare_with_group(
     mocker.patch(
         "reconcile.gitlab_permissions.get_feature_toggle_state"
     ).return_value = True
-
-
-
-    group.shared_projects = create_autospec(SharedProjectManager)
-    mocked_gl.get_items.side_effect = [
-        [],
-        [
-            create_autospec(
-                SharedProject,
-                web_url="https://test-gitlab.com",
-                shared_with_groups=[
-                    {
-                        "group_access_level": 30,
-                        "group_name": "app-sre",
-                        "group_id": 1234,
-                    }
-                ],
-            )
-        ],
-    ]
-    mocked_gl.get_group.return_value = group
-    mocked_gl.get_access_level.return_value = 40
-    project = create_autospec(Project, web_url="https://test-gitlab.com")
-    project.members_all = create_autospec(ProjectMemberAllManager)
-    project.members_all.get.return_value = create_autospec(
-        ProjectMember, id=mocked_gl.user.id, access_level=40
+    mocked_gl.get_group_id_and_shared_projects.return_value = (
+        1234,
+        {"https://test-gitlab.com": {"group_access_level": 30}},
     )
-    mocked_gl.get_project.return_value = project
     gitlab_permissions.run(False, thread_pool_size=1)
     mocked_gl.share_project_with_group.assert_called_once_with(
-
-    )
-
-
-def test_run_share_with_group_failed(
-    mocked_queries: MagicMock, mocker: MockerFixture, mocked_gl: MagicMock
-) -> None:
-    mocker.patch("reconcile.gitlab_permissions.GitLabApi").return_value = mocked_gl
-    mocker.patch(
-        "reconcile.gitlab_permissions.get_feature_toggle_state"
-    ).return_value = True
-    group = create_autospec(Group, id=1234)
-    group.name = "app-sre"
-    group.projects = create_autospec(GroupProjectManager)
-    group.shared_projects = create_autospec(SharedProjectManager)
-    group.projects = create_autospec(GroupProjectManager)
-    group.shared_projects = create_autospec(SharedProjectManager)
-    mocked_gl.get_items.side_effect = [
-        [],
-        [
-            create_autospec(
-                SharedProject,
-                web_url="https://test-gitlab.com",
-                shared_with_groups=[
-                    {
-                        "group_access_level": 30,
-                        "group_name": "app-sre",
-                        "group_id": 134,
-                    }
-                ],
-            )
-        ],
-    ]
-    mocked_gl.get_group.return_value = group
-    mocked_gl.get_access_level.return_value = 40
-    project = create_autospec(Project)
-    project.members_all = create_autospec(ProjectMemberAllManager)
-    project.members_all.get.return_value = create_autospec(
-        ProjectMember, id=mocked_gl.user.id, access_level=10
+        repo_url="https://test-gitlab.com", group_id=1234, dry_run=False, reshare=True
     )
-    mocked_gl.get_project.return_value = project
-    with pytest.raises(Exception):
-        gitlab_permissions.run(False, thread_pool_size=1)
reconcile/test/test_openshift_resource.py CHANGED
@@ -391,7 +391,7 @@ def test_resource_inventory_add_desired_resource_short_kind():

     assert len(list(ri)) == 1

-    cluster_name, namespace_name, resource_type, resource =
+    cluster_name, namespace_name, resource_type, resource = next(iter(ri))
     assert cluster_name == "cl"
     assert namespace_name == "ns"
     assert resource_type == "Deployment"
@@ -412,7 +412,7 @@ def test_resource_inventory_add_desired_resource_long_kind():

     assert len(list(ri)) == 1

-    cluster_name, namespace_name, resource_type, resource =
+    cluster_name, namespace_name, resource_type, resource = next(iter(ri))
     assert cluster_name == "cl"
     assert namespace_name == "ns"
     assert resource_type == "Deployment.apps"
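ResourceInventory is iterable, so the first (and here only) entry can be unpacked straight out of next(iter(...)). A sketch with a plain list standing in for the inventory:

# A plain list stands in for the ResourceInventory iterator used above.
ri = [("cl", "ns", "Deployment", {"current": {}, "desired": {}})]

cluster_name, namespace_name, resource_type, resource = next(iter(ri))
assert (cluster_name, namespace_name, resource_type) == ("cl", "ns", "Deployment")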
reconcile/test/test_openshift_resources_base.py CHANGED
@@ -155,7 +155,7 @@ def test_fetch_current_state_ri_initialized(oc_cs1: oc.OCClient, tmpl1: dict[str
         resource_names=[],
     )

-    _, _, _, resource =
+    _, _, _, resource = next(iter(ri))
     assert len(resource["current"]) == 1
     assert "tmpl1" in resource["current"]
     assert resource["current"]["tmpl1"].kind == "Template"
@@ -178,7 +178,7 @@ def test_fetch_current_state_kind_not_supported(
         resource_names=[],
     )

-    _, _, _, resource =
+    _, _, _, resource = next(iter(ri))
     assert len(resource["current"]) == 0


@@ -195,7 +195,7 @@ def test_fetch_current_state_long_kind(oc_cs1: oc.OCClient, tmpl1: dict[str, Any
         resource_names=[],
     )

-    _, _, _, resource =
+    _, _, _, resource = next(iter(ri))
     assert len(resource["current"]) == 1
     assert "tmpl1" in resource["current"]
     assert resource["current"]["tmpl1"].kind == "Template"
@@ -218,7 +218,7 @@ def test_fetch_current_state_long_kind_not_supported(
         resource_names=[],
     )

-    _, _, _, resource =
+    _, _, _, resource = next(iter(ri))
     assert len(resource["current"]) == 0


@@ -227,7 +227,7 @@ def test_fetch_states(current_state_spec: CurrentStateSpec, tmpl1: dict[str, Any
     ri.initialize_resource_type("cs1", "ns1", "Template")
     current_state_spec.oc.get_items = lambda kind, **kwargs: [tmpl1]  # type: ignore[method-assign]
     orb.fetch_states(ri=ri, spec=current_state_spec)
-    _, _, _, resource =
+    _, _, _, resource = next(iter(ri))
     assert len(resource["current"]) == 1
     assert "tmpl1" in resource["current"]
     assert resource["current"]["tmpl1"].kind == "Template"
@@ -238,7 +238,7 @@ def test_fetch_states_unknown_kind(current_state_spec: CurrentStateSpec):
     ri = ResourceInventory()
     ri.initialize_resource_type("cs1", "ns1", "UnknownKind")
     orb.fetch_states(ri=ri, spec=current_state_spec)
-    _, _, _, resource =
+    _, _, _, resource = next(iter(ri))
     assert len(resource["current"]) == 0


@@ -250,7 +250,7 @@ def test_fetch_states_oc_error(current_state_spec: CurrentStateSpec):
     ri.initialize_resource_type("cs1", "ns1", "Template")
     orb.fetch_states(ri=ri, spec=current_state_spec)
     assert ri.has_error_registered("cs1")
-    _, _, _, resource =
+    _, _, _, resource = next(iter(ri))
     assert len(resource["current"]) == 0

reconcile/test/test_openshift_serviceaccount_tokens.py CHANGED
@@ -264,11 +264,13 @@ def test_openshift_serviceaccount_tokens__fetch_desired_state_create_token(
         )
         == 1
     )
-    r =
-
-        "
-
-
+    r = next(
+        iter(
+            ri._clusters["cluster"]["with-openshift-serviceaccount-tokens"]["Secret"][
+                "desired"
+            ].values()
+        )
+    )
     assert r.body["type"] == "kubernetes.io/service-account-token"

reconcile/test/test_terraform_vpc_peerings.py CHANGED
@@ -117,7 +117,7 @@ def build_cluster(
     cluster["awsInfrastructureManagementAccounts"] = []
     if read_only_accounts:
         for acc in read_only_accounts:
-            cluster["awsInfrastructureManagementAccounts"].append( # type: ignore
+            cluster["awsInfrastructureManagementAccounts"].append(  # type: ignore
                 {
                     "account": {
                         "name": acc,
@@ -131,7 +131,7 @@ def build_cluster(
     )
     if network_mgmt_accounts:
         for idx, acc in enumerate(network_mgmt_accounts):
-            cluster["awsInfrastructureManagementAccounts"].append( # type: ignore
+            cluster["awsInfrastructureManagementAccounts"].append(  # type: ignore
                 {
                     "account": {
                         "name": acc,
reconcile/utils/acs/base.py CHANGED
reconcile/utils/acs/notifiers.py CHANGED
@@ -123,7 +123,7 @@ class AcsNotifiersApi(AcsBaseApi):
     ]

     def get_notifier_id_by_name(self, name: str) -> str:
-        return
+        return next(n["id"] for n in self.get_notifiers() if n["name"] == name)

     def update_jira_notifier(
         self, jira_notifier: JiraNotifier, jira_credentials: JiraCredentials
reconcile/utils/aws_api.py CHANGED
@@ -738,7 +738,7 @@ class AWSApi: # pylint: disable=too-many-public-methods

     def get_user_key_status(self, iam: IAMClient, user: str, key: str) -> KeyStatus:
         key_list = self._get_user_key_list(iam, user)
-        return
+        return next(k["Status"] for k in key_list if k["AccessKeyId"] == key)

     def get_support_cases(self):
         all_support_cases = {}
reconcile/utils/config.py CHANGED
@@ -35,7 +35,7 @@ def read(secret):
             config = config[t]
         return config[field]
     except Exception as e:
-        raise SecretNotFound(f"key not found in config file {path}: {
+        raise SecretNotFound(f"key not found in config file {path}: {e!s}") from None


 def read_all(secret):
@@ -47,6 +47,4 @@ def read_all(secret):
             config = config[t]
         return config
     except Exception as e:
-        raise SecretNotFound(
-            f"secret {path} not found in config file: {str(e)}"
-        ) from None
+        raise SecretNotFound(f"secret {path} not found in config file: {e!s}") from None
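Both config.py hunks keep the existing "from None" suffix while compacting the message. A short sketch of what "from None" does, using a stand-in exception class rather than the module's own code:

class SecretNotFound(Exception):  # stand-in for the module's own exception
    pass


def read_field(config: dict, field: str) -> str:
    try:
        return config[field]
    except Exception as e:
        # "from None" suppresses implicit exception chaining, so callers see
        # only SecretNotFound instead of "During handling of the above
        # exception, another exception occurred".
        raise SecretNotFound(f"key not found: {e!s}") from None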
reconcile/utils/external_resources.py CHANGED
@@ -33,7 +33,7 @@ def get_external_resource_specs(
     external_resources = namespace_info.get("externalResources") or []
     for e in external_resources:
         for r in e.get("resources", []):
-            if
+            if r.get("managed_by_erv2"):
                 continue
             spec = ExternalResourceSpec(
                 provision_provider=e["provider"],
reconcile/utils/gitlab_api.py CHANGED
@@ -262,16 +262,51 @@ class GitLabApi: # pylint: disable=too-many-public-methods

     def share_project_with_group(
         self,
-
+        repo_url: str,
         group_id: int,
-
+        dry_run: bool,
+        access: str = "maintainer",
         reshare: bool = False,
     ) -> None:
-
+        project = self.get_project(repo_url)
+        if project is None:
+            return None
+        access_level = self.get_access_level(access)
+        # check if we have 'access_level' access so we can add the group with same role.
+        members = self.get_items(
+            project.members_all.list, query_parameters={"user_ids": self.user.id}
+        )
+        if not any(
+            self.user.id == member.id and member.access_level >= access_level
+            for member in members
+        ):
+            logging.error(
+                "%s is not shared with %s as %s",
+                repo_url,
+                self.user.username,
+                access,
+            )
+            return None
+        logging.info(["add_group_as_maintainer", repo_url, group_id])
+        if not dry_run:
+            if reshare:
+                gitlab_request.labels(integration=INTEGRATION_NAME).inc()
+                project.unshare(group_id)
             gitlab_request.labels(integration=INTEGRATION_NAME).inc()
-            project.
+            project.share(group_id, access_level)
+
+    def get_group_id_and_shared_projects(
+        self, group_name: str
+    ) -> tuple[int, dict[str, Any]]:
         gitlab_request.labels(integration=INTEGRATION_NAME).inc()
-
+        group = self.gl.groups.get(group_name)
+        shared_projects = self.get_items(group.projects.list)
+        return group.id, {
+            project.web_url: shared_group
+            for project in shared_projects
+            for shared_group in project.shared_with_groups
+            if shared_group["group_id"] == group.id
+        }

     @staticmethod
     def _is_bot_username(username: str) -> bool:
@@ -379,9 +414,10 @@ class GitLabApi: # pylint: disable=too-many-public-methods
         if access == "guest":
             return GUEST_ACCESS

-    def
+    def get_group_id_and_projects(self, group_name: str) -> tuple[str, list[str]]:
         gitlab_request.labels(integration=INTEGRATION_NAME).inc()
-
+        group = self.gl.groups.get(group_name)
+        return group.id, [p.name for p in self.get_items(group.projects.list)]

     def create_project(self, group_id, project):
         gitlab_request.labels(integration=INTEGRATION_NAME).inc()
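Taken together, the two new helpers let a caller resolve a group once and then (re)share only the projects that need it, which matches the slimmer calls seen in the gitlab_permissions tests above. A hedged usage sketch: ensure_group_shares, the repos list, the dry_run flag, and the "app-sre" group name are illustrative; only the three GitLabApi methods shown come from the diff.

def ensure_group_shares(gl, repos: list[str], dry_run: bool) -> None:
    # Resolve the group once, then (re)share only where needed.
    group_id, shared_projects = gl.get_group_id_and_shared_projects("app-sre")
    desired_level = gl.get_access_level("maintainer")
    for repo_url in repos:
        current = shared_projects.get(repo_url)
        if current is None:
            gl.share_project_with_group(
                repo_url=repo_url, group_id=group_id, dry_run=dry_run
            )
        elif current["group_access_level"] < desired_level:
            # Re-share when the existing grant is below the desired level.
            gl.share_project_with_group(
                repo_url=repo_url, group_id=group_id, dry_run=dry_run, reshare=True
            )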
reconcile/utils/gql.py CHANGED
@@ -77,7 +77,7 @@ class GqlApiErrorForbiddenSchema(Exception):

 class GqlGetResourceError(Exception):
     def __init__(self, path, msg):
-        super().__init__(f"Error getting resource from path {path}: {
+        super().__init__(f"Error getting resource from path {path}: {msg!s}")


 class GqlApi:
reconcile/utils/jinja2/utils.py CHANGED
@@ -122,7 +122,7 @@ def lookup_graphql_query_results(query: str, **kwargs: dict[str, Any]) -> list[A
     gqlapi = gql.get_api()
     resource = gqlapi.get_resource(query)["content"]
     rendered_resource = jinja2.Template(resource).render(**kwargs)
-    results =
+    results = next(iter(gqlapi.query(rendered_resource).values()))
     return results

reconcile/utils/jobcontroller/controller.py CHANGED
@@ -158,9 +158,9 @@ class K8sJobController:
         the function will wait indefinitely. If a timeout occures, a TimeoutError will be raised.
         """
         jobs_left = job_names.copy()
-        job_statuses: dict[str, JobStatus] =
-
-
+        job_statuses: dict[str, JobStatus] = dict.fromkeys(
+            job_names, JobStatus.NOT_EXISTS
+        )

         start_time = self.time_module.time()
         while jobs_left:
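dict.fromkeys builds the same mapping as a comprehension when every key starts with the same value, and sharing the value across keys is safe here because an enum member is a single immutable object. A standalone sketch with a stand-in enum:

from enum import Enum


class JobStatus(Enum):  # stand-in for the controller's real JobStatus
    NOT_EXISTS = "not_exists"
    SUCCESS = "success"


job_names = {"job-a", "job-b"}
job_statuses = dict.fromkeys(job_names, JobStatus.NOT_EXISTS)
# Equivalent to {name: JobStatus.NOT_EXISTS for name in job_names}.
assert all(status is JobStatus.NOT_EXISTS for status in job_statuses.values())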
reconcile/utils/mr/__init__.py CHANGED
@@ -22,19 +22,19 @@ from reconcile.utils.mr.user_maintenance import (
 )

 __all__ = [
-    "
-    "UnknownMergeRequestType",
-    "MergeRequestProcessingError",
+    "CreateAppInterfaceNotificator",
     "CreateAppInterfaceReporter",
-    "CreateDeleteAwsAccessKey",
     "CreateClustersUpdates",
-    "
-    "CreateOCMUpdateRecommendedVersion",
-    "CreateAppInterfaceNotificator",
+    "CreateDeleteAwsAccessKey",
     "CreateDeleteUserAppInterface",
     "CreateDeleteUserInfra",
-    "
+    "CreateOCMUpdateRecommendedVersion",
+    "CreateOCMUpgradeSchedulerOrgUpdates",
+    "MergeRequestProcessingError",
     "PromoteQontractReconcileCommercial",
+    "PromoteQontractSchemas",
+    "UnknownMergeRequestType",
+    "init_from_sqs_message",
 ]

reconcile/utils/ocm/__init__.py CHANGED
@@ -1,5 +1,5 @@
-from reconcile.utils.ocm.ocm import *  # noqa:
-from reconcile.utils.ocm.products import ( # noqa: F401
+from reconcile.utils.ocm.ocm import *  # noqa: F403
+from reconcile.utils.ocm.products import (  # noqa: F401
     BYTES_IN_GIGABYTE,
     CS_API_BASE,
     DEFAULT_OCM_MACHINE_POOL_ID,
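For context on the two noqa codes touched here: F403 silences the "star import used" warning and F401 the "imported but unused" warning, the usual combination when a package __init__ re-exports names from its submodules. A hypothetical sketch (the module and constant names are made up):

# hypothetical package __init__.py that re-exports names from submodules
from mypkg.core import *  # noqa: F403  star import is intentional here
from mypkg.constants import (  # noqa: F401  re-exported, not used locally
    DEFAULT_TIMEOUT,
)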
reconcile/utils/saasherder/saasherder.py CHANGED
@@ -888,7 +888,7 @@ class SaasHerder: # pylint: disable=too-many-public-methods
                 url=url, path=path, ref=ref, github=github
             )
         except Exception as e:
-            logging.error(f"{error_prefix} error fetching template: {
+            logging.error(f"{error_prefix} error fetching template: {e!s}")
             raise

         # add COMMIT_SHA only if it is unspecified
@@ -930,7 +930,7 @@ class SaasHerder: # pylint: disable=too-many-public-methods
                 logging.error(
                     f"{error_prefix} error generating REPO_DIGEST. "
                     + "Is REGISTRY_IMG missing? "
-                    + f"{
+                    + f"{e!s}"
                 )
                 raise

@@ -955,7 +955,7 @@ class SaasHerder: # pylint: disable=too-many-public-methods
            try:
                resources = oc.process(template, consolidated_parameters)
            except StatusCodeError as e:
-                logging.error(f"{error_prefix} error processing template: {
+                logging.error(f"{error_prefix} error processing template: {e!s}")

        elif provider == "directory":
            try:
@@ -964,7 +964,7 @@ class SaasHerder: # pylint: disable=too-many-public-methods
                )
            except Exception as e:
                logging.error(
-                    f"{error_prefix} error fetching directory: {
+                    f"{error_prefix} error fetching directory: {e!s} "
                    + "(We do not support nested directories. Do you by chance have subdirectories?)"
                )
                raise
@@ -1141,7 +1141,7 @@ class SaasHerder: # pylint: disable=too-many-public-methods
            )
        except Exception as e:
            logging.error(
-                f"{error_prefix} Image is invalid: {image}. " + f"details: {
+                f"{error_prefix} Image is invalid: {image}. " + f"details: {e!s}"
            )

        return None
reconcile/utils/state.py CHANGED
@@ -250,7 +250,7 @@ class State:
             self.client.head_bucket(Bucket=self.bucket)
         except ClientError as details:
             raise StateInaccessibleException(
-                f"Bucket {self.bucket} is not accessible - {
+                f"Bucket {self.bucket} is not accessible - {details!s}"
             ) from None

     def __enter__(self) -> Self:
@@ -301,7 +301,7 @@ class State:

             raise StateInaccessibleException(
                 f"Can not access state key {key_path} "
-                f"in bucket {self.bucket} - {
+                f"in bucket {self.bucket} - {details!s}"
             ) from None

     def ls(self) -> list[str]:
reconcile/utils/terraform_client.py CHANGED
@@ -523,7 +523,7 @@ class TerraformClient: # pylint: disable=too-many-public-methods
             self.OUTPUT_TYPE_PASSWORDS,
             self.OUTPUT_TYPE_CONSOLEURLS,
         }:
-            return data[
+            return data[next(iter(data.keys()))]
         return data

     def populate_terraform_output_secrets(
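The terraform_client change indexes the dict with its own first key. A minimal sketch of what that returns (the sample data is made up), plus the shorter equivalent spelling:

data = {"only-account": {"some_output": "value"}}  # made-up single-entry dict

first = data[next(iter(data.keys()))]
# Equivalent, slightly shorter: next(iter(data.values()))
assert first == next(iter(data.values())) == {"some_output": "value"}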
reconcile/utils/terrascript_aws_client.py CHANGED
@@ -5734,7 +5734,7 @@ class TerrascriptClient: # pylint: disable=too-many-public-methods
             s3_client.head_bucket(Bucket=bucket_name)
         except ClientError as details:
             raise StateInaccessibleException(
-                f"Bucket {bucket_name} is not accessible - {
+                f"Bucket {bucket_name} is not accessible - {details!s}"
             ) from None

         # todo: probably remove 'RedHat' from the object/variable/filepath
|