codemie-test-harness 0.1.158__py3-none-any.whl → 0.1.160__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of codemie-test-harness has been flagged as potentially problematic.
- codemie_test_harness/tests/assistant/datasource/test_confluence_datasource.py +2 -1
- codemie_test_harness/tests/assistant/datasource/test_jira_datasource.py +2 -1
- codemie_test_harness/tests/assistant/tools/cloud/test_cloud_tools.py +0 -7
- codemie_test_harness/tests/assistant/tools/codebase/test_codebase_tools.py +0 -1
- codemie_test_harness/tests/assistant/tools/datamanagement/test_assistant_with_data_management_tools.py +4 -5
- codemie_test_harness/tests/assistant/tools/filemanagement/test_assistant_with_file_management_tools.py +2 -9
- codemie_test_harness/tests/assistant/tools/mcp/test_cli_mcp_server.py +5 -7
- codemie_test_harness/tests/assistant/tools/mcp/test_mcp_servers.py +5 -7
- codemie_test_harness/tests/assistant/tools/notification/test_assistant_notification_tools.py +3 -3
- codemie_test_harness/tests/assistant/tools/openapi/test_assistant_with_open_api_tools.py +3 -2
- codemie_test_harness/tests/assistant/tools/report_portal/__init__.py +0 -0
- codemie_test_harness/tests/assistant/tools/report_portal/test_assistant_report_portal_tools.py +32 -0
- codemie_test_harness/tests/conftest.py +17 -2
- codemie_test_harness/tests/enums/environment.py +102 -0
- codemie_test_harness/tests/enums/model_types.py +1 -0
- codemie_test_harness/tests/enums/tools.py +14 -0
- codemie_test_harness/tests/integrations/project/test_default_integrations.py +47 -12
- codemie_test_harness/tests/integrations/project/test_project_integrations.py +0 -125
- codemie_test_harness/tests/integrations/user/test_default_integrations.py +47 -11
- codemie_test_harness/tests/integrations/user/test_user_integrations.py +0 -125
- codemie_test_harness/tests/llm/assistants/test_llm.py +3 -3
- codemie_test_harness/tests/service/test_assistant_service.py +2 -2
- codemie_test_harness/tests/test_data/cloud_tools_test_data.py +32 -11
- codemie_test_harness/tests/test_data/codebase_tools_test_data.py +2 -0
- codemie_test_harness/tests/test_data/data_management_tools_test_data.py +3 -3
- codemie_test_harness/tests/test_data/direct_tools/cloud_tools_test_data.py +7 -4
- codemie_test_harness/tests/test_data/direct_tools/codebase_tools_test_data.py +2 -0
- codemie_test_harness/tests/test_data/direct_tools/data_management_tools_test_data.py +4 -5
- codemie_test_harness/tests/test_data/direct_tools/file_management_tools_test_data.py +2 -2
- codemie_test_harness/tests/test_data/direct_tools/notification_tools_test_data.py +5 -2
- codemie_test_harness/tests/test_data/direct_tools/project_management_tools_test_data.py +2 -0
- codemie_test_harness/tests/test_data/direct_tools/report_portal_tools_test_data.py +1235 -0
- codemie_test_harness/tests/test_data/direct_tools/research_tools_test_data.py +1 -0
- codemie_test_harness/tests/test_data/direct_tools/vcs_tools_test_data.py +3 -0
- codemie_test_harness/tests/test_data/file_management_tools_test_data.py +9 -5
- codemie_test_harness/tests/test_data/index_test_data.py +9 -11
- codemie_test_harness/tests/test_data/integrations_test_data.py +71 -9
- codemie_test_harness/tests/test_data/llm_test_data.py +8 -6
- codemie_test_harness/tests/test_data/project_management_test_data.py +4 -0
- codemie_test_harness/tests/test_data/report_portal_tools_test_data.py +520 -0
- codemie_test_harness/tests/test_data/vcs_tools_test_data.py +11 -2
- codemie_test_harness/tests/utils/aws_parameters_store.py +33 -2
- codemie_test_harness/tests/utils/constants.py +1 -1
- codemie_test_harness/tests/utils/env_resolver.py +119 -0
- codemie_test_harness/tests/workflow/assistant_tools/cloud/test_workflow_with_assistant_cloud_tools.py +0 -7
- codemie_test_harness/tests/workflow/assistant_tools/codebase/test_worfklow_with_assistant_codebase_tools.py +0 -1
- codemie_test_harness/tests/workflow/assistant_tools/data_management/test_workflow_with_assistant_with_data_management_tools.py +3 -5
- codemie_test_harness/tests/workflow/assistant_tools/file_management/test_workflow_with_assistant_with_file_management_tools.py +2 -9
- codemie_test_harness/tests/workflow/assistant_tools/mcp/test_workflow_with_assistant_with_mcp_server.py +5 -10
- codemie_test_harness/tests/workflow/assistant_tools/notification/test_workflow_with_assistant_notification_tools.py +3 -2
- codemie_test_harness/tests/workflow/assistant_tools/open_api/test_workflow_with_assistant_with_open_api_tools.py +3 -2
- codemie_test_harness/tests/workflow/assistant_tools/report_portal/__init__.py +0 -0
- codemie_test_harness/tests/workflow/assistant_tools/report_portal/test_workflow_with_assistant_with_report_portal_tools.py +38 -0
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_data_management_tools_sql.py +3 -2
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_open_api_tools.py +3 -2
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_report_portal_tools.py +115 -0
- codemie_test_harness/tests/workflow/virtual_assistant_tools/cloud/test_workflow_with_cloud_tools.py +0 -7
- codemie_test_harness/tests/workflow/virtual_assistant_tools/codebase/test_workflow_with_codebase_tools.py +0 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/data_management/test_workflow_with_data_management_tools.py +3 -5
- codemie_test_harness/tests/workflow/virtual_assistant_tools/file_management/test_workflow_with_file_management_tools.py +2 -9
- codemie_test_harness/tests/workflow/virtual_assistant_tools/mcp/test_workflow_with_mcp_server.py +5 -11
- codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/test_workflow_with_notification_tools.py +3 -3
- codemie_test_harness/tests/workflow/virtual_assistant_tools/open_api/test_workflow_with_open_api_tools.py +3 -3
- codemie_test_harness/tests/workflow/virtual_assistant_tools/report_portal/__init__.py +0 -0
- codemie_test_harness/tests/workflow/virtual_assistant_tools/report_portal/test_workflow_with_report_portal_tool.py +39 -0
- {codemie_test_harness-0.1.158.dist-info → codemie_test_harness-0.1.160.dist-info}/METADATA +2 -2
- {codemie_test_harness-0.1.158.dist-info → codemie_test_harness-0.1.160.dist-info}/RECORD +69 -58
- {codemie_test_harness-0.1.158.dist-info → codemie_test_harness-0.1.160.dist-info}/WHEEL +0 -0
- {codemie_test_harness-0.1.158.dist-info → codemie_test_harness-0.1.160.dist-info}/entry_points.txt +0 -0
codemie_test_harness/tests/test_data/direct_tools/vcs_tools_test_data.py

@@ -1,4 +1,5 @@
 import pytest
+from codemie_sdk.models.integration import CredentialTypes
 
 from codemie_test_harness.tests.enums.tools import VcsTool, Toolkit
 
@@ -91,6 +92,7 @@ vcs_tools_test_data = [
         }
         """,
         marks=pytest.mark.github,
+        id=f"{CredentialTypes.GIT}_github",
     ),
     pytest.param(
         Toolkit.VCS,
@@ -135,5 +137,6 @@ vcs_tools_test_data = [
 
         """,
         marks=pytest.mark.gitlab,
+        id=f"{CredentialTypes.GIT}_gitlab",
     ),
 ]
codemie_test_harness/tests/test_data/file_management_tools_test_data.py

@@ -1,4 +1,4 @@
-import
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 import pytest
 
@@ -89,29 +89,33 @@ GENERATE_IMAGE_TOOL_TASK = """
 """
 
 file_management_tools_test_data = [
-    (
+    pytest.param(
         FileManagementTool.PYTHON_CODE_INTERPRETER,
         CODE_INTERPRETER_TOOL_TASK,
         RESPONSE_FOR_CODE_INTERPRETER,
+        id=FileManagementTool.PYTHON_CODE_INTERPRETER,
     ),
     pytest.param(
         FileManagementTool.LIST_DIRECTORY,
         LIST_DIR_TOOL_TASK,
         RESPONSE_FOR_LIST_DIR,
         marks=pytest.mark.skipif(
-
+            EnvironmentResolver.is_localhost(),
             reason="Skipping this test on local environment",
         ),
+        id=FileManagementTool.LIST_DIRECTORY,
     ),
-    (
+    pytest.param(
         FileManagementTool.WRITE_FILE,
         WRITE_FILE_TASK,
         RESPONSE_FOR_WRITE_FILE_TASK,
+        id=FileManagementTool.WRITE_FILE,
     ),
-    (
+    pytest.param(
         FileManagementTool.RUN_COMMAND_LINE,
         COMMAND_LINE_TOOL_TASK,
         RESPONSE_FOR_COMMAND_LINE_TASK,
+        id=FileManagementTool.RUN_COMMAND_LINE,
     ),
 ]
 
codemie_test_harness/tests/test_data/index_test_data.py

@@ -1,32 +1,30 @@
-import os
 from dataclasses import dataclass
 from typing import List
 
 import pytest
 
+from codemie_test_harness.tests.utils.env_resolver import get_environment
+from codemie_test_harness.tests.enums.environment import Environment
+
 
 @dataclass
 class EmbeddingData:
     """Data class to store Embedding models."""
 
     model_type: str
-    environments: List[
-
+    environments: List[Environment]
 
-AZURE_ENVS = ["preview", "azure", "local"]
-GCP_ENVS = ["preview", "gcp", "local"]
-AWS_ENVS = ["preview", "aws", "local"]
 
 MODELS = [
-    EmbeddingData("titan",
-    EmbeddingData("gecko",
-    EmbeddingData("ada-002",
+    EmbeddingData("titan", Environment.get_aws_environments()),
+    EmbeddingData("gecko", Environment.get_gcp_environments()),
+    EmbeddingData("ada-002", Environment.get_azure_environments()),
 ]
 
 
 def generate_test_data():
     """Generate pytest parameters for Embedding models"""
-    env =
+    env = get_environment()
     test_data = []
 
     for model in MODELS:
@@ -35,7 +33,7 @@ def generate_test_data():
                 model.model_type,
                 marks=pytest.mark.skipif(
                     env not in model.environments,
-                    reason=f"Skip on non {'/'.join(model.environments[:-1])} envs",
+                    reason=f"Skip on non {'/'.join(str(env) for env in model.environments[:-1])} envs",
                 ),
             )
         )
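The hunks above and below call helpers from the new codemie_test_harness/tests/enums/environment.py and codemie_test_harness/tests/utils/env_resolver.py modules (Environment.get_aws_environments(), get_environment(), EnvironmentResolver.is_azure(), EnvironmentResolver.is_localhost()), but neither new file is included in this extract. A minimal sketch of what such helpers could look like; the member values, method bodies, and the environment-variable lookup are assumptions inferred from the call sites, not taken from the package:

import os
from enum import Enum
from typing import List


class Environment(str, Enum):
    """Hypothetical deployment environments inferred from the call sites above."""

    PREVIEW = "preview"
    AZURE = "azure"
    GCP = "gcp"
    AWS = "aws"
    LOCALHOST = "localhost"

    def __str__(self) -> str:
        # So '/'.join(str(env) for env in ...) renders plain values in skip reasons.
        return self.value

    @classmethod
    def get_azure_environments(cls) -> List["Environment"]:
        return [cls.PREVIEW, cls.AZURE, cls.LOCALHOST]

    @classmethod
    def get_gcp_environments(cls) -> List["Environment"]:
        return [cls.PREVIEW, cls.GCP, cls.LOCALHOST]

    @classmethod
    def get_aws_environments(cls) -> List["Environment"]:
        return [cls.PREVIEW, cls.AWS, cls.LOCALHOST]


def get_environment() -> Environment:
    # Assumption: the target environment comes from an ENVIRONMENT variable.
    return Environment(os.getenv("ENVIRONMENT", "localhost").lower())


class EnvironmentResolver:
    """Convenience checks matching the skipif conditions used in the test data."""

    @staticmethod
    def is_azure() -> bool:
        return get_environment() is Environment.AZURE

    @staticmethod
    def is_localhost() -> bool:
        return get_environment() is Environment.LOCALHOST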
codemie_test_harness/tests/test_data/integrations_test_data.py

@@ -1,10 +1,9 @@
-import os
-
 import pytest
 
 from codemie_sdk.models.integration import CredentialTypes
 from codemie_test_harness.tests.enums.integrations import DataBaseDialect
 from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 valid_integrations = [
     pytest.param(
@@ -14,6 +13,7 @@ valid_integrations = [
             pytest.mark.aws,
             pytest.mark.cloud,
         ],
+        id=CredentialTypes.AWS,
     ),
     pytest.param(
         CredentialTypes.AZURE,
@@ -22,6 +22,7 @@ valid_integrations = [
             pytest.mark.azure,
             pytest.mark.cloud,
         ],
+        id=CredentialTypes.AZURE,
     ),
     pytest.param(
         CredentialTypes.GCP,
@@ -30,30 +31,35 @@ valid_integrations = [
             pytest.mark.gcp,
             pytest.mark.cloud,
             pytest.mark.skipif(
-
+                EnvironmentResolver.is_azure(),
                 reason="Still have an issue with encoding long strings",
             ),
         ],
+        id=CredentialTypes.GCP,
     ),
     pytest.param(
         CredentialTypes.SONAR,
         CredentialsUtil.sonar_credentials(),
         marks=pytest.mark.sonar,
+        id=f"{CredentialTypes.SONAR}_server",
     ),
     pytest.param(
         CredentialTypes.SONAR,
         CredentialsUtil.sonar_cloud_credentials(),
         marks=pytest.mark.sonar,
+        id=f"{CredentialTypes.SONAR}_cloud",
     ),
     pytest.param(
         CredentialTypes.GIT,
         CredentialsUtil.gitlab_credentials(),
         marks=pytest.mark.gitlab,
+        id=f"{CredentialTypes.GIT}_gitlab",
     ),
     pytest.param(
         CredentialTypes.GIT,
         CredentialsUtil.github_credentials(),
         marks=pytest.mark.github,
+        id=f"{CredentialTypes.GIT}_github",
     ),
     pytest.param(
         CredentialTypes.CONFLUENCE,
@@ -62,6 +68,7 @@ valid_integrations = [
             pytest.mark.confluence,
             pytest.mark.project_management,
         ],
+        id=f"{CredentialTypes.CONFLUENCE}_server",
     ),
     pytest.param(
         CredentialTypes.CONFLUENCE,
@@ -71,6 +78,7 @@ valid_integrations = [
             pytest.mark.confluence_cloud,
             pytest.mark.project_management,
         ],
+        id=f"{CredentialTypes.CONFLUENCE}_cloud",
     ),
     pytest.param(
         CredentialTypes.JIRA,
@@ -79,6 +87,7 @@ valid_integrations = [
             pytest.mark.jira,
             pytest.mark.project_management,
         ],
+        id=f"{CredentialTypes.JIRA}_server",
     ),
     pytest.param(
         CredentialTypes.JIRA,
@@ -88,36 +97,43 @@ valid_integrations = [
             pytest.mark.jira_cloud,
             pytest.mark.project_management,
         ],
+        id=f"{CredentialTypes.JIRA}_cloud",
     ),
     pytest.param(
         CredentialTypes.SQL,
         CredentialsUtil.sql_credentials(DataBaseDialect.POSTGRES),
         marks=pytest.mark.sql,
+        id=DataBaseDialect.POSTGRES,
     ),
     pytest.param(
         CredentialTypes.SQL,
         CredentialsUtil.sql_credentials(DataBaseDialect.MY_SQL),
         marks=pytest.mark.sql,
+        id=DataBaseDialect.MY_SQL,
     ),
     pytest.param(
         CredentialTypes.ELASTIC,
         CredentialsUtil.elastic_credentials(),
         marks=pytest.mark.elastic,
+        id=CredentialTypes.ELASTIC,
     ),
     pytest.param(
         CredentialTypes.MCP,
         CredentialsUtil.mcp_credentials(),
         marks=pytest.mark.mcp,
+        id=CredentialTypes.MCP,
     ),
     pytest.param(
         CredentialTypes.AZURE_DEVOPS,
         CredentialsUtil.azure_devops_credentials(),
         marks=pytest.mark.azure,
+        id=CredentialTypes.AZURE_DEVOPS,
     ),
     pytest.param(
         CredentialTypes.FILESYSTEM,
         CredentialsUtil.file_system_credentials(),
         marks=pytest.mark.file_system,
+        id=CredentialTypes.FILESYSTEM,
     ),
     pytest.param(
         CredentialTypes.EMAIL,
@@ -126,6 +142,7 @@ valid_integrations = [
             pytest.mark.notification,
             pytest.mark.email,
         ],
+        id=CredentialTypes.EMAIL,
     ),
     pytest.param(
         CredentialTypes.TELEGRAM,
@@ -134,16 +151,19 @@ valid_integrations = [
             pytest.mark.notification,
             pytest.mark.telegram,
         ],
+        id=CredentialTypes.TELEGRAM,
     ),
     pytest.param(
         CredentialTypes.SERVICE_NOW,
         CredentialsUtil.servicenow_credentials(),
         marks=pytest.mark.servicenow,
+        id=CredentialTypes.SERVICE_NOW,
     ),
     pytest.param(
         CredentialTypes.KEYCLOAK,
         CredentialsUtil.keycloak_credentials(),
         marks=pytest.mark.keycloak,
+        id=CredentialTypes.KEYCLOAK,
     ),
     pytest.param(
         CredentialTypes.KUBERNETES,
@@ -152,10 +172,17 @@ valid_integrations = [
             pytest.mark.kubernetes,
             pytest.mark.cloud,
             pytest.mark.skipif(
-
+                EnvironmentResolver.is_azure(),
                 reason="Still have an issue with encoding long strings",
             ),
         ],
+        id=CredentialTypes.KUBERNETES,
+    ),
+    pytest.param(
+        CredentialTypes.REPORT_PORTAL,
+        CredentialsUtil.report_portal_credentials(),
+        marks=pytest.mark.report_portal,
+        id=CredentialTypes.REPORT_PORTAL,
     ),
 ]
 
@@ -167,6 +194,7 @@ testable_integrations = [
             pytest.mark.aws,
             pytest.mark.cloud,
         ],
+        id=CredentialTypes.AWS,
     ),
     pytest.param(
         CredentialTypes.AZURE,
@@ -175,6 +203,7 @@ testable_integrations = [
             pytest.mark.azure,
             pytest.mark.cloud,
         ],
+        id=CredentialTypes.AZURE,
     ),
     pytest.param(
         CredentialTypes.GCP,
@@ -183,20 +212,23 @@ testable_integrations = [
             pytest.mark.gcp,
             pytest.mark.cloud,
             pytest.mark.skipif(
-
+                EnvironmentResolver.is_azure(),
                 reason="Still have an issue with encoding long strings",
             ),
         ],
+        id=CredentialTypes.GCP,
     ),
     pytest.param(
         CredentialTypes.SONAR,
         CredentialsUtil.sonar_credentials(),
         marks=pytest.mark.sonar,
+        id=f"{CredentialTypes.SONAR}_server",
     ),
     pytest.param(
         CredentialTypes.SONAR,
         CredentialsUtil.sonar_cloud_credentials(),
         marks=pytest.mark.sonar,
+        id=f"{CredentialTypes.SONAR}_cloud",
     ),
     pytest.param(
         CredentialTypes.CONFLUENCE,
@@ -205,6 +237,7 @@ testable_integrations = [
             pytest.mark.confluence,
             pytest.mark.project_management,
         ],
+        id=f"{CredentialTypes.CONFLUENCE}_server",
     ),
     pytest.param(
         CredentialTypes.CONFLUENCE,
@@ -214,6 +247,7 @@ testable_integrations = [
             pytest.mark.confluence_cloud,
             pytest.mark.project_management,
         ],
+        id=f"{CredentialTypes.CONFLUENCE}_cloud",
     ),
     pytest.param(
         CredentialTypes.JIRA,
@@ -222,6 +256,7 @@ testable_integrations = [
             pytest.mark.jira,
             pytest.mark.project_management,
         ],
+        id=f"{CredentialTypes.JIRA}_server",
     ),
     pytest.param(
         CredentialTypes.JIRA,
@@ -231,6 +266,7 @@ testable_integrations = [
             pytest.mark.jira_cloud,
             pytest.mark.project_management,
         ],
+        id=f"{CredentialTypes.JIRA}_cloud",
     ),
     pytest.param(
         CredentialTypes.EMAIL,
@@ -239,15 +275,17 @@ testable_integrations = [
             pytest.mark.email,
             pytest.mark.notification,
             pytest.mark.skipif(
-
+                EnvironmentResolver.is_localhost(),
                 reason="Skipping this test on local environment",
             ),
         ],
+        id=CredentialTypes.EMAIL,
     ),
     pytest.param(
         CredentialTypes.SERVICE_NOW,
         CredentialsUtil.servicenow_credentials(),
         marks=pytest.mark.servicenow,
+        id=CredentialTypes.SERVICE_NOW,
     ),
     pytest.param(
         CredentialTypes.KUBERNETES,
@@ -256,10 +294,17 @@ testable_integrations = [
             pytest.mark.kubernetes,
             pytest.mark.cloud,
             pytest.mark.skipif(
-
+                EnvironmentResolver.is_azure(),
                 reason="Still have an issue with encoding long strings",
             ),
         ],
+        id=CredentialTypes.KUBERNETES,
+    ),
+    pytest.param(
+        CredentialTypes.REPORT_PORTAL,
+        CredentialsUtil.report_portal_credentials(),
+        marks=pytest.mark.report_portal,
+        id=CredentialTypes.REPORT_PORTAL,
     ),
 ]
 
@@ -272,6 +317,7 @@ invalid_integrations = [
             pytest.mark.aws,
             pytest.mark.cloud,
         ],
+        id=CredentialTypes.AWS,
     ),
     pytest.param(
         CredentialTypes.AZURE,
@@ -281,6 +327,7 @@ invalid_integrations = [
             pytest.mark.azure,
             pytest.mark.cloud,
         ],
+        id=CredentialTypes.AZURE,
     ),
     pytest.param(
         CredentialTypes.GCP,
@@ -292,22 +339,25 @@ invalid_integrations = [
             pytest.mark.gcp,
             pytest.mark.cloud,
             pytest.mark.skipif(
-
+                EnvironmentResolver.is_azure(),
                 reason="Still have an issue with encoding long strings",
             ),
         ],
+        id=CredentialTypes.GCP,
     ),
     pytest.param(
         CredentialTypes.SONAR,
         CredentialsUtil.invalid_sonar_credentials(),
         "Invalid token",
         marks=pytest.mark.sonar,
+        id=f"{CredentialTypes.SONAR}_server",
     ),
     pytest.param(
         CredentialTypes.SONAR,
         CredentialsUtil.invalid_sonar_cloud_credentials(),
         "Invalid token",
         marks=pytest.mark.sonar,
+        id=f"{CredentialTypes.SONAR}_cloud",
     ),
     pytest.param(
         CredentialTypes.EMAIL,
@@ -317,28 +367,32 @@ invalid_integrations = [
             pytest.mark.email,
             pytest.mark.notification,
             pytest.mark.skipif(
-
+                EnvironmentResolver.is_localhost(),
                 reason="Skipping this test on local environment",
             ),
         ],
+        id=CredentialTypes.EMAIL,
     ),
     pytest.param(
         CredentialTypes.JIRA,
         CredentialsUtil.invalid_jira_credentials(),
         "Unauthorized (401)",
         marks=pytest.mark.jira,
+        id=CredentialTypes.JIRA,
     ),
     pytest.param(
         CredentialTypes.CONFLUENCE,
         CredentialsUtil.invalid_confluence_credentials(),
         "Access denied",
         marks=pytest.mark.confluence,
+        id=CredentialTypes.CONFLUENCE,
     ),
     pytest.param(
         CredentialTypes.SERVICE_NOW,
         CredentialsUtil.invalid_servicenow_credentials(),
         'ServiceNow tool exception. Status: 401. Response: {"error":{"message":"User Not Authenticated","detail":"Required to provide Auth information"}',
         marks=pytest.mark.servicenow,
+        id=CredentialTypes.SERVICE_NOW,
     ),
     pytest.param(
         CredentialTypes.KUBERNETES,
@@ -348,5 +402,13 @@ invalid_integrations = [
             pytest.mark.kubernetes,
             pytest.mark.cloud,
         ],
+        id=CredentialTypes.KUBERNETES,
+    ),
+    pytest.param(
+        CredentialTypes.REPORT_PORTAL,
+        CredentialsUtil.invalid_report_portal_credentials(),
+        "401 Client Error: for url: https://report-portal.core.kuberocketci.io/api/v1/epm-cdme/launch?page.page=1",
+        marks=pytest.mark.report_portal,
+        id=CredentialTypes.REPORT_PORTAL,
     ),
 ]
codemie_test_harness/tests/test_data/llm_test_data.py

@@ -3,6 +3,7 @@
 from dataclasses import dataclass
 from typing import List
 from codemie_test_harness.tests.enums.model_types import ModelTypes
+from codemie_test_harness.tests.enums.environment import Environment
 
 
 @dataclass
@@ -10,14 +11,14 @@ class LlmResponseData:
     """Data class to store LLM model response information."""
 
     model_type: ModelTypes
-    environments: List[
+    environments: List[Environment]
 
 
-# Define environment sets
-AZURE_ENVS =
-GCP_ENVS =
-AWS_ENVS =
-OTHER_ENVS = [
+# Define environment sets using centralized enum methods (type-safe enums)
+AZURE_ENVS = Environment.get_azure_environments()
+GCP_ENVS = Environment.get_gcp_environments()
+AWS_ENVS = Environment.get_aws_environments()
+OTHER_ENVS = [Environment.PREVIEW, Environment.LOCALHOST]
 
 # Define model responses with their environment restrictions
 MODEL_RESPONSES = [
@@ -52,6 +53,7 @@ MODEL_RESPONSES = [
     LlmResponseData(ModelTypes.CLAUDE_4_SONNET, AWS_ENVS),
     LlmResponseData(ModelTypes.CLAUDE_4_OPUS, AWS_ENVS),
     LlmResponseData(ModelTypes.CLAUDE_4_1_OPUS, AWS_ENVS),
+    LlmResponseData(ModelTypes.CLAUDE_4_SONNET_1M, [Environment.AWS]),
     # Other LLMs test data
     LlmResponseData(ModelTypes.RLAB_QWQ_32B, OTHER_ENVS),
     LlmResponseData(ModelTypes.DEEPSEEK_R1, OTHER_ENVS),
codemie_test_harness/tests/test_data/project_management_test_data.py

@@ -20,6 +20,7 @@ pm_tools_test_data = [
         JIRA_TOOL_PROMPT,
         RESPONSE_FOR_JIRA_TOOL,
         marks=pytest.mark.jira,
+        id=ProjectManagementIntegrationType.JIRA,
     ),
     pytest.param(
         ProjectManagementTool.CONFLUENCE,
@@ -27,6 +28,7 @@ pm_tools_test_data = [
         CONFLUENCE_TOOL_PROMPT,
         RESPONSE_FOR_CONFLUENCE_TOOL,
         marks=pytest.mark.confluence,
+        id=ProjectManagementIntegrationType.CONFLUENCE,
     ),
     pytest.param(
         ProjectManagementTool.JIRA,
@@ -34,6 +36,7 @@ pm_tools_test_data = [
         JIRA_CLOUD_TOOL_PROMPT,
         RESPONSE_FOR_JIRA_CLOUD_TOOL,
         marks=[pytest.mark.jira, pytest.mark.jira_cloud],
+        id=ProjectManagementIntegrationType.JIRA_CLOUD,
     ),
     pytest.param(
         ProjectManagementTool.CONFLUENCE,
@@ -41,5 +44,6 @@ pm_tools_test_data = [
         CONFLUENCE_CLOUD_TOOL_PROMPT,
         RESPONSE_FOR_CONFLUENCE_CLOUD_TOOL,
         marks=[pytest.mark.confluence, pytest.mark.confluence_cloud],
+        id=ProjectManagementIntegrationType.CONFLUENCE_CLOUD,
     ),
 ]
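The recurring change across these test-data modules is the addition of explicit id= arguments to pytest.param(...) entries, so parametrized tests get stable, readable node ids instead of pytest's auto-generated ones. A small self-contained illustration of the effect; the file name and parameter values below are hypothetical, not taken from the package:

import pytest

# Parameter sets with explicit ids, mirroring the style used in the diffs above.
integrations = [
    pytest.param("aws-credentials", id="aws"),
    pytest.param("gitlab-token", id="git_gitlab"),
]


@pytest.mark.parametrize("credentials", integrations)
def test_integration(credentials):
    assert credentials


# `pytest --collect-only -q` then reports node ids such as:
#   test_integrations.py::test_integration[aws]
#   test_integrations.py::test_integration[git_gitlab]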