codemie-test-harness 0.1.159__py3-none-any.whl → 0.1.161__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (60)
  1. codemie_test_harness/tests/assistant/datasource/test_confluence_datasource.py +2 -1
  2. codemie_test_harness/tests/assistant/datasource/test_jira_datasource.py +2 -1
  3. codemie_test_harness/tests/assistant/tools/cloud/test_cloud_tools.py +0 -7
  4. codemie_test_harness/tests/assistant/tools/codebase/test_codebase_tools.py +0 -1
  5. codemie_test_harness/tests/assistant/tools/datamanagement/test_assistant_with_data_management_tools.py +4 -5
  6. codemie_test_harness/tests/assistant/tools/filemanagement/test_assistant_with_file_management_tools.py +2 -9
  7. codemie_test_harness/tests/assistant/tools/mcp/test_cli_mcp_server.py +5 -7
  8. codemie_test_harness/tests/assistant/tools/mcp/test_mcp_servers.py +5 -7
  9. codemie_test_harness/tests/assistant/tools/notification/test_assistant_notification_tools.py +3 -3
  10. codemie_test_harness/tests/assistant/tools/openapi/test_assistant_with_open_api_tools.py +3 -2
  11. codemie_test_harness/tests/conftest.py +6 -2
  12. codemie_test_harness/tests/enums/environment.py +102 -0
  13. codemie_test_harness/tests/enums/model_types.py +1 -0
  14. codemie_test_harness/tests/integrations/project/test_default_integrations.py +3 -11
  15. codemie_test_harness/tests/integrations/project/test_project_integrations.py +0 -132
  16. codemie_test_harness/tests/integrations/user/test_default_integrations.py +3 -11
  17. codemie_test_harness/tests/integrations/user/test_user_integrations.py +0 -132
  18. codemie_test_harness/tests/llm/assistants/test_lite_llm.py +96 -0
  19. codemie_test_harness/tests/llm/assistants/test_llm.py +9 -9
  20. codemie_test_harness/tests/service/test_assistant_service.py +2 -2
  21. codemie_test_harness/tests/test_data/cloud_tools_test_data.py +32 -11
  22. codemie_test_harness/tests/test_data/codebase_tools_test_data.py +2 -0
  23. codemie_test_harness/tests/test_data/data_management_tools_test_data.py +3 -3
  24. codemie_test_harness/tests/test_data/direct_tools/cloud_tools_test_data.py +7 -4
  25. codemie_test_harness/tests/test_data/direct_tools/codebase_tools_test_data.py +2 -0
  26. codemie_test_harness/tests/test_data/direct_tools/data_management_tools_test_data.py +4 -5
  27. codemie_test_harness/tests/test_data/direct_tools/file_management_tools_test_data.py +2 -2
  28. codemie_test_harness/tests/test_data/direct_tools/notification_tools_test_data.py +5 -2
  29. codemie_test_harness/tests/test_data/direct_tools/project_management_tools_test_data.py +2 -0
  30. codemie_test_harness/tests/test_data/direct_tools/research_tools_test_data.py +1 -0
  31. codemie_test_harness/tests/test_data/direct_tools/vcs_tools_test_data.py +3 -0
  32. codemie_test_harness/tests/test_data/file_management_tools_test_data.py +9 -5
  33. codemie_test_harness/tests/test_data/index_test_data.py +9 -11
  34. codemie_test_harness/tests/test_data/integrations_test_data.py +55 -9
  35. codemie_test_harness/tests/test_data/llm_test_data.py +8 -6
  36. codemie_test_harness/tests/test_data/project_management_test_data.py +4 -0
  37. codemie_test_harness/tests/test_data/vcs_tools_test_data.py +11 -2
  38. codemie_test_harness/tests/utils/aws_parameters_store.py +23 -2
  39. codemie_test_harness/tests/utils/constants.py +1 -1
  40. codemie_test_harness/tests/utils/env_resolver.py +119 -0
  41. codemie_test_harness/tests/workflow/assistant_tools/cloud/test_workflow_with_assistant_cloud_tools.py +0 -7
  42. codemie_test_harness/tests/workflow/assistant_tools/codebase/test_worfklow_with_assistant_codebase_tools.py +0 -1
  43. codemie_test_harness/tests/workflow/assistant_tools/data_management/test_workflow_with_assistant_with_data_management_tools.py +3 -5
  44. codemie_test_harness/tests/workflow/assistant_tools/file_management/test_workflow_with_assistant_with_file_management_tools.py +2 -9
  45. codemie_test_harness/tests/workflow/assistant_tools/mcp/test_workflow_with_assistant_with_mcp_server.py +5 -10
  46. codemie_test_harness/tests/workflow/assistant_tools/notification/test_workflow_with_assistant_notification_tools.py +3 -2
  47. codemie_test_harness/tests/workflow/assistant_tools/open_api/test_workflow_with_assistant_with_open_api_tools.py +3 -2
  48. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_data_management_tools_sql.py +3 -2
  49. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_open_api_tools.py +3 -2
  50. codemie_test_harness/tests/workflow/virtual_assistant_tools/cloud/test_workflow_with_cloud_tools.py +0 -7
  51. codemie_test_harness/tests/workflow/virtual_assistant_tools/codebase/test_workflow_with_codebase_tools.py +0 -1
  52. codemie_test_harness/tests/workflow/virtual_assistant_tools/data_management/test_workflow_with_data_management_tools.py +3 -5
  53. codemie_test_harness/tests/workflow/virtual_assistant_tools/file_management/test_workflow_with_file_management_tools.py +2 -9
  54. codemie_test_harness/tests/workflow/virtual_assistant_tools/mcp/test_workflow_with_mcp_server.py +5 -11
  55. codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/test_workflow_with_notification_tools.py +3 -3
  56. codemie_test_harness/tests/workflow/virtual_assistant_tools/open_api/test_workflow_with_open_api_tools.py +3 -3
  57. {codemie_test_harness-0.1.159.dist-info → codemie_test_harness-0.1.161.dist-info}/METADATA +2 -2
  58. {codemie_test_harness-0.1.159.dist-info → codemie_test_harness-0.1.161.dist-info}/RECORD +60 -57
  59. {codemie_test_harness-0.1.159.dist-info → codemie_test_harness-0.1.161.dist-info}/WHEEL +0 -0
  60. {codemie_test_harness-0.1.159.dist-info → codemie_test_harness-0.1.161.dist-info}/entry_points.txt +0 -0
@@ -1,4 +1,4 @@
- import os
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver

  import pytest

@@ -89,29 +89,33 @@ GENERATE_IMAGE_TOOL_TASK = """
  """

  file_management_tools_test_data = [
-     (
+     pytest.param(
          FileManagementTool.PYTHON_CODE_INTERPRETER,
          CODE_INTERPRETER_TOOL_TASK,
          RESPONSE_FOR_CODE_INTERPRETER,
+         id=FileManagementTool.PYTHON_CODE_INTERPRETER,
      ),
      pytest.param(
          FileManagementTool.LIST_DIRECTORY,
          LIST_DIR_TOOL_TASK,
          RESPONSE_FOR_LIST_DIR,
          marks=pytest.mark.skipif(
-             os.getenv("ENV") == "local",
+             EnvironmentResolver.is_localhost(),
              reason="Skipping this test on local environment",
          ),
+         id=FileManagementTool.LIST_DIRECTORY,
      ),
-     (
+     pytest.param(
          FileManagementTool.WRITE_FILE,
          WRITE_FILE_TASK,
          RESPONSE_FOR_WRITE_FILE_TASK,
+         id=FileManagementTool.WRITE_FILE,
      ),
-     (
+     pytest.param(
          FileManagementTool.RUN_COMMAND_LINE,
          COMMAND_LINE_TOOL_TASK,
          RESPONSE_FOR_COMMAND_LINE_TASK,
+         id=FileManagementTool.RUN_COMMAND_LINE,
      ),
  ]

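
Both hunks above come from a single pattern change (the +9/-5 counts match codemie_test_harness/tests/test_data/file_management_tools_test_data.py in the file list): plain tuples become pytest.param entries with explicit ids, and the raw os.getenv("ENV") check is replaced by the new EnvironmentResolver helper. env_resolver.py (+119 lines) is added in this release but its body is not part of this diff, so the following is only a hedged sketch of what the helpers used here (is_localhost, is_azure, is_preview, get_environment) might look like; the real module may differ.

    # Hypothetical sketch; env_resolver.py itself is not shown in this diff.
    # Names are inferred from call sites elsewhere in the diff, and the mapping
    # of the ENV variable onto enum members is an assumption.
    import os

    from codemie_test_harness.tests.enums.environment import Environment


    def get_environment() -> Environment:
        # Resolve the ENV variable to an Environment member, defaulting to preview.
        return Environment(os.getenv("ENV", Environment.PREVIEW.value))


    class EnvironmentResolver:
        @staticmethod
        def is_localhost() -> bool:
            return get_environment() == Environment.LOCALHOST

        @staticmethod
        def is_azure() -> bool:
            return get_environment() == Environment.AZURE

        @staticmethod
        def is_preview() -> bool:
            return get_environment() == Environment.PREVIEW
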
 
@@ -1,32 +1,30 @@
- import os
  from dataclasses import dataclass
  from typing import List

  import pytest

+ from codemie_test_harness.tests.utils.env_resolver import get_environment
+ from codemie_test_harness.tests.enums.environment import Environment
+


  @dataclass
  class EmbeddingData:
      """Data class to store Embedding models."""

      model_type: str
-     environments: List[str]
-
+     environments: List[Environment]

- AZURE_ENVS = ["preview", "azure", "local"]
- GCP_ENVS = ["preview", "gcp", "local"]
- AWS_ENVS = ["preview", "aws", "local"]

  MODELS = [
-     EmbeddingData("titan", AWS_ENVS),
-     EmbeddingData("gecko", GCP_ENVS),
-     EmbeddingData("ada-002", AZURE_ENVS),
+     EmbeddingData("titan", Environment.get_aws_environments()),
+     EmbeddingData("gecko", Environment.get_gcp_environments()),
+     EmbeddingData("ada-002", Environment.get_azure_environments()),
  ]


  def generate_test_data():
      """Generate pytest parameters for Embedding models"""
-     env = os.getenv("ENV")
+     env = get_environment()
      test_data = []

      for model in MODELS:
@@ -35,7 +33,7 @@ def generate_test_data():
                  model.model_type,
                  marks=pytest.mark.skipif(
                      env not in model.environments,
-                     reason=f"Skip on non {'/'.join(model.environments[:-1])} envs",
+                     reason=f"Skip on non {'/'.join(str(env) for env in model.environments[:-1])} envs",
                  ),
              )
          )
@@ -1,10 +1,9 @@
- import os
-
  import pytest

  from codemie_sdk.models.integration import CredentialTypes
  from codemie_test_harness.tests.enums.integrations import DataBaseDialect
  from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver

  valid_integrations = [
      pytest.param(
@@ -14,6 +13,7 @@ valid_integrations = [
              pytest.mark.aws,
              pytest.mark.cloud,
          ],
+         id=CredentialTypes.AWS,
      ),
      pytest.param(
          CredentialTypes.AZURE,
@@ -22,6 +22,7 @@ valid_integrations = [
              pytest.mark.azure,
              pytest.mark.cloud,
          ],
+         id=CredentialTypes.AZURE,
      ),
      pytest.param(
          CredentialTypes.GCP,
@@ -30,30 +31,35 @@ valid_integrations = [
              pytest.mark.gcp,
              pytest.mark.cloud,
              pytest.mark.skipif(
-                 os.getenv("ENV") == "azure",
+                 EnvironmentResolver.is_azure(),
                  reason="Still have an issue with encoding long strings",
              ),
          ],
+         id=CredentialTypes.GCP,
      ),
      pytest.param(
          CredentialTypes.SONAR,
          CredentialsUtil.sonar_credentials(),
          marks=pytest.mark.sonar,
+         id=f"{CredentialTypes.SONAR}_server",
      ),
      pytest.param(
          CredentialTypes.SONAR,
          CredentialsUtil.sonar_cloud_credentials(),
          marks=pytest.mark.sonar,
+         id=f"{CredentialTypes.SONAR}_cloud",
      ),
      pytest.param(
          CredentialTypes.GIT,
          CredentialsUtil.gitlab_credentials(),
          marks=pytest.mark.gitlab,
+         id=f"{CredentialTypes.GIT}_gitlab",
      ),
      pytest.param(
          CredentialTypes.GIT,
          CredentialsUtil.github_credentials(),
          marks=pytest.mark.github,
+         id=f"{CredentialTypes.GIT}_github",
      ),
      pytest.param(
          CredentialTypes.CONFLUENCE,
@@ -62,6 +68,7 @@ valid_integrations = [
              pytest.mark.confluence,
              pytest.mark.project_management,
          ],
+         id=f"{CredentialTypes.CONFLUENCE}_server",
      ),
      pytest.param(
          CredentialTypes.CONFLUENCE,
@@ -71,6 +78,7 @@ valid_integrations = [
              pytest.mark.confluence_cloud,
              pytest.mark.project_management,
          ],
+         id=f"{CredentialTypes.CONFLUENCE}_cloud",
      ),
      pytest.param(
          CredentialTypes.JIRA,
@@ -79,6 +87,7 @@ valid_integrations = [
              pytest.mark.jira,
              pytest.mark.project_management,
          ],
+         id=f"{CredentialTypes.JIRA}_server",
      ),
      pytest.param(
          CredentialTypes.JIRA,
@@ -88,36 +97,43 @@ valid_integrations = [
              pytest.mark.jira_cloud,
              pytest.mark.project_management,
          ],
+         id=f"{CredentialTypes.JIRA}_cloud",
      ),
      pytest.param(
          CredentialTypes.SQL,
          CredentialsUtil.sql_credentials(DataBaseDialect.POSTGRES),
          marks=pytest.mark.sql,
+         id=DataBaseDialect.POSTGRES,
      ),
      pytest.param(
          CredentialTypes.SQL,
          CredentialsUtil.sql_credentials(DataBaseDialect.MY_SQL),
          marks=pytest.mark.sql,
+         id=DataBaseDialect.MY_SQL,
      ),
      pytest.param(
          CredentialTypes.ELASTIC,
          CredentialsUtil.elastic_credentials(),
          marks=pytest.mark.elastic,
+         id=CredentialTypes.ELASTIC,
      ),
      pytest.param(
          CredentialTypes.MCP,
          CredentialsUtil.mcp_credentials(),
          marks=pytest.mark.mcp,
+         id=CredentialTypes.MCP,
      ),
      pytest.param(
          CredentialTypes.AZURE_DEVOPS,
          CredentialsUtil.azure_devops_credentials(),
          marks=pytest.mark.azure,
+         id=CredentialTypes.AZURE_DEVOPS,
      ),
      pytest.param(
          CredentialTypes.FILESYSTEM,
          CredentialsUtil.file_system_credentials(),
          marks=pytest.mark.file_system,
+         id=CredentialTypes.FILESYSTEM,
      ),
      pytest.param(
          CredentialTypes.EMAIL,
@@ -126,6 +142,7 @@ valid_integrations = [
              pytest.mark.notification,
              pytest.mark.email,
          ],
+         id=CredentialTypes.EMAIL,
      ),
      pytest.param(
          CredentialTypes.TELEGRAM,
@@ -134,16 +151,19 @@ valid_integrations = [
              pytest.mark.notification,
              pytest.mark.telegram,
          ],
+         id=CredentialTypes.TELEGRAM,
      ),
      pytest.param(
          CredentialTypes.SERVICE_NOW,
          CredentialsUtil.servicenow_credentials(),
          marks=pytest.mark.servicenow,
+         id=CredentialTypes.SERVICE_NOW,
      ),
      pytest.param(
          CredentialTypes.KEYCLOAK,
          CredentialsUtil.keycloak_credentials(),
          marks=pytest.mark.keycloak,
+         id=CredentialTypes.KEYCLOAK,
      ),
      pytest.param(
          CredentialTypes.KUBERNETES,
@@ -152,15 +172,17 @@ valid_integrations = [
              pytest.mark.kubernetes,
              pytest.mark.cloud,
              pytest.mark.skipif(
-                 os.getenv("ENV") == "azure",
+                 EnvironmentResolver.is_azure(),
                  reason="Still have an issue with encoding long strings",
              ),
          ],
+         id=CredentialTypes.KUBERNETES,
      ),
      pytest.param(
          CredentialTypes.REPORT_PORTAL,
          CredentialsUtil.report_portal_credentials(),
          marks=pytest.mark.report_portal,
+         id=CredentialTypes.REPORT_PORTAL,
      ),
  ]

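
The integrations_test_data.py hunks above and below (+55/-9 in the file list) repeat one idea: every pytest.param entry in valid_integrations, testable_integrations and invalid_integrations gains an explicit id, and the azure/local skips move to EnvironmentResolver. Without an explicit id, pytest derives the test ID from the parameter values or falls back to param0, param1, ..., so report names shift whenever entries are added or reordered. A small standalone illustration of the effect, not taken from the package:

    # Illustration only (not package code): explicit ids keep node names stable.
    import pytest


    @pytest.mark.parametrize(
        "credential_type",
        [
            pytest.param("aws", id="aws"),
            pytest.param("gcp", id="gcp"),
        ],
    )
    def test_integration(credential_type):
        # Collected as test_integration[aws] and test_integration[gcp]
        # rather than auto-generated suffixes.
        assert credential_type in {"aws", "gcp"}
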
 
@@ -172,6 +194,7 @@ testable_integrations = [
              pytest.mark.aws,
              pytest.mark.cloud,
          ],
+         id=CredentialTypes.AWS,
      ),
      pytest.param(
          CredentialTypes.AZURE,
@@ -180,6 +203,7 @@ testable_integrations = [
              pytest.mark.azure,
              pytest.mark.cloud,
          ],
+         id=CredentialTypes.AZURE,
      ),
      pytest.param(
          CredentialTypes.GCP,
@@ -188,20 +212,23 @@ testable_integrations = [
              pytest.mark.gcp,
              pytest.mark.cloud,
              pytest.mark.skipif(
-                 os.getenv("ENV") == "azure",
+                 EnvironmentResolver.is_azure(),
                  reason="Still have an issue with encoding long strings",
              ),
          ],
+         id=CredentialTypes.GCP,
      ),
      pytest.param(
          CredentialTypes.SONAR,
          CredentialsUtil.sonar_credentials(),
          marks=pytest.mark.sonar,
+         id=f"{CredentialTypes.SONAR}_server",
      ),
      pytest.param(
          CredentialTypes.SONAR,
          CredentialsUtil.sonar_cloud_credentials(),
          marks=pytest.mark.sonar,
+         id=f"{CredentialTypes.SONAR}_cloud",
      ),
      pytest.param(
          CredentialTypes.CONFLUENCE,
@@ -210,6 +237,7 @@ testable_integrations = [
              pytest.mark.confluence,
              pytest.mark.project_management,
          ],
+         id=f"{CredentialTypes.CONFLUENCE}_server",
      ),
      pytest.param(
          CredentialTypes.CONFLUENCE,
@@ -219,6 +247,7 @@ testable_integrations = [
              pytest.mark.confluence_cloud,
              pytest.mark.project_management,
          ],
+         id=f"{CredentialTypes.CONFLUENCE}_cloud",
      ),
      pytest.param(
          CredentialTypes.JIRA,
@@ -227,6 +256,7 @@ testable_integrations = [
              pytest.mark.jira,
              pytest.mark.project_management,
          ],
+         id=f"{CredentialTypes.JIRA}_server",
      ),
      pytest.param(
          CredentialTypes.JIRA,
@@ -236,6 +266,7 @@ testable_integrations = [
              pytest.mark.jira_cloud,
              pytest.mark.project_management,
          ],
+         id=f"{CredentialTypes.JIRA}_cloud",
      ),
      pytest.param(
          CredentialTypes.EMAIL,
@@ -244,15 +275,17 @@ testable_integrations = [
              pytest.mark.email,
              pytest.mark.notification,
              pytest.mark.skipif(
-                 os.getenv("ENV") == "local",
+                 EnvironmentResolver.is_localhost(),
                  reason="Skipping this test on local environment",
              ),
          ],
+         id=CredentialTypes.EMAIL,
      ),
      pytest.param(
          CredentialTypes.SERVICE_NOW,
          CredentialsUtil.servicenow_credentials(),
          marks=pytest.mark.servicenow,
+         id=CredentialTypes.SERVICE_NOW,
      ),
      pytest.param(
          CredentialTypes.KUBERNETES,
@@ -261,15 +294,17 @@ testable_integrations = [
              pytest.mark.kubernetes,
              pytest.mark.cloud,
              pytest.mark.skipif(
-                 os.getenv("ENV") == "azure",
+                 EnvironmentResolver.is_azure(),
                  reason="Still have an issue with encoding long strings",
              ),
          ],
+         id=CredentialTypes.KUBERNETES,
      ),
      pytest.param(
          CredentialTypes.REPORT_PORTAL,
          CredentialsUtil.report_portal_credentials(),
          marks=pytest.mark.report_portal,
+         id=CredentialTypes.REPORT_PORTAL,
      ),
  ]

@@ -282,6 +317,7 @@ invalid_integrations = [
              pytest.mark.aws,
              pytest.mark.cloud,
          ],
+         id=CredentialTypes.AWS,
      ),
      pytest.param(
          CredentialTypes.AZURE,
@@ -291,6 +327,7 @@ invalid_integrations = [
              pytest.mark.azure,
              pytest.mark.cloud,
          ],
+         id=CredentialTypes.AZURE,
      ),
      pytest.param(
          CredentialTypes.GCP,
@@ -302,22 +339,25 @@ invalid_integrations = [
              pytest.mark.gcp,
              pytest.mark.cloud,
              pytest.mark.skipif(
-                 os.getenv("ENV") == "azure",
+                 EnvironmentResolver.is_azure(),
                  reason="Still have an issue with encoding long strings",
              ),
          ],
+         id=CredentialTypes.GCP,
      ),
      pytest.param(
          CredentialTypes.SONAR,
          CredentialsUtil.invalid_sonar_credentials(),
          "Invalid token",
          marks=pytest.mark.sonar,
+         id=f"{CredentialTypes.SONAR}_server",
      ),
      pytest.param(
          CredentialTypes.SONAR,
          CredentialsUtil.invalid_sonar_cloud_credentials(),
          "Invalid token",
          marks=pytest.mark.sonar,
+         id=f"{CredentialTypes.SONAR}_cloud",
      ),
      pytest.param(
          CredentialTypes.EMAIL,
327
367
  pytest.mark.email,
328
368
  pytest.mark.notification,
329
369
  pytest.mark.skipif(
330
- os.getenv("ENV") == "local",
370
+ EnvironmentResolver.is_localhost(),
331
371
  reason="Skipping this test on local environment",
332
372
  ),
333
373
  ],
374
+ id=CredentialTypes.EMAIL,
334
375
  ),
335
376
  pytest.param(
336
377
  CredentialTypes.JIRA,
337
378
  CredentialsUtil.invalid_jira_credentials(),
338
379
  "Unauthorized (401)",
339
380
  marks=pytest.mark.jira,
381
+ id=CredentialTypes.JIRA,
340
382
  ),
341
383
  pytest.param(
342
384
  CredentialTypes.CONFLUENCE,
343
385
  CredentialsUtil.invalid_confluence_credentials(),
344
386
  "Access denied",
345
387
  marks=pytest.mark.confluence,
388
+ id=CredentialTypes.CONFLUENCE,
346
389
  ),
347
390
  pytest.param(
348
391
  CredentialTypes.SERVICE_NOW,
349
392
  CredentialsUtil.invalid_servicenow_credentials(),
350
393
  'ServiceNow tool exception. Status: 401. Response: {"error":{"message":"User Not Authenticated","detail":"Required to provide Auth information"}',
351
394
  marks=pytest.mark.servicenow,
395
+ id=CredentialTypes.SERVICE_NOW,
352
396
  ),
353
397
  pytest.param(
354
398
  CredentialTypes.KUBERNETES,
@@ -358,11 +402,13 @@ invalid_integrations = [
              pytest.mark.kubernetes,
              pytest.mark.cloud,
          ],
+         id=CredentialTypes.KUBERNETES,
      ),
      pytest.param(
          CredentialTypes.REPORT_PORTAL,
          CredentialsUtil.invalid_report_portal_credentials(),
          "401 Client Error: for url: https://report-portal.core.kuberocketci.io/api/v1/epm-cdme/launch?page.page=1",
          marks=pytest.mark.report_portal,
+         id=CredentialTypes.REPORT_PORTAL,
      ),
  ]
@@ -3,6 +3,7 @@
  from dataclasses import dataclass
  from typing import List
  from codemie_test_harness.tests.enums.model_types import ModelTypes
+ from codemie_test_harness.tests.enums.environment import Environment


  @dataclass
@@ -10,14 +11,14 @@ class LlmResponseData:
      """Data class to store LLM model response information."""

      model_type: ModelTypes
-     environments: List[str]
+     environments: List[Environment]


- # Define environment sets for common groups
- AZURE_ENVS = ["preview", "azure", "local"]
- GCP_ENVS = ["preview", "gcp", "local"]
- AWS_ENVS = ["preview", "aws", "local"]
- OTHER_ENVS = ["preview", "local"]
+ # Define environment sets using centralized enum methods (type-safe enums)
+ AZURE_ENVS = Environment.get_azure_environments()
+ GCP_ENVS = Environment.get_gcp_environments()
+ AWS_ENVS = Environment.get_aws_environments()
+ OTHER_ENVS = [Environment.PREVIEW, Environment.LOCALHOST]

  # Define model responses with their environment restrictions
  MODEL_RESPONSES = [
@@ -52,6 +53,7 @@ MODEL_RESPONSES = [
      LlmResponseData(ModelTypes.CLAUDE_4_SONNET, AWS_ENVS),
      LlmResponseData(ModelTypes.CLAUDE_4_OPUS, AWS_ENVS),
      LlmResponseData(ModelTypes.CLAUDE_4_1_OPUS, AWS_ENVS),
+     LlmResponseData(ModelTypes.CLAUDE_4_SONNET_1M, [Environment.AWS]),
      # Other LLMs test data
      LlmResponseData(ModelTypes.RLAB_QWQ_32B, OTHER_ENVS),
      LlmResponseData(ModelTypes.DEEPSEEK_R1, OTHER_ENVS),
@@ -20,6 +20,7 @@ pm_tools_test_data = [
          JIRA_TOOL_PROMPT,
          RESPONSE_FOR_JIRA_TOOL,
          marks=pytest.mark.jira,
+         id=ProjectManagementIntegrationType.JIRA,
      ),
      pytest.param(
          ProjectManagementTool.CONFLUENCE,
@@ -27,6 +28,7 @@ pm_tools_test_data = [
          CONFLUENCE_TOOL_PROMPT,
          RESPONSE_FOR_CONFLUENCE_TOOL,
          marks=pytest.mark.confluence,
+         id=ProjectManagementIntegrationType.CONFLUENCE,
      ),
      pytest.param(
          ProjectManagementTool.JIRA,
@@ -34,6 +36,7 @@ pm_tools_test_data = [
          JIRA_CLOUD_TOOL_PROMPT,
          RESPONSE_FOR_JIRA_CLOUD_TOOL,
          marks=[pytest.mark.jira, pytest.mark.jira_cloud],
+         id=ProjectManagementIntegrationType.JIRA_CLOUD,
      ),
      pytest.param(
          ProjectManagementTool.CONFLUENCE,
@@ -41,5 +44,6 @@ pm_tools_test_data = [
          CONFLUENCE_CLOUD_TOOL_PROMPT,
          RESPONSE_FOR_CONFLUENCE_CLOUD_TOOL,
          marks=[pytest.mark.confluence, pytest.mark.confluence_cloud],
+         id=ProjectManagementIntegrationType.CONFLUENCE_CLOUD,
      ),
  ]
@@ -2,6 +2,7 @@ import os

  import pytest

+ from codemie_sdk.models.integration import CredentialTypes
  from codemie_test_harness.tests.enums.tools import VcsTool

  GITHUB_TOOL_TASK = (
@@ -60,9 +61,17 @@ RESPONSE_FOR_GITLAB = f"""

  vcs_tools_test_data = [
      pytest.param(
-         VcsTool.GITHUB, GITHUB_TOOL_TASK, RESPONSE_FOR_GITHUB, marks=pytest.mark.github
+         VcsTool.GITHUB,
+         GITHUB_TOOL_TASK,
+         RESPONSE_FOR_GITHUB,
+         marks=pytest.mark.github,
+         id=f"{CredentialTypes.GIT}_github",
      ),
      pytest.param(
-         VcsTool.GITLAB, GITLAB_TOOL_TASK, RESPONSE_FOR_GITLAB, marks=pytest.mark.gitlab
+         VcsTool.GITLAB,
+         GITLAB_TOOL_TASK,
+         RESPONSE_FOR_GITLAB,
+         marks=pytest.mark.gitlab,
+         id=f"{CredentialTypes.GIT}_gitlab",
      ),
  ]
@@ -11,6 +11,7 @@ from botocore.exceptions import ClientError

  from codemie_sdk.models.integration import CredentialValues
  from codemie_test_harness.tests.enums.integrations import DataBaseDialect
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver


  class AwsParameterStore:
@@ -419,7 +420,7 @@ class CredentialsUtil:
          elastic_creds = AwsParameterStore.get_cloud_provider_credentials("elastic")
          elastic_creds = (
              elastic_creds.get("elasticsearch", {})
-             if os.getenv("ENV") == "preview"
+             if EnvironmentResolver.is_preview()
              else elastic_creds.get("sandbox", {}).get("elasticsearch", {})
          )
          return [
@@ -434,7 +435,9 @@ class CredentialsUtil:
      def sql_credentials(db_dialect: DataBaseDialect) -> List[CredentialValues]:
          sql_creds = AwsParameterStore.get_cloud_provider_credentials("sql")
          sql_creds = (
-             sql_creds if os.getenv("ENV") == "preview" else sql_creds.get("sandbox", {})
+             sql_creds
+             if EnvironmentResolver.is_preview()
+             else sql_creds.get("sandbox", {})
          )

          return [
@@ -620,6 +623,24 @@ class CredentialsUtil:
                  cred.value = "wrong_key"
          return credentials

+     @staticmethod
+     def lite_llm_credentials() -> List[CredentialValues]:
+         lite_llm_creds = AwsParameterStore.get_cloud_provider_credentials("litellm")
+         return [
+             CredentialValues(
+                 key="api_key",
+                 value=lite_llm_creds.get("api_key"),
+             ),
+         ]
+
+     @staticmethod
+     def invalid_lite_llm_credentials() -> List[CredentialValues]:
+         credentials = CredentialsUtil.lite_llm_credentials()
+         for cred in credentials:
+             if cred.key == "api_key":
+                 cred.value = "wrong_key"
+         return credentials
+
      @staticmethod
      def jira_cloud_jql() -> str:
          jira_creds = AwsParameterStore.get_cloud_provider_credentials("jira")
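
The new lite_llm_credentials and invalid_lite_llm_credentials helpers above presumably feed the new test_lite_llm.py suite (+96 lines in the file list, not shown in this diff). As written, the invalid variant reuses the valid credentials and only swaps the api_key value, so a quick illustrative check of that contract could look like this (it assumes the same AWS Parameter Store access the harness already requires):

    # Illustrative only; not part of the package.
    from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil

    valid = CredentialsUtil.lite_llm_credentials()
    broken = CredentialsUtil.invalid_lite_llm_credentials()

    # Both helpers return the same credential keys; only the api_key value
    # is replaced with "wrong_key" in the invalid variant.
    assert {cred.key for cred in valid} == {cred.key for cred in broken}
    assert next(cred.value for cred in broken if cred.key == "api_key") == "wrong_key"
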
@@ -4,7 +4,7 @@ from pathlib import Path
  from codemie_test_harness.tests.enums.tools import VcsTool, NotificationTool


- class ProjectManagementIntegrationType(Enum):
+ class ProjectManagementIntegrationType(str, Enum):
      JIRA = "jira"
      CONFLUENCE = "confluence"
      JIRA_CLOUD = "jira_cloud"
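
This last one-line change (the only +1/-1 entry in the file list is tests/utils/constants.py) makes ProjectManagementIntegrationType a str-based enum. That pairs with the id=ProjectManagementIntegrationType.* additions earlier in the diff: pytest.param expects its id to be a string, and the str mixin lets the member be passed through directly while still comparing equal to its raw value. A minimal illustration of the difference, not taken from the package:

    from enum import Enum


    class Plain(Enum):
        JIRA = "jira"


    class Mixed(str, Enum):
        JIRA = "jira"


    assert Mixed.JIRA == "jira"          # str-based member equals its value
    assert Plain.JIRA != "jira"          # a plain Enum member does not
    assert isinstance(Mixed.JIRA, str)   # usable anywhere a str is required
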