codemie-test-harness 0.1.159__py3-none-any.whl → 0.1.161__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.

Potentially problematic release: this version of codemie-test-harness might be problematic.

Files changed (60)
  1. codemie_test_harness/tests/assistant/datasource/test_confluence_datasource.py +2 -1
  2. codemie_test_harness/tests/assistant/datasource/test_jira_datasource.py +2 -1
  3. codemie_test_harness/tests/assistant/tools/cloud/test_cloud_tools.py +0 -7
  4. codemie_test_harness/tests/assistant/tools/codebase/test_codebase_tools.py +0 -1
  5. codemie_test_harness/tests/assistant/tools/datamanagement/test_assistant_with_data_management_tools.py +4 -5
  6. codemie_test_harness/tests/assistant/tools/filemanagement/test_assistant_with_file_management_tools.py +2 -9
  7. codemie_test_harness/tests/assistant/tools/mcp/test_cli_mcp_server.py +5 -7
  8. codemie_test_harness/tests/assistant/tools/mcp/test_mcp_servers.py +5 -7
  9. codemie_test_harness/tests/assistant/tools/notification/test_assistant_notification_tools.py +3 -3
  10. codemie_test_harness/tests/assistant/tools/openapi/test_assistant_with_open_api_tools.py +3 -2
  11. codemie_test_harness/tests/conftest.py +6 -2
  12. codemie_test_harness/tests/enums/environment.py +102 -0
  13. codemie_test_harness/tests/enums/model_types.py +1 -0
  14. codemie_test_harness/tests/integrations/project/test_default_integrations.py +3 -11
  15. codemie_test_harness/tests/integrations/project/test_project_integrations.py +0 -132
  16. codemie_test_harness/tests/integrations/user/test_default_integrations.py +3 -11
  17. codemie_test_harness/tests/integrations/user/test_user_integrations.py +0 -132
  18. codemie_test_harness/tests/llm/assistants/test_lite_llm.py +96 -0
  19. codemie_test_harness/tests/llm/assistants/test_llm.py +9 -9
  20. codemie_test_harness/tests/service/test_assistant_service.py +2 -2
  21. codemie_test_harness/tests/test_data/cloud_tools_test_data.py +32 -11
  22. codemie_test_harness/tests/test_data/codebase_tools_test_data.py +2 -0
  23. codemie_test_harness/tests/test_data/data_management_tools_test_data.py +3 -3
  24. codemie_test_harness/tests/test_data/direct_tools/cloud_tools_test_data.py +7 -4
  25. codemie_test_harness/tests/test_data/direct_tools/codebase_tools_test_data.py +2 -0
  26. codemie_test_harness/tests/test_data/direct_tools/data_management_tools_test_data.py +4 -5
  27. codemie_test_harness/tests/test_data/direct_tools/file_management_tools_test_data.py +2 -2
  28. codemie_test_harness/tests/test_data/direct_tools/notification_tools_test_data.py +5 -2
  29. codemie_test_harness/tests/test_data/direct_tools/project_management_tools_test_data.py +2 -0
  30. codemie_test_harness/tests/test_data/direct_tools/research_tools_test_data.py +1 -0
  31. codemie_test_harness/tests/test_data/direct_tools/vcs_tools_test_data.py +3 -0
  32. codemie_test_harness/tests/test_data/file_management_tools_test_data.py +9 -5
  33. codemie_test_harness/tests/test_data/index_test_data.py +9 -11
  34. codemie_test_harness/tests/test_data/integrations_test_data.py +55 -9
  35. codemie_test_harness/tests/test_data/llm_test_data.py +8 -6
  36. codemie_test_harness/tests/test_data/project_management_test_data.py +4 -0
  37. codemie_test_harness/tests/test_data/vcs_tools_test_data.py +11 -2
  38. codemie_test_harness/tests/utils/aws_parameters_store.py +23 -2
  39. codemie_test_harness/tests/utils/constants.py +1 -1
  40. codemie_test_harness/tests/utils/env_resolver.py +119 -0
  41. codemie_test_harness/tests/workflow/assistant_tools/cloud/test_workflow_with_assistant_cloud_tools.py +0 -7
  42. codemie_test_harness/tests/workflow/assistant_tools/codebase/test_worfklow_with_assistant_codebase_tools.py +0 -1
  43. codemie_test_harness/tests/workflow/assistant_tools/data_management/test_workflow_with_assistant_with_data_management_tools.py +3 -5
  44. codemie_test_harness/tests/workflow/assistant_tools/file_management/test_workflow_with_assistant_with_file_management_tools.py +2 -9
  45. codemie_test_harness/tests/workflow/assistant_tools/mcp/test_workflow_with_assistant_with_mcp_server.py +5 -10
  46. codemie_test_harness/tests/workflow/assistant_tools/notification/test_workflow_with_assistant_notification_tools.py +3 -2
  47. codemie_test_harness/tests/workflow/assistant_tools/open_api/test_workflow_with_assistant_with_open_api_tools.py +3 -2
  48. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_data_management_tools_sql.py +3 -2
  49. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_open_api_tools.py +3 -2
  50. codemie_test_harness/tests/workflow/virtual_assistant_tools/cloud/test_workflow_with_cloud_tools.py +0 -7
  51. codemie_test_harness/tests/workflow/virtual_assistant_tools/codebase/test_workflow_with_codebase_tools.py +0 -1
  52. codemie_test_harness/tests/workflow/virtual_assistant_tools/data_management/test_workflow_with_data_management_tools.py +3 -5
  53. codemie_test_harness/tests/workflow/virtual_assistant_tools/file_management/test_workflow_with_file_management_tools.py +2 -9
  54. codemie_test_harness/tests/workflow/virtual_assistant_tools/mcp/test_workflow_with_mcp_server.py +5 -11
  55. codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/test_workflow_with_notification_tools.py +3 -3
  56. codemie_test_harness/tests/workflow/virtual_assistant_tools/open_api/test_workflow_with_open_api_tools.py +3 -3
  57. {codemie_test_harness-0.1.159.dist-info → codemie_test_harness-0.1.161.dist-info}/METADATA +2 -2
  58. {codemie_test_harness-0.1.159.dist-info → codemie_test_harness-0.1.161.dist-info}/RECORD +60 -57
  59. {codemie_test_harness-0.1.159.dist-info → codemie_test_harness-0.1.161.dist-info}/WHEEL +0 -0
  60. {codemie_test_harness-0.1.159.dist-info → codemie_test_harness-0.1.161.dist-info}/entry_points.txt +0 -0
codemie_test_harness/tests/integrations/user/test_user_integrations.py

@@ -7,7 +7,6 @@ from codemie_sdk.models.integration import (
     Integration,
     IntegrationTestRequest,
     IntegrationTestResponse,
-    CredentialTypes,
 )
 from codemie_test_harness.tests import PROJECT
 from codemie_test_harness.tests.test_data.integrations_test_data import (
@@ -28,31 +27,6 @@ from codemie_test_harness.tests.utils.base_utils import (
 @pytest.mark.parametrize(
     "credential_type, credentials",
     valid_integrations,
-    ids=[
-        f"User integration: {CredentialTypes.AWS}",
-        f"User integration: {CredentialTypes.AZURE}",
-        f"User integration: {CredentialTypes.GCP}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.GIT}",
-        f"User integration: {CredentialTypes.GIT}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.SQL}",
-        f"User integration: {CredentialTypes.SQL}",
-        f"User integration: {CredentialTypes.ELASTIC}",
-        f"User integration: {CredentialTypes.MCP}",
-        f"User integration: {CredentialTypes.AZURE_DEVOPS}",
-        f"User integration: {CredentialTypes.FILESYSTEM}",
-        f"User integration: {CredentialTypes.EMAIL}",
-        f"User integration: {CredentialTypes.TELEGRAM}",
-        f"User integration: {CredentialTypes.SERVICE_NOW}",
-        f"User integration: {CredentialTypes.KUBERNETES}",
-        f"User integration: {CredentialTypes.KEYCLOAK}",
-        f"User integration: {CredentialTypes.REPORT_PORTAL}",
-    ],
 )
 def test_integration_created(
     credential_type, credentials, general_integration, integration_utils
@@ -86,21 +60,6 @@ def test_integration_created(
 @pytest.mark.parametrize(
     "credential_type, credentials",
     testable_integrations,
-    ids=[
-        f"User integration: {CredentialTypes.AWS}",
-        f"User integration: {CredentialTypes.AZURE}",
-        f"User integration: {CredentialTypes.GCP}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.EMAIL}",
-        f"User integration: {CredentialTypes.SERVICE_NOW}",
-        f"User integration: {CredentialTypes.KUBERNETES}",
-        f"User integration: {CredentialTypes.REPORT_PORTAL}",
-    ],
 )
 def test_integration_after_creation(
     credential_type, credentials, general_integration, integration_utils
@@ -139,31 +98,6 @@ def test_integration_after_creation(
 @pytest.mark.parametrize(
     "credential_type, credentials",
     valid_integrations,
-    ids=[
-        f"User integration: {CredentialTypes.AWS}",
-        f"User integration: {CredentialTypes.AZURE}",
-        f"User integration: {CredentialTypes.GCP}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.GIT}",
-        f"User integration: {CredentialTypes.GIT}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.SQL}",
-        f"User integration: {CredentialTypes.SQL}",
-        f"User integration: {CredentialTypes.ELASTIC}",
-        f"User integration: {CredentialTypes.MCP}",
-        f"User integration: {CredentialTypes.AZURE_DEVOPS}",
-        f"User integration: {CredentialTypes.FILESYSTEM}",
-        f"User integration: {CredentialTypes.EMAIL}",
-        f"User integration: {CredentialTypes.TELEGRAM}",
-        f"User integration: {CredentialTypes.SERVICE_NOW}",
-        f"User integration: {CredentialTypes.KUBERNETES}",
-        f"User integration: {CredentialTypes.KEYCLOAK}",
-        f"User integration: {CredentialTypes.REPORT_PORTAL}",
-    ],
 )
 def test_update_integration(
     credential_type, credentials, general_integration, integration_utils
@@ -209,19 +143,6 @@ def test_update_integration(
 @pytest.mark.parametrize(
     "credential_type, credentials, error_message",
     invalid_integrations,
-    ids=[
-        f"User integration: {CredentialTypes.AWS}",
-        f"User integration: {CredentialTypes.AZURE}",
-        f"User integration: {CredentialTypes.GCP}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.EMAIL}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.SERVICE_NOW}",
-        f"User integration: {CredentialTypes.KUBERNETES}",
-        f"User integration: {CredentialTypes.REPORT_PORTAL}",
-    ],
 )
 def test_integration_with_invalid_credentials(
     credential_type, credentials, error_message, integration_utils
@@ -263,21 +184,6 @@ def test_integration_with_invalid_credentials(
 @pytest.mark.parametrize(
     "credential_type, credentials",
     testable_integrations,
-    ids=[
-        f"User integration: {CredentialTypes.AWS}",
-        f"User integration: {CredentialTypes.AZURE}",
-        f"User integration: {CredentialTypes.GCP}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.EMAIL}",
-        f"User integration: {CredentialTypes.SERVICE_NOW}",
-        f"User integration: {CredentialTypes.KUBERNETES}",
-        f"User integration: {CredentialTypes.REPORT_PORTAL}",
-    ],
 )
 def test_integration_during_creation(credential_type, credentials, integration_utils):
     integration_test_model = IntegrationTestRequest(
@@ -302,19 +208,6 @@ def test_integration_during_creation(credential_type, credentials, integration_u
 @pytest.mark.parametrize(
     "credential_type, credentials, error_message",
     invalid_integrations,
-    ids=[
-        f"User integration: {CredentialTypes.AWS}",
-        f"User integration: {CredentialTypes.AZURE}",
-        f"User integration: {CredentialTypes.GCP}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.EMAIL}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.SERVICE_NOW}",
-        f"User integration: {CredentialTypes.KUBERNETES}",
-        f"User integration: {CredentialTypes.REPORT_PORTAL}",
-    ],
 )
 def test_integration_during_creation_with_invalid_credentials(
     credential_type, credentials, error_message, integration_utils
@@ -338,31 +231,6 @@ def test_integration_during_creation_with_invalid_credentials(
 @pytest.mark.parametrize(
     "credential_type, credentials",
     valid_integrations,
-    ids=[
-        f"User integration: {CredentialTypes.AWS}",
-        f"User integration: {CredentialTypes.AZURE}",
-        f"User integration: {CredentialTypes.GCP}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.SONAR}",
-        f"User integration: {CredentialTypes.GIT}",
-        f"User integration: {CredentialTypes.GIT}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.CONFLUENCE}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.JIRA}",
-        f"User integration: {CredentialTypes.SQL}",
-        f"User integration: {CredentialTypes.SQL}",
-        f"User integration: {CredentialTypes.ELASTIC}",
-        f"User integration: {CredentialTypes.MCP}",
-        f"User integration: {CredentialTypes.AZURE_DEVOPS}",
-        f"User integration: {CredentialTypes.FILESYSTEM}",
-        f"User integration: {CredentialTypes.EMAIL}",
-        f"User integration: {CredentialTypes.TELEGRAM}",
-        f"User integration: {CredentialTypes.SERVICE_NOW}",
-        f"User integration: {CredentialTypes.KUBERNETES}",
-        f"User integration: {CredentialTypes.KEYCLOAK}",
-        f"User integration: {CredentialTypes.REPORT_PORTAL}",
-    ],
 )
 def test_delete_integration(
     credential_type, credentials, general_integration, integration_utils
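
Note on the recurring pattern in this release: the hand-maintained ids=[...] lists removed above are replaced by an id= attached to each pytest.param in the test-data modules later in this diff, so the label lives next to the case it names and cannot drift when cases are added, removed, or reordered. A minimal sketch of the two styles (the toy data and test bodies are illustrative, not from the package):

import pytest

# Old style: a parallel ids= list that must be kept in sync by hand.
@pytest.mark.parametrize(
    "credential_type, credentials",
    [("aws", {"key": "..."}), ("jira", {"token": "..."})],
    ids=["User integration: aws", "User integration: jira"],
)
def test_old_style(credential_type, credentials):
    assert credentials

# New style: each case carries its own id, so labels cannot desynchronize.
@pytest.mark.parametrize(
    "credential_type, credentials",
    [
        pytest.param("aws", {"key": "..."}, id="aws"),
        pytest.param("jira", {"token": "..."}, id="jira"),
    ],
)
def test_new_style(credential_type, credentials):
    assert credentials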
codemie_test_harness/tests/llm/assistants/test_lite_llm.py

@@ -0,0 +1,96 @@
+import pytest
+from codemie_sdk.models.integration import CredentialTypes
+from hamcrest import assert_that, has_item
+
+from codemie_test_harness.tests.enums.model_types import ModelTypes
+from codemie_test_harness.tests.test_data.llm_test_data import MODEL_RESPONSES
+from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil
+from codemie_test_harness.tests.utils.client_factory import get_client
+from codemie_test_harness.tests.utils.constants import test_project_name
+from codemie_test_harness.tests.utils.env_resolver import get_environment
+from codemie_test_harness.tests.utils.pytest_utils import check_mark
+
+
+@pytest.fixture(scope="module")
+def lite_llm_integration(integration_utils):
+    credential_values = CredentialsUtil.lite_llm_credentials()
+    integration = integration_utils.create_user_integration(
+        CredentialTypes.LITE_LLM,
+        credential_values,
+    )
+    yield integration
+    if integration:
+        integration_utils.delete_integration(integration)
+
+
+@pytest.fixture(scope="function")
+def invalid_lite_llm_integration(integration_utils):
+    credential_values = CredentialsUtil.invalid_lite_llm_credentials()
+    integration = integration_utils.create_user_integration(
+        CredentialTypes.LITE_LLM,
+        credential_values,
+        project_name=test_project_name,
+    )
+    yield integration
+    if integration:
+        integration_utils.delete_integration(integration)
+
+
+def pytest_generate_tests(metafunc):
+    if "model_type" in metafunc.fixturenames:
+        is_smoke = check_mark(metafunc, "smoke")
+        test_data = []
+        env = get_environment()
+        if is_smoke:
+            available_models = get_client().llms.list()
+            for model in available_models:
+                test_data.append(pytest.param(model.base_name))
+        else:
+            for model_data in MODEL_RESPONSES:
+                test_data.append(
+                    pytest.param(
+                        model_data.model_type,
+                        marks=pytest.mark.skipif(
+                            env not in model_data.environments,
+                            reason=f"Skip on non {'/'.join(str(env) for env in model_data.environments[:-1])} envs",
+                        ),
+                    )
+                )
+
+        metafunc.parametrize("model_type", test_data)
+
+
+@pytest.mark.assistant
+@pytest.mark.lite_llm
+@pytest.mark.regression
+def test_assistant_with_different_models_in_lite_llm(
+    llm_utils,
+    lite_llm_integration,
+    assistant_utils,
+    model_type,
+    similarity_check,
+):
+    assert_that(
+        [row.base_name for row in llm_utils.list_llm_models()],
+        has_item(model_type),
+        f"{model_type} is missing in backend response",
+    )
+    assistant = assistant_utils.create_assistant(model_type)
+    response = assistant_utils.ask_assistant(assistant, "Just say one word: 'Hello'")
+
+    if model_type in [ModelTypes.DEEPSEEK_R1, ModelTypes.RLAB_QWQ_32B]:
+        response = "\n".join(response.split("\n")[-3:])
+    similarity_check.check_similarity(response, "Hello")
+
+
+@pytest.mark.assistant
+@pytest.mark.lite_llm
+@pytest.mark.regression
+def test_assistant_with_invalid_lite_llm(
+    invalid_lite_llm_integration,
+    assistant,
+    assistant_utils,
+):
+    assistant = assistant(project_name=test_project_name)
+    response = assistant_utils.ask_assistant(assistant, "Just say one word: 'Hello'")
+    assert_that(response.startswith("AI Agent run failed with error: Error code: 401"))
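
The new module above parametrizes model_type at collection time through the pytest_generate_tests hook rather than a static decorator, which lets the smoke run pull the model list from the live backend while the regression run draws on MODEL_RESPONSES. A stripped-down sketch of the hook mechanics (the names and data here are illustrative, not from the package):

import pytest

MODELS = ["model-a", "model-b"]  # stand-in for a dynamically fetched list

def pytest_generate_tests(metafunc):
    # Called once per collected test; only tests that declare a
    # "model_type" argument get parametrized.
    if "model_type" in metafunc.fixturenames:
        metafunc.parametrize("model_type", [pytest.param(m) for m in MODELS])

def test_model(model_type):
    assert model_type in MODELS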
codemie_test_harness/tests/llm/assistants/test_llm.py

@@ -1,9 +1,9 @@
-import os
 import pytest
 from hamcrest import assert_that, has_item
 from codemie_test_harness.tests.enums.model_types import ModelTypes
 from codemie_test_harness.tests.test_data.llm_test_data import MODEL_RESPONSES
 from codemie_test_harness.tests.utils.client_factory import get_client
+from codemie_test_harness.tests.utils.env_resolver import get_environment
 from codemie_test_harness.tests.utils.pytest_utils import check_mark
 
 
@@ -11,7 +11,7 @@ def pytest_generate_tests(metafunc):
     if "model_type" in metafunc.fixturenames:
         is_smoke = check_mark(metafunc, "smoke")
         test_data = []
-        env = os.getenv("ENV")
+        env = get_environment()
         if is_smoke:
             available_models = get_client().llms.list()
             for model in available_models:
@@ -23,7 +23,7 @@ def pytest_generate_tests(metafunc):
                         model_data.model_type,
                         marks=pytest.mark.skipif(
                             env not in model_data.environments,
-                            reason=f"Skip on non {'/'.join(model_data.environments[:-1])} envs",
+                            reason=f"Skip on non {'/'.join(str(env) for env in model_data.environments[:-1])} envs",
                         ),
                     )
                 )
@@ -36,10 +36,10 @@ def pytest_generate_tests(metafunc):
 @pytest.mark.regression
 @pytest.mark.smoke
 def test_assistant_with_different_models(
-    client, assistant_utils, model_type, similarity_check
+    llm_utils, assistant_utils, model_type, similarity_check
 ):
     assert_that(
-        [row.base_name for row in client.llms.list()],
+        [row.base_name for row in llm_utils.list_llm_models()],
         has_item(model_type),
         f"{model_type} is missing in backend response",
     )
@@ -57,10 +57,10 @@ def test_assistant_with_different_models(
 @pytest.mark.regression
 @pytest.mark.smoke
 def test_assistant_with_different_models_with_top_p_parameter(
-    client, assistant_utils, model_type, similarity_check
+    llm_utils, assistant_utils, model_type, similarity_check
 ):
     assert_that(
-        [row.base_name for row in client.llms.list()],
+        [row.base_name for row in llm_utils.list_llm_models()],
        has_item(model_type),
         f"{model_type} is missing in backend response",
     )
@@ -78,10 +78,10 @@ def test_assistant_with_different_models_with_top_p_parameter(
 @pytest.mark.regression
 @pytest.mark.smoke
 def test_assistant_with_different_models_with_temperature_parameter(
-    client, assistant_utils, model_type, similarity_check
+    llm_utils, assistant_utils, model_type, similarity_check
 ):
     assert_that(
-        [row.base_name for row in client.llms.list()],
+        [row.base_name for row in llm_utils.list_llm_models()],
         has_item(model_type),
         f"{model_type} is missing in backend response",
     )
codemie_test_harness/tests/service/test_assistant_service.py

@@ -1,4 +1,3 @@
-import os
 import uuid
 from datetime import datetime
 
@@ -34,6 +33,7 @@ from hamcrest import (
 
 from codemie_test_harness.tests import PROJECT
 from codemie_test_harness.tests.utils.base_utils import get_random_name
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 
 def validate_assistant_full_response(assistant):
@@ -498,7 +498,7 @@ def test_get_prebuilt_assistants(assistant_utils):
 
 
 @pytest.mark.skipif(
-    os.getenv("ENV") != "preview", reason="valid_assistant_id is for preview env"
+    not EnvironmentResolver.is_preview(), reason="valid_assistant_id is for preview env"
 )
 def test_assistant_evaluate(assistant_utils):
     valid_assistant_id = "05959338-06de-477d-9cc3-08369f858057"
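
The os.getenv("ENV") string checks removed throughout this release are routed through the new enums/environment.py and utils/env_resolver.py modules (entries 12 and 40 in the file list above), whose bodies are not shown in this diff. What follows is a plausible minimal sketch inferred only from the call sites (get_environment() returning an Environment member, the EnvironmentResolver.is_*() predicates, and is_sandbox() covering the aws/azure/gcp values the old inline checks used); the real 119-line module is certainly richer:

import os
from enum import Enum


class Environment(str, Enum):
    # Assumed members, inferred from the ENV strings the old checks compared against.
    LOCALHOST = "local"
    PREVIEW = "preview"
    AWS = "aws"
    AZURE = "azure"
    GCP = "gcp"


def get_environment() -> Environment:
    # Assumed behavior: map the ENV variable onto an enum member.
    return Environment(os.getenv("ENV", "local"))


class EnvironmentResolver:
    @staticmethod
    def is_preview() -> bool:
        return get_environment() is Environment.PREVIEW

    @staticmethod
    def is_azure() -> bool:
        return get_environment() is Environment.AZURE

    @staticmethod
    def is_localhost() -> bool:
        return get_environment() is Environment.LOCALHOST

    @staticmethod
    def is_sandbox() -> bool:
        # The old inline checks gated MS SQL on ENV in ("aws", "azure", "gcp").
        return get_environment() in (Environment.AWS, Environment.AZURE, Environment.GCP)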
codemie_test_harness/tests/test_data/cloud_tools_test_data.py

@@ -1,10 +1,13 @@
-import os
-
 import pytest
 
 from codemie_test_harness.tests.enums.tools import Toolkit, CloudTool
 from codemie_sdk.models.integration import CredentialTypes
 from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil
+from codemie_test_harness.tests.utils.env_resolver import (
+    EnvironmentResolver,
+    get_environment,
+)
+from codemie_test_harness.tests.enums.environment import Environment
 
 cloud_test_data = [
     pytest.param(
@@ -17,15 +20,30 @@ cloud_test_data = [
         Query example: {'query': {'service': 's3', 'method_name': 'list_buckets', 'method_arguments': {}}}
         """,
         """
-        Okay, here are the names of your S3 buckets:
-
-        * codemie-bucket
-        * codemie-terraform-states-025066278959
-        * epam-cloud-s3-access-logs-025066278959-eu-central-1
-        * epam-cloud-s3-access-logs-025066278959-us-east-1
-        * terraform-states-025066278959
+        Here are the names of the S3 buckets:
+
+        - az-codemie-terraform-states-025066278959
+        - codemie-bucket
+        - codemie-terraform-states-025066278959
+        - codemie-terraform-states-da-025066278959
+        - codemie-terraform-states-dar-025066278959
+        - codemie-terraform-states-ihorb-025066278959
+        - codemie-terraform-states-sh-test-025066278959
+        - da-codemie-user-data-025066278959
+        - epam-cloud-s3-access-logs-025066278959-eu-central-1
+        - epam-cloud-s3-access-logs-025066278959-eu-north-1
+        - epam-cloud-s3-access-logs-025066278959-eu-west-2
+        - epam-cloud-s3-access-logs-025066278959-us-east-1
+        - ggg-codemie-terraform-states-025066278959
+        - gggcodemie-user-data-025066278959
+        - pc-codemie-terraform-states-025066278959
+        - pc-codemie-user-data-025066278959
+        - sk-codemie-terraform-states-025066278959
+        - sk-codemie-user-data-025066278959
+        - terraform-states-025066278959
         """,
         marks=pytest.mark.aws,
+        id=CredentialTypes.AWS,
     ),
     pytest.param(
         Toolkit.CLOUD,
@@ -45,6 +63,7 @@ cloud_test_data = [
         Provisioning State: Succeeded
         """,
         marks=pytest.mark.azure,
+        id=CredentialTypes.AZURE,
     ),
     pytest.param(
         Toolkit.CLOUD,
@@ -106,10 +125,11 @@ cloud_test_data = [
         marks=[
             pytest.mark.gcp,
             pytest.mark.skipif(
-                os.getenv("ENV") in ["azure", "gcp"],
+                get_environment() in [Environment.AZURE, Environment.GCP],
                 reason="Still have an issue with encoding long strings",
             ),
         ],
+        id=CredentialTypes.GCP,
    ),
     pytest.param(
         Toolkit.CLOUD,
@@ -134,9 +154,10 @@ cloud_test_data = [
         marks=[
             pytest.mark.kubernetes,
             pytest.mark.skipif(
-                os.getenv("ENV") == "azure",
+                EnvironmentResolver.is_azure(),
                 reason="Still have an issue with encoding long strings",
             ),
         ],
+        id=CredentialTypes.KUBERNETES,
     ),
 ]
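
The id=CredentialTypes.AWS style used above passes an enum member rather than a string literal; pytest expects a pytest.param id to be a string, so this only works when the enum derives from str. A sketch of the pattern with a hypothetical enum standing in for the SDK's CredentialTypes (whose definition is not shown in this diff):

import enum

import pytest


class CloudProvider(str, enum.Enum):  # hypothetical stand-in for CredentialTypes
    AWS = "aws"
    AZURE = "azure"


@pytest.mark.parametrize(
    "service",
    [
        pytest.param("s3", id=CloudProvider.AWS),
        pytest.param("blob_storage", id=CloudProvider.AZURE),
    ],
)
def test_provider_service(service):
    assert service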
codemie_test_harness/tests/test_data/codebase_tools_test_data.py

@@ -57,6 +57,7 @@ sonar_tools_test_data = [
         """,
         "Yes, I have an access to SonarQube",
         marks=pytest.mark.sonar,
+        id=CodeBaseTool.SONAR,
     ),
     pytest.param(
         Toolkit.CODEBASE_TOOLS,
@@ -71,5 +72,6 @@ sonar_tools_test_data = [
         """,
         "Yes, I have an access to SonarCloud",
         marks=pytest.mark.sonar,
+        id=CodeBaseTool.SONAR_CLOUD,
     ),
 ]
codemie_test_harness/tests/test_data/data_management_tools_test_data.py

@@ -1,8 +1,7 @@
-import os
-
 import pytest
 
 from codemie_test_harness.tests.enums.integrations import DataBaseDialect
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 ELASTIC_TOOL_TASK = """
 send the query to Elastic:
@@ -59,8 +58,9 @@ sql_tools_test_data = [
     pytest.param(
         DataBaseDialect.MS_SQL,
         marks=pytest.mark.skipif(
-            os.getenv("ENV") not in ("aws", "azure", "gcp"),
+            not EnvironmentResolver.is_sandbox(),
             reason="MS SQL is only available in staging environments",
         ),
+        id=DataBaseDialect.MS_SQL,
     ),
 ]
codemie_test_harness/tests/test_data/direct_tools/cloud_tools_test_data.py

@@ -1,10 +1,9 @@
-import os
-
 import pytest
 
 from codemie_test_harness.tests.enums.tools import Toolkit, CloudTool
 from codemie_sdk.models.integration import CredentialTypes
 from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 cloud_test_data = [
     pytest.param(
@@ -100,6 +99,7 @@ cloud_test_data = [
         }
         """,
         marks=pytest.mark.aws,
+        id=CredentialTypes.AWS,
     ),
     pytest.param(
         Toolkit.CLOUD,
@@ -125,6 +125,7 @@ cloud_test_data = [
         }
         """,
         marks=pytest.mark.azure,
+        id=CredentialTypes.AZURE,
     ),
     pytest.param(
         Toolkit.CLOUD,
@@ -170,10 +171,11 @@ cloud_test_data = [
         marks=[
             pytest.mark.gcp,
             pytest.mark.skipif(
-                os.getenv("ENV") == "azure",
+                EnvironmentResolver.is_azure(),
                 reason="Still have an issue with encoding long strings",
             ),
         ],
+        id=CredentialTypes.GCP,
     ),
     pytest.param(
         Toolkit.CLOUD,
@@ -844,9 +846,10 @@ cloud_test_data = [
         marks=[
             pytest.mark.kubernetes,
             pytest.mark.skipif(
-                os.getenv("ENV") == "azure",
+                EnvironmentResolver.is_azure(),
                 reason="Still have an issue with encoding long strings",
            ),
         ],
+        id=CredentialTypes.KUBERNETES,
     ),
 ]
codemie_test_harness/tests/test_data/direct_tools/codebase_tools_test_data.py

@@ -77,6 +77,7 @@ sonar_tools_test_data = [
         }
         """,
         marks=pytest.mark.sonar,
+        id=CodeBaseTool.SONAR,
     ),
     pytest.param(
         Toolkit.CODEBASE_TOOLS,
@@ -160,5 +161,6 @@ sonar_tools_test_data = [
         }
         """,
         marks=pytest.mark.sonar,
+        id=CodeBaseTool.SONAR_CLOUD,
     ),
 ]
codemie_test_harness/tests/test_data/direct_tools/data_management_tools_test_data.py

@@ -1,9 +1,8 @@
-import os
-
 import pytest
 
-from codemie_test_harness.tests.enums.integrations import DataBaseDialect
 from codemie_test_harness.tests.enums.tools import DataManagementTool, Toolkit
+from codemie_test_harness.tests.enums.integrations import DataBaseDialect
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 sql_tools_test_data = [
     (
@@ -40,9 +39,9 @@ sql_tools_test_data = [
         },
         [{"table_name": "Users"}, {"table_name": "Products"}],
         marks=pytest.mark.skipif(
-            os.getenv("ENV") not in ("aws", "azure", "gcp"),
+            not EnvironmentResolver.is_sandbox(),
             reason="MS SQL is only available in staging environments",
         ),
-        id="DataBaseDialect.MS_SQL",
+        id=DataBaseDialect.MS_SQL,
     ),
 ]
codemie_test_harness/tests/test_data/direct_tools/file_management_tools_test_data.py

@@ -1,4 +1,4 @@
-import os
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 import pytest
 
@@ -41,7 +41,7 @@ file_management_tools_test_data = [
         codemie-ui
         """,
         marks=pytest.mark.skipif(
-            os.getenv("ENV") == "local",
+            EnvironmentResolver.is_localhost(),
             reason="Skipping this test on local environment",
         ),
     ),
codemie_test_harness/tests/test_data/direct_tools/notification_tools_test_data.py

@@ -1,4 +1,5 @@
-import os
+from codemie_test_harness.tests.enums.environment import Environment
+from codemie_test_harness.tests.utils.env_resolver import get_environment
 
 import pytest
 
@@ -21,10 +22,11 @@ notification_tools_test_data = [
         marks=[
             pytest.mark.email,
             pytest.mark.skipif(
-                os.getenv("ENV") in ["local", "gcp"],
+                get_environment() in [Environment.LOCALHOST, Environment.GCP],
                 reason="Skipping this test on local environment",
             ),
         ],
+        id=NotificationTool.EMAIL,
     ),
     pytest.param(
         NotificationTool.TELEGRAM,
@@ -54,5 +56,6 @@ notification_tools_test_data = [
             },
         },
         marks=pytest.mark.telegram,
+        id=NotificationTool.TELEGRAM,
     ),
 ]
codemie_test_harness/tests/test_data/direct_tools/project_management_tools_test_data.py

@@ -158,6 +158,7 @@ project_management_tools_data = [
         _17713":null,"customfield_27500":null}}
         """,
         marks=pytest.mark.jira,
+        id=ProjectManagementIntegrationType.JIRA,
     ),
     pytest.param(
         Toolkit.PROJECT_MANAGEMENT,
@@ -188,5 +189,6 @@ project_management_tools_data = [
         expand=body.storage&cql=title+~+%22AQA+backlog+estimation%22","base":"https://kb.epam.com","context":""}}
         """,
         marks=pytest.mark.confluence,
+        id=ProjectManagementIntegrationType.CONFLUENCE,
     ),
 ]
codemie_test_harness/tests/test_data/direct_tools/research_tools_test_data.py

@@ -34,6 +34,7 @@ research_tools_test_data = [
         marks=pytest.mark.skip(
             reason="Temporarily skipping Tavily test until it is fixed"
         ),
+        id=ResearchToolName.TAVILY_SEARCH,
     ),
     (
         ResearchToolName.GOOGLE_PLACES,
codemie_test_harness/tests/test_data/direct_tools/vcs_tools_test_data.py

@@ -1,4 +1,5 @@
 import pytest
+from codemie_sdk.models.integration import CredentialTypes
 
 from codemie_test_harness.tests.enums.tools import VcsTool, Toolkit
 
@@ -91,6 +92,7 @@ vcs_tools_test_data = [
         }
         """,
         marks=pytest.mark.github,
+        id=f"{CredentialTypes.GIT}_github",
     ),
     pytest.param(
         Toolkit.VCS,
@@ -135,5 +137,6 @@ vcs_tools_test_data = [
 
         """,
         marks=pytest.mark.gitlab,
+        id=f"{CredentialTypes.GIT}_gitlab",
     ),
 ]