codemie-test-harness 0.1.158__py3-none-any.whl → 0.1.160__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of codemie-test-harness might be problematic.

Files changed (69)
  1. codemie_test_harness/tests/assistant/datasource/test_confluence_datasource.py +2 -1
  2. codemie_test_harness/tests/assistant/datasource/test_jira_datasource.py +2 -1
  3. codemie_test_harness/tests/assistant/tools/cloud/test_cloud_tools.py +0 -7
  4. codemie_test_harness/tests/assistant/tools/codebase/test_codebase_tools.py +0 -1
  5. codemie_test_harness/tests/assistant/tools/datamanagement/test_assistant_with_data_management_tools.py +4 -5
  6. codemie_test_harness/tests/assistant/tools/filemanagement/test_assistant_with_file_management_tools.py +2 -9
  7. codemie_test_harness/tests/assistant/tools/mcp/test_cli_mcp_server.py +5 -7
  8. codemie_test_harness/tests/assistant/tools/mcp/test_mcp_servers.py +5 -7
  9. codemie_test_harness/tests/assistant/tools/notification/test_assistant_notification_tools.py +3 -3
  10. codemie_test_harness/tests/assistant/tools/openapi/test_assistant_with_open_api_tools.py +3 -2
  11. codemie_test_harness/tests/assistant/tools/report_portal/__init__.py +0 -0
  12. codemie_test_harness/tests/assistant/tools/report_portal/test_assistant_report_portal_tools.py +32 -0
  13. codemie_test_harness/tests/conftest.py +17 -2
  14. codemie_test_harness/tests/enums/environment.py +102 -0
  15. codemie_test_harness/tests/enums/model_types.py +1 -0
  16. codemie_test_harness/tests/enums/tools.py +14 -0
  17. codemie_test_harness/tests/integrations/project/test_default_integrations.py +47 -12
  18. codemie_test_harness/tests/integrations/project/test_project_integrations.py +0 -125
  19. codemie_test_harness/tests/integrations/user/test_default_integrations.py +47 -11
  20. codemie_test_harness/tests/integrations/user/test_user_integrations.py +0 -125
  21. codemie_test_harness/tests/llm/assistants/test_llm.py +3 -3
  22. codemie_test_harness/tests/service/test_assistant_service.py +2 -2
  23. codemie_test_harness/tests/test_data/cloud_tools_test_data.py +32 -11
  24. codemie_test_harness/tests/test_data/codebase_tools_test_data.py +2 -0
  25. codemie_test_harness/tests/test_data/data_management_tools_test_data.py +3 -3
  26. codemie_test_harness/tests/test_data/direct_tools/cloud_tools_test_data.py +7 -4
  27. codemie_test_harness/tests/test_data/direct_tools/codebase_tools_test_data.py +2 -0
  28. codemie_test_harness/tests/test_data/direct_tools/data_management_tools_test_data.py +4 -5
  29. codemie_test_harness/tests/test_data/direct_tools/file_management_tools_test_data.py +2 -2
  30. codemie_test_harness/tests/test_data/direct_tools/notification_tools_test_data.py +5 -2
  31. codemie_test_harness/tests/test_data/direct_tools/project_management_tools_test_data.py +2 -0
  32. codemie_test_harness/tests/test_data/direct_tools/report_portal_tools_test_data.py +1235 -0
  33. codemie_test_harness/tests/test_data/direct_tools/research_tools_test_data.py +1 -0
  34. codemie_test_harness/tests/test_data/direct_tools/vcs_tools_test_data.py +3 -0
  35. codemie_test_harness/tests/test_data/file_management_tools_test_data.py +9 -5
  36. codemie_test_harness/tests/test_data/index_test_data.py +9 -11
  37. codemie_test_harness/tests/test_data/integrations_test_data.py +71 -9
  38. codemie_test_harness/tests/test_data/llm_test_data.py +8 -6
  39. codemie_test_harness/tests/test_data/project_management_test_data.py +4 -0
  40. codemie_test_harness/tests/test_data/report_portal_tools_test_data.py +520 -0
  41. codemie_test_harness/tests/test_data/vcs_tools_test_data.py +11 -2
  42. codemie_test_harness/tests/utils/aws_parameters_store.py +33 -2
  43. codemie_test_harness/tests/utils/constants.py +1 -1
  44. codemie_test_harness/tests/utils/env_resolver.py +119 -0
  45. codemie_test_harness/tests/workflow/assistant_tools/cloud/test_workflow_with_assistant_cloud_tools.py +0 -7
  46. codemie_test_harness/tests/workflow/assistant_tools/codebase/test_worfklow_with_assistant_codebase_tools.py +0 -1
  47. codemie_test_harness/tests/workflow/assistant_tools/data_management/test_workflow_with_assistant_with_data_management_tools.py +3 -5
  48. codemie_test_harness/tests/workflow/assistant_tools/file_management/test_workflow_with_assistant_with_file_management_tools.py +2 -9
  49. codemie_test_harness/tests/workflow/assistant_tools/mcp/test_workflow_with_assistant_with_mcp_server.py +5 -10
  50. codemie_test_harness/tests/workflow/assistant_tools/notification/test_workflow_with_assistant_notification_tools.py +3 -2
  51. codemie_test_harness/tests/workflow/assistant_tools/open_api/test_workflow_with_assistant_with_open_api_tools.py +3 -2
  52. codemie_test_harness/tests/workflow/assistant_tools/report_portal/__init__.py +0 -0
  53. codemie_test_harness/tests/workflow/assistant_tools/report_portal/test_workflow_with_assistant_with_report_portal_tools.py +38 -0
  54. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_data_management_tools_sql.py +3 -2
  55. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_open_api_tools.py +3 -2
  56. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_report_portal_tools.py +115 -0
  57. codemie_test_harness/tests/workflow/virtual_assistant_tools/cloud/test_workflow_with_cloud_tools.py +0 -7
  58. codemie_test_harness/tests/workflow/virtual_assistant_tools/codebase/test_workflow_with_codebase_tools.py +0 -1
  59. codemie_test_harness/tests/workflow/virtual_assistant_tools/data_management/test_workflow_with_data_management_tools.py +3 -5
  60. codemie_test_harness/tests/workflow/virtual_assistant_tools/file_management/test_workflow_with_file_management_tools.py +2 -9
  61. codemie_test_harness/tests/workflow/virtual_assistant_tools/mcp/test_workflow_with_mcp_server.py +5 -11
  62. codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/test_workflow_with_notification_tools.py +3 -3
  63. codemie_test_harness/tests/workflow/virtual_assistant_tools/open_api/test_workflow_with_open_api_tools.py +3 -3
  64. codemie_test_harness/tests/workflow/virtual_assistant_tools/report_portal/__init__.py +0 -0
  65. codemie_test_harness/tests/workflow/virtual_assistant_tools/report_portal/test_workflow_with_report_portal_tool.py +39 -0
  66. {codemie_test_harness-0.1.158.dist-info → codemie_test_harness-0.1.160.dist-info}/METADATA +2 -2
  67. {codemie_test_harness-0.1.158.dist-info → codemie_test_harness-0.1.160.dist-info}/RECORD +69 -58
  68. {codemie_test_harness-0.1.158.dist-info → codemie_test_harness-0.1.160.dist-info}/WHEEL +0 -0
  69. {codemie_test_harness-0.1.158.dist-info → codemie_test_harness-0.1.160.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,119 @@
+ """
+ Environment resolver utility that determines environment from CODEMIE_API_DOMAIN.
+
+ This module provides a robust way to resolve the environment based on the
+ API domain, eliminating the need for hardcoded ENV variables and preventing
+ configuration drift between domain and environment settings.
+ """
+
+ import os
+ import re
+
+ from codemie_test_harness.tests.enums.environment import Environment
+
+
+ class EnvironmentResolver:
+     """
+     Resolves environment configuration based on CODEMIE_API_DOMAIN.
+
+     Supported environments and their domain patterns:
+     - prod: Production domains (*.lab.epam.com)
+     - preview: Preview domains (*-preview.lab.epam.com)
+     - localhost: Local development (localhost, 127.0.0.1)
+     - azure: Azure sandbox environments (*-azure.eks-sandbox.*)
+     - gcp: GCP sandbox environments (*-gcp.eks-sandbox.*)
+     - aws: AWS sandbox environments (*-aws.eks-sandbox.*)
+     - unknown: Any unrecognized domain
+     """
+
+     # Regex patterns for environment detection (order matters - more specific first)
+     DOMAIN_PATTERNS = {
+         # Production environments
+         r"^https?://.*codemie\.lab\.epam\.com(/.*)?$": Environment.PRODUCTION,
+         # Preview environments
+         r"^https?://.*codemie-preview\.lab\.epam\.com(/.*)?$": Environment.PREVIEW,
+         # Specific cloud sandbox environments
+         r"^https?://.*codemie-azure\.eks-sandbox\.aws\.main\.edp\.projects\.epam\.com(/.*)?$": Environment.AZURE,
+         r"^https?://.*codemie-gcp\.eks-sandbox\.aws\.main\.edp\.projects\.epam\.com(/.*)?$": Environment.GCP,
+         r"^https?://.*codemie-aws\.eks-sandbox\.aws\.main\.edp\.projects\.epam\.com(/.*)?$": Environment.AWS,
+         # Local development patterns
+         r"^https?://localhost(:\d+)?(/.*)?$": Environment.LOCALHOST,
+         r"^https?://127\.0\.0\.1(:\d+)?(/.*)?$": Environment.LOCALHOST,
+     }
+
+     @classmethod
+     def get_environment(cls) -> Environment:
+         """
+         Get the current environment based on CODEMIE_API_DOMAIN.
+
+         This is the main method that should be used throughout the codebase
+         to replace os.getenv("ENV") calls.
+
+         Returns:
+             Environment: The resolved environment enum value
+
+         Raises:
+             ValueError: If CODEMIE_API_DOMAIN is not set
+         """
+         codemie_api_domain = os.getenv("CODEMIE_API_DOMAIN")
+
+         if not codemie_api_domain:
+             raise ValueError("CODEMIE_API_DOMAIN environment variable is not set")
+
+         # Clean up the domain (remove trailing slashes)
+         domain = codemie_api_domain.rstrip("/")
+
+         # Try pattern matching (order matters - more specific patterns first)
+         for pattern, environment in cls.DOMAIN_PATTERNS.items():
+             if re.match(pattern, domain, re.IGNORECASE):
+                 return environment
+
+         # If no match found, return 'unknown'
+         return Environment.UNKNOWN
+
+     @classmethod
+     def is_production(cls) -> bool:
+         """Check if current environment is production."""
+         return cls.get_environment().is_production
+
+     @classmethod
+     def is_preview(cls) -> bool:
+         """Check if current environment is preview."""
+         return cls.get_environment().is_preview
+
+     @classmethod
+     def is_localhost(cls) -> bool:
+         """Check if current environment is localhost."""
+         return cls.get_environment().is_localhost
+
+     @classmethod
+     def is_sandbox(cls) -> bool:
+         """Check if current environment is any sandbox environment."""
+         return cls.get_environment().is_sandbox
+
+     @classmethod
+     def is_azure(cls) -> bool:
+         """Check if current environment is Azure."""
+         return cls.get_environment().is_azure
+
+     @classmethod
+     def is_gcp(cls) -> bool:
+         """Check if current environment is GCP."""
+         return cls.get_environment().is_gcp
+
+     @classmethod
+     def is_aws(cls) -> bool:
+         """Check if current environment is AWS."""
+         return cls.get_environment().is_aws
+
+
+ def get_environment() -> Environment:
+     """
+     Convenience function to get the current environment.
+
+     This is the primary function that should replace os.getenv("ENV") throughout the codebase.
+
+     Returns:
+         Environment: The resolved environment enum value
+     """
+     return EnvironmentResolver.get_environment()
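The module added above is consumed by the test modules updated in the hunks that follow. A minimal usage sketch, assuming only that CODEMIE_API_DOMAIN is exported for the test run (the domain value and test name below are illustrative, not taken from the package):

    import os

    import pytest

    from codemie_test_harness.tests.enums.environment import Environment
    from codemie_test_harness.tests.utils.env_resolver import (
        EnvironmentResolver,
        get_environment,
    )

    # Illustrative only: in the harness this variable is expected to come from the
    # runtime environment, not from test code.
    os.environ.setdefault("CODEMIE_API_DOMAIN", "https://codemie-preview.lab.epam.com")

    # Module-level skip, mirroring the pytestmark pattern introduced in the hunks below.
    pytestmark = pytest.mark.skipif(
        EnvironmentResolver.is_localhost(),
        reason="Skipping these tests on local environment",
    )

    # Per-test skip for specific environments, mirroring the notification-tool change below.
    @pytest.mark.skipif(
        get_environment() in [Environment.LOCALHOST, Environment.GCP],
        reason="Not supported on localhost or the GCP sandbox",
    )
    def test_environment_is_resolved_from_domain():
        # With the preview domain set above, the resolver matches the preview pattern.
        assert get_environment() == Environment.PREVIEW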
@@ -1,6 +1,5 @@
  import pytest
 
- from codemie_test_harness.tests.enums.tools import Toolkit, CloudTool
  from codemie_test_harness.tests.test_data.cloud_tools_test_data import cloud_test_data
 
 
@@ -11,12 +10,6 @@ from codemie_test_harness.tests.test_data.cloud_tools_test_data import cloud_tes
  @pytest.mark.parametrize(
      "toolkit,tool_name,credential_type,credentials,prompt,expected_response",
      cloud_test_data,
-     ids=[
-         f"{Toolkit.CLOUD}_{CloudTool.AWS}",
-         f"{Toolkit.CLOUD}_{CloudTool.AZURE}",
-         f"{Toolkit.CLOUD}_{CloudTool.GCP}",
-         f"{Toolkit.CLOUD}_{CloudTool.KUBERNETES}",
-     ],
  )
  def test_workflow_with_cloud_tools(
      assistant,
@@ -44,7 +44,6 @@ def test_workflow_with_codebase_tools(
  @pytest.mark.parametrize(
      "toolkit, tool_name, credentials, prompt,expected",
      sonar_tools_test_data,
-     ids=[f"{row[0]}_{row[1]}" for row in sonar_tools_test_data],
  )
  def test_workflow_with_sonar_tools(
      assistant,
@@ -1,9 +1,6 @@
- import os
-
  import pytest
  from codemie_sdk.models.integration import CredentialTypes
 
- from codemie_test_harness.tests.enums.integrations import DataBaseDialect
  from codemie_test_harness.tests.enums.tools import DataManagementTool, Toolkit
  from codemie_test_harness.tests.test_data.data_management_tools_test_data import (
      ELASTIC_TOOL_TASK,
@@ -16,9 +13,11 @@ from codemie_test_harness.tests.test_data.data_management_tools_test_data import
      RESPONSE_FOR_SQL,
  )
  from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
  pytestmark = pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this tests on local environment"
+     EnvironmentResolver.is_localhost(),
+     reason="Skipping this tests on local environment",
  )
 
 
@@ -56,7 +55,6 @@ def test_workflow_with_assistant_with_elastic_tools(
  @pytest.mark.parametrize(
      "db_dialect",
      sql_tools_test_data,
-     ids=[DataBaseDialect.MY_SQL, DataBaseDialect.POSTGRES, DataBaseDialect.MS_SQL],
  )
  def test_workflow_with_assistant_with_sql_tools(
      assistant,
@@ -1,5 +1,3 @@
- import os
-
  import pytest
  from hamcrest import assert_that, contains_string, is_not, all_of
 
@@ -18,6 +16,7 @@ from codemie_test_harness.tests.test_data.file_management_tools_test_data import
      RESPONSE_FOR_FILE_EDITOR,
  )
  from codemie_test_harness.tests.utils.base_utils import get_random_name
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 
  @pytest.mark.workflow
@@ -27,12 +26,6 @@ from codemie_test_harness.tests.utils.base_utils import get_random_name
  @pytest.mark.parametrize(
      "tool_name, prompt, expected_response",
      file_management_tools_test_data,
-     ids=[
-         FileManagementTool.PYTHON_CODE_INTERPRETER,
-         FileManagementTool.LIST_DIRECTORY,
-         FileManagementTool.WRITE_FILE,
-         FileManagementTool.RUN_COMMAND_LINE,
-     ],
  )
  def test_workflow_with_assistant_with_file_management_tools(
      assistant,
@@ -58,7 +51,7 @@ def test_workflow_with_assistant_with_file_management_tools(
  @pytest.mark.file_management
  @pytest.mark.regression
  @pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this test on local environment"
+     EnvironmentResolver.is_localhost(), reason="Skipping this test on local environment"
  )
  def test_workflow_with_assistant_with_generate_image_tool(
      assistant,
@@ -1,4 +1,4 @@
- import os
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
  import pytest
 
@@ -15,14 +15,15 @@ from codemie_test_harness.tests.test_data.mcp_server_test_data import (
      CLI_MCP_SERVER,
  )
 
+ pytestmark = pytest.mark.skipif(
+     EnvironmentResolver.is_localhost(), reason="Skipping this test on local environment"
+ )
+
 
  @pytest.mark.workflow
  @pytest.mark.workflow_with_assistant
  @pytest.mark.mcp
  @pytest.mark.regression
- @pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this test on local environment"
- )
  def test_workflow_with_assistant_with_time_mcp_server(
      assistant,
      workflow_with_assistant,
@@ -49,9 +50,6 @@ def test_workflow_with_assistant_with_time_mcp_server(
      cli_mcp_server_test_data,
      ids=[f"{row[0]}" for row in cli_mcp_server_test_data],
  )
- @pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this test on local environment"
- )
  def test_workflow_with_assistant_with_cli_mcp_server(
      assistant,
      workflow_with_assistant,
@@ -77,9 +75,6 @@ def test_workflow_with_assistant_with_cli_mcp_server(
  @pytest.mark.workflow_with_assistant
  @pytest.mark.mcp
  @pytest.mark.regression
- @pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this test on local environment"
- )
  def test_workflow_with_assistant_with_fetch_mcp_server(
      assistant,
      workflow_with_assistant,
@@ -1,4 +1,5 @@
- import os
+ from codemie_test_harness.tests.enums.environment import Environment
+ from codemie_test_harness.tests.utils.env_resolver import get_environment
 
  import pytest
  from hamcrest import assert_that, equal_to
@@ -20,7 +21,7 @@ from codemie_test_harness.tests.test_data.notification_tools_test_data import (
  @pytest.mark.email
  @pytest.mark.regression
  @pytest.mark.skipif(
-     os.getenv("ENV") in ["local", "gcp"],
+     get_environment() in [Environment.LOCALHOST, Environment.GCP],
      reason="Skipping this test on local environment",
  )
  def test_workflow_with_email_tool(
@@ -1,4 +1,4 @@
- import os
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
  import pytest
 
@@ -18,7 +18,8 @@ from codemie_test_harness.tests.test_data.open_api_tools_test_data import (
      ids=[f"{row[0]}" for row in open_api_tools_test_data],
  )
  @pytest.mark.skipif(
-     os.getenv("ENV") == "azure", reason="Still have an issue with encoding long strings"
+     EnvironmentResolver.is_azure(),
+     reason="Still have an issue with encoding long strings",
  )
  def test_workflow_with_assistant_with_open_api_tools(
      assistant,
@@ -0,0 +1,38 @@
+ import pytest
+
+ from codemie_test_harness.tests.test_data.report_portal_tools_test_data import (
+     rp_test_data,
+ )
+
+
+ @pytest.mark.workflow
+ @pytest.mark.workflow_with_assistant
+ @pytest.mark.report_portal
+ @pytest.mark.regression
+ @pytest.mark.parametrize(
+     "toolkit,tool_name,prompt,expected_response",
+     rp_test_data,
+     ids=[f"{row[0]}_{row[1]}" for row in rp_test_data],
+ )
+ def test_workflow_with_assistant_with_report_portal_tools(
+     assistant,
+     workflow_with_assistant,
+     workflow_utils,
+     report_portal_integration,
+     similarity_check,
+     toolkit,
+     tool_name,
+     prompt,
+     expected_response,
+ ):
+     assistant = assistant(
+         toolkit,
+         tool_name,
+         settings=report_portal_integration,
+     )
+
+     workflow_with_assistant = workflow_with_assistant(assistant, prompt)
+     response = workflow_utils.execute_workflow(
+         workflow_with_assistant.id, assistant.name
+     )
+     similarity_check.check_similarity(response, expected_response)
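For reference, the parametrization above unpacks each rp_test_data entry as (toolkit, tool_name, prompt, expected_response). The packaged test data module itself is not shown in this diff; a purely hypothetical row illustrating that shape could look like:

    # Hypothetical example of the 4-tuple shape consumed by the parametrized tests;
    # the names and text below are placeholders, not the packaged test data.
    rp_test_data = [
        (
            "Report_Portal",                                       # toolkit (placeholder)
            "get_launches",                                        # tool_name (placeholder)
            "List the most recent launches in the demo project",   # prompt
            "Here are the most recent launches: ...",              # expected_response
        ),
    ]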
@@ -1,6 +1,5 @@
  import copy
  import json
- import os
  import random
 
  import pytest
@@ -11,10 +10,12 @@ from codemie_test_harness.tests.test_data.direct_tools.data_management_tools_tes
      sql_tools_test_data,
  )
  from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
  from codemie_test_harness.tests.utils.base_utils import get_random_name
 
  pytestmark = pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this tests on local environment"
+     EnvironmentResolver.is_localhost(),
+     reason="Skipping this tests on local environment",
  )
 
 
@@ -1,6 +1,5 @@
  import copy
  import json
- import os
  import random
 
  import pytest
@@ -9,9 +8,11 @@ from codemie_test_harness.tests.test_data.direct_tools.open_api_tools_test_data
      open_api_tools_test_data,
  )
  from codemie_test_harness.tests.utils.base_utils import get_random_name
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
  pytestmark = pytest.mark.skipif(
-     os.getenv("ENV") == "azure", reason="Still have an issue with encoding long strings"
+     EnvironmentResolver.is_azure(),
+     reason="Still have an issue with encoding long strings",
  )
 
 
@@ -0,0 +1,115 @@
+ import copy
+ import json
+ import random
+
+ import pytest
+
+ from codemie_test_harness.tests.test_data.direct_tools.report_portal_tools_test_data import (
+     report_portal_tools_test_data,
+ )
+ from codemie_test_harness.tests.utils.base_utils import get_random_name
+
+
+ @pytest.mark.workflow
+ @pytest.mark.direct_tool
+ @pytest.mark.report_portal
+ @pytest.mark.regression
+ @pytest.mark.parametrize(
+     "toolkit,tool_name,prompt,expected_response",
+     report_portal_tools_test_data,
+     ids=[f"{row[0]}_{row[1]}" for row in report_portal_tools_test_data],
+ )
+ def test_workflow_with_report_portal_tool_direct(
+     report_portal_integration,
+     workflow_utils,
+     workflow_with_tool,
+     similarity_check,
+     toolkit,
+     tool_name,
+     prompt,
+     expected_response,
+ ):
+     assistant_and_state_name = get_random_name()
+
+     test_workflow = workflow_with_tool(
+         assistant_and_state_name,
+         tool_name,
+         integration=report_portal_integration,
+     )
+     response = workflow_utils.execute_workflow(
+         test_workflow.id,
+         assistant_and_state_name,
+         user_input=json.dumps(prompt),
+     )
+     similarity_check.check_similarity(response, expected_response)
+
+
+ @pytest.mark.workflow
+ @pytest.mark.direct_tool
+ @pytest.mark.report_portal
+ @pytest.mark.regression
+ @pytest.mark.parametrize(
+     "toolkit,tool_name,prompt,expected_response",
+     report_portal_tools_test_data,
+     ids=[f"{row[0]}_{row[1]}" for row in report_portal_tools_test_data],
+ )
+ def test_workflow_with_report_portal_tool_with_hardcoded_args(
+     report_portal_integration,
+     workflow_utils,
+     workflow_with_tool,
+     similarity_check,
+     toolkit,
+     tool_name,
+     prompt,
+     expected_response,
+ ):
+     assistant_and_state_name = get_random_name()
+
+     test_workflow = workflow_with_tool(
+         assistant_and_state_name,
+         tool_name,
+         integration=report_portal_integration,
+         tool_args=prompt,
+     )
+     response = workflow_utils.execute_workflow(
+         test_workflow.id, assistant_and_state_name
+     )
+     similarity_check.check_similarity(response, expected_response)
+
+
+ @pytest.mark.workflow
+ @pytest.mark.direct_tool
+ @pytest.mark.report_portal
+ @pytest.mark.regression
+ @pytest.mark.parametrize(
+     "toolkit,tool_name,prompt,expected_response",
+     report_portal_tools_test_data,
+     ids=[f"{row[0]}_{row[1]}" for row in report_portal_tools_test_data],
+ )
+ def test_workflow_with_report_portal_tool_with_overriding_args(
+     report_portal_integration,
+     workflow_utils,
+     workflow_with_tool,
+     similarity_check,
+     toolkit,
+     tool_name,
+     prompt,
+     expected_response,
+ ):
+     assistant_and_state_name = get_random_name()
+
+     args_copy = copy.deepcopy(prompt)
+     args_copy = {key: random.randint(1, 10) for key in args_copy}
+
+     test_workflow = workflow_with_tool(
+         assistant_and_state_name,
+         tool_name,
+         integration=report_portal_integration,
+         tool_args=args_copy,
+     )
+     response = workflow_utils.execute_workflow(
+         test_workflow.id,
+         assistant_and_state_name,
+         user_input=json.dumps(prompt),
+     )
+     similarity_check.check_similarity(response, expected_response)
@@ -1,6 +1,5 @@
  import pytest
 
- from codemie_test_harness.tests.enums.tools import Toolkit, CloudTool
  from codemie_test_harness.tests.test_data.cloud_tools_test_data import cloud_test_data
  from codemie_test_harness.tests.utils.base_utils import get_random_name
 
@@ -15,12 +14,6 @@ from codemie_test_harness.tests.utils.base_utils import get_random_name
  @pytest.mark.parametrize(
      "toolkit, tool_name, credential_type, credentials, prompt, expected_response",
      cloud_test_data,
-     ids=[
-         f"{Toolkit.CLOUD}_{CloudTool.AWS}",
-         f"{Toolkit.CLOUD}_{CloudTool.AZURE}",
-         f"{Toolkit.CLOUD}_{CloudTool.GCP}",
-         f"{Toolkit.CLOUD}_{CloudTool.KUBERNETES}",
-     ],
  )
  def test_workflow_with_cloud_tools(
      workflow_with_virtual_assistant,
@@ -70,7 +70,6 @@ def test_workflow_with_codebase_tools(
  @pytest.mark.parametrize(
      "toolkit, tool_name, credentials, prompt, expected_response",
      sonar_tools_test_data,
-     ids=[f"{row[1]}" for row in sonar_tools_test_data],
  )
  def test_workflow_with_sonar_tools(
      workflow_with_virtual_assistant,
@@ -1,9 +1,6 @@
- import os
-
  import pytest
  from codemie_sdk.models.integration import CredentialTypes
 
- from codemie_test_harness.tests.enums.integrations import DataBaseDialect
  from codemie_test_harness.tests.enums.tools import DataManagementTool
  from codemie_test_harness.tests.test_data.data_management_tools_test_data import (
      ELASTIC_TOOL_TASK,
@@ -16,10 +13,12 @@ from codemie_test_harness.tests.test_data.data_management_tools_test_data import
      RESPONSE_FOR_SQL,
  )
  from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
  from codemie_test_harness.tests.utils.base_utils import get_random_name
 
  pytestmark = pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this tests on local environment"
+     EnvironmentResolver.is_localhost(),
+     reason="Skipping this tests on local environment",
  )
 
 
@@ -61,7 +60,6 @@ def test_workflow_with_elastic_tools(
  @pytest.mark.parametrize(
      "db_dialect",
      sql_tools_test_data,
-     ids=[DataBaseDialect.MY_SQL, DataBaseDialect.POSTGRES, DataBaseDialect.MS_SQL],
  )
  def test_workflow_with_sql_tools(
      workflow_with_virtual_assistant,
@@ -1,5 +1,3 @@
- import os
-
  import pytest
  from hamcrest import assert_that, contains_string, is_not, all_of
 
@@ -18,6 +16,7 @@ from codemie_test_harness.tests.test_data.file_management_tools_test_data import
      RESPONSE_FOR_FILE_EDITOR,
  )
  from codemie_test_harness.tests.utils.base_utils import get_random_name
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 
  @pytest.mark.workflow
@@ -28,12 +27,6 @@ from codemie_test_harness.tests.utils.base_utils import get_random_name
  @pytest.mark.parametrize(
      "tool_name, prompt, expected_response",
      file_management_tools_test_data,
-     ids=[
-         FileManagementTool.PYTHON_CODE_INTERPRETER,
-         FileManagementTool.LIST_DIRECTORY,
-         FileManagementTool.WRITE_FILE,
-         FileManagementTool.RUN_COMMAND_LINE,
-     ],
  )
  def test_workflow_with_file_management_tools(
      workflow_with_virtual_assistant,
@@ -65,7 +58,7 @@ def test_workflow_with_file_management_tools(
  @pytest.mark.regression
  @pytest.mark.testcase("EPMCDME-6561")
  @pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this test on local environment"
+     EnvironmentResolver.is_localhost(), reason="Skipping this test on local environment"
  )
  def test_workflow_with_generate_image_tool(
      workflow_with_virtual_assistant, filesystem_integration, workflow_utils
@@ -1,5 +1,3 @@
- import os
-
  import pytest
 
  from codemie_test_harness.tests.test_data.mcp_server_test_data import (
@@ -15,6 +13,11 @@ from codemie_test_harness.tests.test_data.mcp_server_test_data import (
      CLI_MCP_SERVER,
  )
  from codemie_test_harness.tests.utils.base_utils import get_random_name
+ from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
+
+ pytestmark = pytest.mark.skipif(
+     EnvironmentResolver.is_localhost(), reason="Skipping this test on local environment"
+ )
 
 
  @pytest.mark.workflow
@@ -22,9 +25,6 @@ from codemie_test_harness.tests.utils.base_utils import get_random_name
  @pytest.mark.mcp
  @pytest.mark.regression
  @pytest.mark.testcase("EPMCDME-6419")
- @pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this test on local environment"
- )
  def test_workflow_with_time_mcp_server(
      workflow_with_virtual_assistant,
      workflow_utils,
@@ -51,9 +51,6 @@ def test_workflow_with_time_mcp_server(
  @pytest.mark.mcp
  @pytest.mark.regression
  @pytest.mark.testcase("EPMCDME-6419")
- @pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this test on local environment"
- )
  @pytest.mark.parametrize(
      "command, expected_answer",
      cli_mcp_server_test_data,
@@ -86,9 +83,6 @@ def test_workflow_with_cli_mcp_server(
  @pytest.mark.mcp
  @pytest.mark.regression
  @pytest.mark.testcase("EPMCDME-6419")
- @pytest.mark.skipif(
-     os.getenv("ENV") == "local", reason="Skipping this test on local environment"
- )
  def test_workflow_with_fetch_mcp_server(
      workflow_with_virtual_assistant,
      workflow_utils,
@@ -1,5 +1,3 @@
- import os
-
  import pytest
  from hamcrest import assert_that, equal_to
 
@@ -13,6 +11,8 @@ from codemie_test_harness.tests.test_data.notification_tools_test_data import (
      TELEGRAM_RESPONSE,
  )
  from codemie_test_harness.tests.utils.base_utils import get_random_name
+ from codemie_test_harness.tests.enums.environment import Environment
+ from codemie_test_harness.tests.utils.env_resolver import get_environment
 
 
  @pytest.mark.workflow
@@ -22,7 +22,7 @@ from codemie_test_harness.tests.utils.base_utils import get_random_name
  @pytest.mark.regression
  @pytest.mark.tescase("EPMCDME-6652")
  @pytest.mark.skipif(
-     os.getenv("ENV") in ["local", "gcp"],
+     get_environment() in [Environment.LOCALHOST, Environment.GCP],
      reason="Skipping this test on local environment",
  )
  def test_workflow_with_notification_email_tool(