codemie-test-harness 0.1.184__py3-none-any.whl → 0.1.198__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of codemie-test-harness might be problematic.
- codemie_test_harness/cli/cli.py +42 -6
- codemie_test_harness/cli/commands/config_cmd.py +1 -1
- codemie_test_harness/cli/commands/run_cmd.py +24 -1
- codemie_test_harness/cli/constants.py +1 -0
- codemie_test_harness/cli/runner.py +17 -3
- codemie_test_harness/cli/utils.py +12 -2
- codemie_test_harness/pytest.ini +3 -0
- codemie_test_harness/tests/assistant/test_assistants.py +17 -1
- codemie_test_harness/tests/assistant/tools/datamanagement/test_assistant_with_data_management_tools.py +47 -6
- codemie_test_harness/tests/assistant/tools/mcp/test_cli_mcp_server.py +0 -4
- codemie_test_harness/tests/assistant/tools/mcp/test_mcp_servers.py +0 -4
- codemie_test_harness/tests/conftest.py +19 -3
- codemie_test_harness/tests/enums/environment.py +3 -3
- codemie_test_harness/tests/enums/integrations.py +1 -0
- codemie_test_harness/tests/enums/model_types.py +1 -0
- codemie_test_harness/tests/integrations/project/test_default_integrations.py +41 -15
- codemie_test_harness/tests/integrations/project/test_project_integrations.py +42 -0
- codemie_test_harness/tests/integrations/user/test_default_integrations.py +41 -15
- codemie_test_harness/tests/llm/assistants/test_llm.py +45 -2
- codemie_test_harness/tests/test_data/assistant_test_data.py +171 -171
- codemie_test_harness/tests/test_data/codebase_tools_test_data.py +2 -0
- codemie_test_harness/tests/test_data/data_management_tools_test_data.py +18 -0
- codemie_test_harness/tests/test_data/direct_tools/data_management_tools_test_data.py +18 -1
- codemie_test_harness/tests/test_data/direct_tools/report_portal_tools_test_data.py +189 -197
- codemie_test_harness/tests/test_data/integrations_test_data.py +163 -2
- codemie_test_harness/tests/test_data/llm_test_data.py +1 -0
- codemie_test_harness/tests/test_data/open_api_tools_test_data.py +22 -1
- codemie_test_harness/tests/test_data/report_portal_tools_test_data.py +89 -112
- codemie_test_harness/tests/test_data/research_tools_test_data.py +29 -7
- codemie_test_harness/tests/utils/assistant_utils.py +22 -12
- codemie_test_harness/tests/utils/credentials_manager.py +66 -8
- codemie_test_harness/tests/utils/workflow_utils.py +91 -0
- codemie_test_harness/tests/workflow/assistant_tools/access_management/test_workflow_with_assistant_with_keycloak_tool.py +7 -0
- codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_test_plan_tools.py +50 -1
- codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_wiki_tools.py +39 -1
- codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_work_item_tools.py +34 -1
- codemie_test_harness/tests/workflow/assistant_tools/cloud/test_workflow_with_assistant_cloud_tools.py +6 -0
- codemie_test_harness/tests/workflow/assistant_tools/codebase/test_worfklow_with_assistant_codebase_tools.py +11 -0
- codemie_test_harness/tests/workflow/assistant_tools/data_management/test_workflow_with_assistant_with_data_management_tools.py +72 -1
- codemie_test_harness/tests/workflow/assistant_tools/default_integrations/test_default_integrations_for_tool.py +31 -0
- codemie_test_harness/tests/workflow/assistant_tools/default_integrations/test_default_integrations_for_tool_kit.py +31 -0
- codemie_test_harness/tests/workflow/assistant_tools/default_integrations/test_default_integrations_for_tool_with_datasource.py +26 -0
- codemie_test_harness/tests/workflow/assistant_tools/file_management/test_workflow_with_assistant_with_file_management_tools.py +63 -1
- codemie_test_harness/tests/workflow/assistant_tools/git/test_workflow_with_assistant_git_tools.py +82 -7
- codemie_test_harness/tests/workflow/assistant_tools/mcp/test_workflow_with_assistant_with_mcp_server.py +23 -4
- codemie_test_harness/tests/workflow/assistant_tools/notification/test_workflow_with_assistant_notification_tools.py +12 -0
- codemie_test_harness/tests/workflow/assistant_tools/open_api/test_workflow_with_assistant_with_open_api_tools.py +6 -0
- codemie_test_harness/tests/workflow/assistant_tools/plugin/test_workflow_with_assistant_with_development_plugin.py +29 -2
- codemie_test_harness/tests/workflow/assistant_tools/plugin/test_workflow_with_assistant_with_plugin_and_mcp_servers.py +14 -1
- codemie_test_harness/tests/workflow/assistant_tools/project_management/test_workflow_with_assistant_pm_tools.py +7 -0
- codemie_test_harness/tests/workflow/assistant_tools/report_portal/test_workflow_with_assistant_with_report_portal_tools.py +7 -0
- codemie_test_harness/tests/workflow/assistant_tools/research/test_workflow_with_assistant_research_tools.py +14 -1
- codemie_test_harness/tests/workflow/assistant_tools/servicenow/test_workflow_with_servicenow_tools.py +6 -0
- codemie_test_harness/tests/workflow/assistant_tools/vcs/workflow_with_assistant_vcs_tools.py +6 -0
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_cloud_tools.py +12 -9
- codemie_test_harness/tests/workflow/virtual_assistant_tools/access_management/test_workflow_with_keycloak_tool.py +8 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/ado/test_workflow_with_ado_test_plan_tools.py +28 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/ado/test_workflow_with_ado_wiki_tools.py +24 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/ado/test_workflow_with_ado_work_item_tools.py +20 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/cloud/test_workflow_with_cloud_tools.py +13 -4
- codemie_test_harness/tests/workflow/virtual_assistant_tools/codebase/test_workflow_with_codebase_tools.py +16 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/data_management/test_workflow_with_data_management_tools.py +73 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/default_integrations/test_default_integrations_for_tool.py +34 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/default_integrations/test_default_integrations_for_tool_kit.py +34 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/default_integrations/test_default_integrations_for_tool_with_datasource.py +34 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/file_management/test_workflow_with_file_management_tools.py +101 -49
- codemie_test_harness/tests/workflow/virtual_assistant_tools/git/test_workflow_with_git_tools.py +42 -3
- codemie_test_harness/tests/workflow/virtual_assistant_tools/mcp/test_workflow_with_mcp_server.py +27 -5
- codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/test_workflow_with_notification_tools.py +13 -0
- codemie_test_harness/tests/workflow/virtual_assistant_tools/open_api/test_workflow_with_open_api_tools.py +10 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/plugin/test_workflow_with_development_plugin.py +20 -0
- codemie_test_harness/tests/workflow/virtual_assistant_tools/plugin/test_workflow_with_plugin_and_mcp_servers.py +14 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/project_management/test_workflow_with_project_management_tools.py +10 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/report_portal/test_workflow_with_report_portal_tool.py +10 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/research/test_workflow_with_research_tools.py +9 -0
- codemie_test_harness/tests/workflow/virtual_assistant_tools/servicenow/test_workflow_with_servicenow_tools.py +10 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/test_workflow_with_vcs_tools.py +9 -1
- {codemie_test_harness-0.1.184.dist-info → codemie_test_harness-0.1.198.dist-info}/METADATA +134 -3
- {codemie_test_harness-0.1.184.dist-info → codemie_test_harness-0.1.198.dist-info}/RECORD +81 -81
- {codemie_test_harness-0.1.184.dist-info → codemie_test_harness-0.1.198.dist-info}/WHEEL +0 -0
- {codemie_test_harness-0.1.184.dist-info → codemie_test_harness-0.1.198.dist-info}/entry_points.txt +0 -0
@@ -3,6 +3,7 @@ import os
 import pytest
 
 from codemie_test_harness.tests.enums.tools import PluginTool
+from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
 from codemie_test_harness.tests.test_data.plugin_tools_test_data import (
     list_files_plugin_tools_test_data,
     CREATE_READ_DELETE_FILE_TEST_DATA,
@@ -37,6 +38,10 @@ def test_workflow_with_list_files_plugin_tools(
     response = workflow_utils.execute_workflow(
         workflow.id, assistant_and_state_name, user_input=prompt
     )
+
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(workflow)
+    assert_tool_triggered(tool_name, triggered_tools)
+
     similarity_check.check_similarity(response, expected_response)
 
 
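The hunks above and below all follow one pattern: execute the workflow, collect which tools actually fired, and assert that the expected tool is among them. Neither helper's body appears in this diff, so the following is a minimal sketch of the shape the call sites imply, assuming the execution object exposes a list of steps carrying a tool name (`steps` and `tool_name` are illustrative assumptions, not the harness's real API):

```python
# Illustrative sketch only: attribute names (steps, tool_name) are assumptions;
# the real helpers live in workflow_utils and base_utils inside the package.
from typing import Iterable, List


def extract_triggered_tools_from_execution(execution) -> List[str]:
    """Collect the names of every tool invoked while the workflow ran."""
    return [
        step.tool_name                 # assumed attribute on each execution step
        for step in execution.steps    # assumed list of recorded steps
        if step.tool_name
    ]


def assert_tool_triggered(tool_name, triggered_tools: Iterable[str]) -> None:
    """Fail loudly if the expected tool never fired during the run."""
    names = [str(t) for t in triggered_tools]
    assert str(tool_name) in names, (
        f"Expected tool '{tool_name}' to be triggered; got {sorted(set(names))}"
    )
```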
@@ -71,6 +76,11 @@ def test_workflow_with_modify_files_plugin_tools(
         ),
     )
 
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        workflow
+    )
+    assert_tool_triggered(PluginTool.WRITE_FILE_TO_FILE_SYSTEM, triggered_tools)
+
     similarity_check.check_similarity(
         response,
         CREATE_READ_DELETE_FILE_TEST_DATA["create_file_response"].format(
@@ -86,6 +96,11 @@ def test_workflow_with_modify_files_plugin_tools(
         ),
     )
 
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        workflow
+    )
+    assert_tool_triggered(PluginTool.GENERIC_GIT_TOOL, triggered_tools)
+
     similarity_check.check_similarity(
         response,
         CREATE_READ_DELETE_FILE_TEST_DATA["git_command_response"].format(file_name),
@@ -99,6 +114,11 @@ def test_workflow_with_modify_files_plugin_tools(
         ].format(file_name),
     )
 
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        workflow
+    )
+    assert_tool_triggered(PluginTool.READ_FILE_FROM_FILE_SYSTEM, triggered_tools)
+
     similarity_check.check_similarity(
         response,
         CREATE_READ_DELETE_FILE_TEST_DATA["show_file_content_response"].format(
@@ -2,7 +2,8 @@ import os
 
 import pytest
 
-from codemie_test_harness.tests.enums.tools import PluginTool
+from codemie_test_harness.tests.enums.tools import PluginTool, CliMcpServer
+from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
 from codemie_test_harness.tests.test_data.mcp_server_test_data import (
     cli_mcp_server_with_plugin_test_data,
     filesystem_mcp_server_with_plugin_test_data,
@@ -40,6 +41,10 @@ def test_workflow_with_plugin_and_cli_mcp_server(
     response = workflow_utils.execute_workflow(
         workflow.id, assistant_and_state_name, user_input=prompt
     )
+
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(workflow)
+    assert_tool_triggered(CliMcpServer.RUN_COMMAND, triggered_tools)
+
     similarity_check.check_similarity(response, expected_response)
 
 
@@ -74,6 +79,14 @@ def test_workflow_with_plugin_and_filesystem_mcp_server(
         response = workflow_utils.execute_workflow(
             workflow.id, assistant_and_state_name, user_input=prompt
         )
+
+        triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+            workflow
+        )
+
+        if tool_name != PluginTool.READ_FILE:
+            assert_tool_triggered(tool_name, triggered_tools)
+
         similarity_check.check_similarity(response, expected_response)
     finally:
         file_to_remove = f"{str(TESTS_PATH / 'sdk_tests')}.properties"
@@ -3,7 +3,10 @@ import pytest
 from codemie_test_harness.tests.test_data.project_management_test_data import (
     pm_tools_test_data,
 )
-from codemie_test_harness.tests.utils.base_utils import get_random_name
+from codemie_test_harness.tests.utils.base_utils import (
+    get_random_name,
+    assert_tool_triggered,
+)
 from codemie_test_harness.tests.utils.constants import (
     project_management_integrations,
 )
@@ -39,4 +42,10 @@ def test_workflow_with_virtual_assistant_with_project_management_tools(
     response = workflow_utils.execute_workflow(
         test_workflow.id, assistant_and_state_name, user_input=prompt
     )
+
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        test_workflow
+    )
+    assert_tool_triggered(tool_name, triggered_tools)
+
     similarity_check.check_similarity(response, expected_response)
@@ -3,7 +3,10 @@ import pytest
 from codemie_test_harness.tests.test_data.report_portal_tools_test_data import (
     rp_test_data,
 )
-from codemie_test_harness.tests.utils.base_utils import get_random_name
+from codemie_test_harness.tests.utils.base_utils import (
+    get_random_name,
+    assert_tool_triggered,
+)
 
 
 @pytest.mark.workflow
@@ -36,4 +39,10 @@ def test_workflow_with_virtual_assistant_with_report_portal_tools(
     response = workflow_utils.execute_workflow(
         test_workflow.id, assistant_and_state_name
     )
+
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        test_workflow
+    )
+    assert_tool_triggered(tool_name, triggered_tools)
+
     similarity_check.check_similarity(response, expected_response)
@@ -9,6 +9,7 @@ from codemie_test_harness.tests.test_data.research_tools_test_data import (
 from codemie_test_harness.tests.utils.base_utils import (
     get_random_name,
     percent_of_relevant_titles,
+    assert_tool_triggered,
 )
 
 
@@ -38,6 +39,10 @@ def test_workflow_with_search_tools(
     response = workflow_utils.execute_workflow(
         test_workflow.id, assistant_and_state_name
     )
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        test_workflow
+    )
+    assert_tool_triggered(tool_name, triggered_tools)
     percent = percent_of_relevant_titles(response)
 
     assert_that(
@@ -74,5 +79,9 @@ def test_workflow_with_interaction_tools(
     response = workflow_utils.execute_workflow(
         test_workflow.id, assistant_and_state_name
     )
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        test_workflow
+    )
+    assert_tool_triggered(tool_name, triggered_tools)
 
     similarity_check.check_similarity(response, expected_response)
@@ -5,7 +5,10 @@ from codemie_test_harness.tests.test_data.servicenow_tools_test_data import (
     PROMPT,
     EXPECTED_RESPONSE,
 )
-from codemie_test_harness.tests.utils.base_utils import get_random_name
+from codemie_test_harness.tests.utils.base_utils import (
+    get_random_name,
+    assert_tool_triggered,
+)
 
 
 @pytest.mark.workflow
@@ -29,4 +32,10 @@ def test_workflow_with_virtual_assistant_with_servicenow_tools(
     response = workflow_utils.execute_workflow(
         test_workflow.id, assistant_and_state_name
     )
+
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        test_workflow
+    )
+    assert_tool_triggered(ServiceNowTool.SERVICE_NOW, triggered_tools)
+
     similarity_check.check_similarity(response, EXPECTED_RESPONSE, 80)
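Every test in this diff funnels its response through `similarity_check.check_similarity`, sometimes with an explicit threshold (80 in the hunk above). Its implementation is outside this diff; one plausible reading, treating the third argument as a minimum percentage score, is sketched below. The real harness may well use an LLM- or embedding-based comparison rather than `difflib`:

```python
import difflib


def check_similarity(actual: str, expected: str, threshold: float = 80) -> None:
    """Assert the two texts are at least `threshold` percent similar.

    Sketch only: difflib's character-level ratio stands in for whatever
    semantic comparison the harness actually performs.
    """
    score = difflib.SequenceMatcher(None, actual, expected).ratio() * 100
    assert score >= threshold, (
        f"Similarity {score:.1f}% is below the required {threshold}%"
    )
```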
codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/test_workflow_with_vcs_tools.py  CHANGED

@@ -3,7 +3,10 @@ import pytest
 from codemie_test_harness.tests.test_data.vcs_tools_test_data import (
     vcs_tools_test_data,
 )
-from codemie_test_harness.tests.utils.base_utils import get_random_name
+from codemie_test_harness.tests.utils.base_utils import (
+    get_random_name,
+    assert_tool_triggered,
+)
 from codemie_test_harness.tests.utils.constants import vcs_integrations
 
 
@@ -37,4 +40,9 @@ def test_workflow_with_vcs_tool(
         test_workflow.id, assistant_and_state_name, user_input=prompt
     )
 
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        test_workflow
+    )
+    assert_tool_triggered(tool_name, triggered_tools)
+
     similarity_check.check_similarity(response, expected_response)
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: codemie-test-harness
-Version: 0.1.184
+Version: 0.1.198
 Summary: Autotest for CodeMie backend and UI
 Author: Anton Yeromin
 Author-email: anton_yeromin@epam.com
@@ -13,11 +13,13 @@ Requires-Dist: aws-assume-role-lib (>=2.10.0,<3.0.0)
 Requires-Dist: boto3 (>=1.39.8,<2.0.0)
 Requires-Dist: click (>=8.1.7,<9.0.0)
 Requires-Dist: codemie-plugins (>=0.1.123,<0.2.0)
-Requires-Dist: codemie-sdk-python (==0.1.184)
+Requires-Dist: codemie-sdk-python (==0.1.198)
 Requires-Dist: pytest (>=8.4.1,<9.0.0)
 Requires-Dist: pytest-playwright (>=0.7.0,<0.8.0)
+Requires-Dist: pytest-repeat (>=0.9.3,<0.10.0)
 Requires-Dist: pytest-reportportal (>=5.5.2,<6.0.0)
 Requires-Dist: pytest-rerunfailures (>=15.1,<16.0)
+Requires-Dist: pytest-timeout (>=2.4.0,<3.0.0)
 Requires-Dist: pytest-xdist (>=3.6.1,<4.0.0)
 Requires-Dist: python-dotenv (>=1.1.0,<2.0.0)
 Requires-Dist: python-gitlab (>=5.6.0,<6.0.0)
@@ -96,6 +98,7 @@ Optional defaults for pytest:
 codemie-test-harness config set PYTEST_MARKS "smoke"
 codemie-test-harness config set PYTEST_N 8
 codemie-test-harness config set PYTEST_RERUNS 2
+codemie-test-harness config set PYTEST_COUNT 10  # For performance testing (optional)
 ```
 
 #### Integration Categories & Management
@@ -293,6 +296,80 @@ codemie-test-harness --git-env github run --marks git
 codemie-test-harness run --marks ui --headless
 ```
 
+#### Performance and Load Testing
+
+Run tests multiple times in parallel to simulate load and measure performance:
+
+```shell
+# Performance test: Run test 50 times with 10 parallel workers
+codemie-test-harness run --marks excel_generation --count 50 -n 10
+
+# Heavy load test: 100 iterations with 20 workers
+codemie-test-harness run --marks excel_generation --count 100 -n 20 -v
+
+# Light load test with retries for stability
+codemie-test-harness run --marks smoke --count 25 -n 5 --reruns 2
+
+# Set default count in config for repeated use
+codemie-test-harness config set PYTEST_COUNT 30
+codemie-test-harness run --marks excel_generation -n 10  # Uses count=30 from config
+
+# Override config default for a specific run
+codemie-test-harness run --marks excel_generation --count 100 -n 20  # Overrides config
+```
+
+**Note:** The `--count` parameter requires the `pytest-repeat` plugin, which is included in the dependencies.
+
+#### Test Timeout Control
+
+Control the per-test timeout to prevent tests from running indefinitely. Tests exceeding the timeout will be **automatically terminated and marked as FAILED**.
+
+**Configuration Priority**: CLI args → Environment variable → Config file → Default (300 seconds)
+
+**Usage Examples:**
+
+```shell
+# Via CLI argument (600 seconds = 10 minutes)
+codemie-test-harness run --timeout 600 --marks smoke
+
+# Via config file (persistent)
+codemie-test-harness config set TEST_TIMEOUT 900
+codemie-test-harness run --marks api
+
+# Via environment variable
+export TEST_TIMEOUT=300
+codemie-test-harness run --marks smoke
+
+# Disable timeout for debugging (use 0)
+codemie-test-harness run --timeout 0 --marks smoke
+
+# Override config default for a specific run
+codemie-test-harness config set TEST_TIMEOUT 600
+codemie-test-harness run --timeout 1200 --marks slow  # Uses 1200, not 600
+```
+
+**Default**: 300 seconds (5 minutes) per test
+
+**What Happens on Timeout?**
+
+When a test exceeds the configured timeout:
+1. ✅ **Test is automatically terminated** - Execution stops immediately
+2. ✅ **Marked as FAILED** - Test result shows as failed with a clear timeout message
+3. ✅ **Error details displayed** - Shows which test timed out and the configured limit
+4. ✅ **Remaining tests continue** - Other tests proceed normally
+5. ✅ **Stack trace captured** - Shows where the test was when the timeout occurred
+
+**Example timeout error output:**
+```
+FAILED tests/test_slow_operation.py::test_data_processing - Failed: Timeout >300.0s
+```
+
+**Notes:**
+- Timeout applies to **individual test functions**, not the entire test run
+- Useful for preventing hanging tests in CI/CD pipelines
+- Consider increasing the timeout for legitimate long-running operations (data processing, large file operations)
+- A timeout of 0 disables the timeout (use for debugging only)
+
 #### Assistant Chat Interface
 
 Interact directly with CodeMie assistants through the CLI:
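The README text added above documents a four-level priority chain for the timeout (CLI args → environment variable → config file → default). A minimal sketch of such a resolver, under the assumption that the config store behaves like a plain mapping; the function and parameter names are illustrative, not the harness's actual `cli/runner.py` code:

```python
import os
from typing import Mapping, Optional

DEFAULT_TIMEOUT = 300  # seconds, per the documented default


def resolve_timeout(cli_timeout: Optional[int], config: Mapping[str, str]) -> int:
    """Resolve the per-test timeout: CLI arg > env var > config file > default."""
    if cli_timeout is not None:               # 1. explicit --timeout flag wins
        return cli_timeout                    #    (0 means "disabled" downstream)
    env_value = os.environ.get("TEST_TIMEOUT")
    if env_value is not None:                 # 2. then the environment variable
        return int(env_value)
    config_value = config.get("TEST_TIMEOUT")
    if config_value is not None:              # 3. then the persisted config file
        return int(config_value)
    return DEFAULT_TIMEOUT                    # 4. finally the built-in default
```

With this shape, `resolve_timeout(1200, {"TEST_TIMEOUT": "600"})` returns 1200, matching the documented "Uses 1200, not 600" example.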
@@ -368,6 +445,9 @@ codemie-test-harness config vars data-management
 
 # Test execution with multiple overrides
 codemie-test-harness run --marks "smoke and not ui" -n 10 --reruns 3 --headless
+
+# Performance testing with count parameter
+codemie-test-harness run --marks excel_generation --count 50 -n 10
 ```
 
 #### Common Test Markers
@@ -508,9 +588,60 @@ pytest -m not_for_parallel_run --reruns 2
 # API tests
 pytest -n 10 -m "api and not not_for_parallel_run" --reruns 2
 pytest -m not_for_parallel_run --reruns 3
+
+# Performance/Load testing: Run test multiple times in parallel
+pytest -n 10 --count 50 -m excel_generation  # Run 50 times with 10 workers
+pytest -n 20 --count 100 -m smoke --reruns 2  # Heavy load with retries
+```
+
+**Notes:**
+- `--reruns 2` uses pytest-rerunfailures to improve resiliency in flaky environments
+- `--count N` uses pytest-repeat to run each test N times (useful for performance/load testing)
+
+#### Test Timeout Configuration
+
+Tests have a configurable timeout to prevent hanging. Default is **300 seconds (5 minutes)** per test.
+
+**Configure in .env file:**
+```properties
+TEST_TIMEOUT=600  # 10 minutes
+```
+
+**Override via pytest CLI:**
+```shell
+# Set timeout for this run
+pytest -n 10 -m smoke --timeout 900  # 15 minutes
+
+# Disable timeout (debugging)
+pytest -m slow_tests --timeout 0
+
+# Use default from .env or pytest.ini
+pytest -n 10 -m api  # Uses TEST_TIMEOUT from .env or 300s default
+```
+
+**Timeout Behavior:**
+
+When a test exceeds the configured timeout:
+- Test execution is **terminated immediately**
+- Test is marked as **FAILED** with a timeout error message
+- Stack trace shows where the test was when the timeout occurred
+- Remaining tests continue execution normally
+
+**Example timeout failure:**
+```
+================================== FAILURES ===================================
+_________________ test_slow_workflow_execution ________________
+
+E   Failed: Timeout >300.0s
+
+tests/workflow/test_workflows.py:145: Failed
 ```
 
-
+**Best Practices:**
+- Keep the default timeout reasonable (5-10 minutes for E2E tests)
+- Increase the timeout for specific slow tests using `@pytest.mark.timeout(900)`
+- Use timeout=0 only for debugging hanging tests
+- Consider whether a test legitimately needs > 5 minutes (optimize if possible)
 
 ### UI tests (Playwright)
 
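The best-practices list above mentions `@pytest.mark.timeout(900)`; both new plugins also work as per-test markers, which is handy when only a few tests need special treatment. A small illustration with placeholder test bodies:

```python
import time

import pytest


@pytest.mark.timeout(900)   # pytest-timeout: give this one slow E2E test 15 minutes
def test_large_report_generation():
    time.sleep(1)           # placeholder for a legitimately long-running operation


@pytest.mark.repeat(10)     # pytest-repeat: run this test 10 times, like --count 10
@pytest.mark.timeout(60)    # each repetition still gets its own 60-second budget
def test_quick_health_check():
    assert True             # placeholder assertion
```

Because pytest-repeat generates a separate test item per iteration, the pytest-timeout budget applies to each repetition individually rather than to the batch as a whole.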