codemie-test-harness 0.1.179__py3-none-any.whl → 0.1.181__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- codemie_test_harness/cli/cli.py +2 -0
- codemie_test_harness/cli/commands/marks_cmd.py +39 -0
- codemie_test_harness/cli/marks_utils.py +338 -0
- codemie_test_harness/cli/runner.py +40 -0
- codemie_test_harness/tests/assistant/datasource/test_code_datasource.py +20 -7
- codemie_test_harness/tests/assistant/datasource/test_confluence_datasource.py +20 -7
- codemie_test_harness/tests/assistant/datasource/test_file_indexing.py +20 -9
- codemie_test_harness/tests/assistant/datasource/test_google_datasource.py +21 -7
- codemie_test_harness/tests/assistant/datasource/test_jira_datasource.py +22 -9
- codemie_test_harness/tests/assistant/default_integrations/test_default_integrations_for_tool.py +29 -13
- codemie_test_harness/tests/assistant/default_integrations/test_default_integrations_for_tool_kit.py +29 -13
- codemie_test_harness/tests/assistant/default_integrations/test_default_integrations_for_tool_with_datasource.py +29 -13
- codemie_test_harness/tests/assistant/test_assistants.py +53 -35
- codemie_test_harness/tests/assistant/tools/access_management/test_keycloak_tool.py +7 -2
- codemie_test_harness/tests/assistant/tools/ado/test_assistant_for_ado_test_plan_tools.py +39 -11
- codemie_test_harness/tests/assistant/tools/ado/test_assistant_for_ado_wiki_tools.py +27 -8
- codemie_test_harness/tests/assistant/tools/ado/test_assistant_for_ado_work_item_tools.py +27 -7
- codemie_test_harness/tests/assistant/tools/cloud/test_cloud_tools.py +9 -3
- codemie_test_harness/tests/assistant/tools/codebase/test_codebase_tools.py +13 -4
- codemie_test_harness/tests/assistant/tools/datamanagement/test_assistant_with_data_management_tools.py +32 -11
- codemie_test_harness/tests/assistant/tools/filemanagement/test_assistant_with_file_management_tools.py +37 -12
- codemie_test_harness/tests/assistant/tools/git/test_assistant_with_git_tools.py +48 -17
- codemie_test_harness/tests/assistant/tools/mcp/test_cli_mcp_server.py +16 -9
- codemie_test_harness/tests/assistant/tools/mcp/test_mcp_servers.py +14 -8
- codemie_test_harness/tests/assistant/tools/notification/test_assistant_notification_tools.py +11 -4
- codemie_test_harness/tests/assistant/tools/openapi/test_assistant_with_open_api_tools.py +6 -2
- codemie_test_harness/tests/assistant/tools/plugin/test_assistant_with_development_plugin.py +27 -8
- codemie_test_harness/tests/assistant/tools/plugin/test_assistant_with_plugin_and_mcp_servers.py +17 -5
- codemie_test_harness/tests/assistant/tools/plugin/test_single_assistant_dual_time_plugins.py +12 -3
- codemie_test_harness/tests/assistant/tools/project_management/test_assistant_pm_tools.py +36 -17
- codemie_test_harness/tests/assistant/tools/report_portal/test_assistant_report_portal_tools.py +7 -2
- codemie_test_harness/tests/assistant/tools/research/test_assistant_research_tools.py +17 -5
- codemie_test_harness/tests/assistant/tools/servicenow/test_servicenow_tools.py +6 -2
- codemie_test_harness/tests/assistant/tools/vcs/test_assistant_with_vcs_tools.py +7 -2
- codemie_test_harness/tests/conversations/test_conversations_endpoints.py +5 -5
- codemie_test_harness/tests/e2e/test_e2e.py +6 -6
- codemie_test_harness/tests/enums/tools.py +32 -0
- codemie_test_harness/tests/integrations/project/test_default_integrations.py +14 -14
- codemie_test_harness/tests/integrations/project/test_project_integrations.py +9 -9
- codemie_test_harness/tests/integrations/user/test_default_integrations.py +14 -14
- codemie_test_harness/tests/integrations/user/test_user_integrations.py +12 -12
- codemie_test_harness/tests/llm/assistants/test_lite_llm.py +2 -2
- codemie_test_harness/tests/llm/assistants/test_llm.py +3 -3
- codemie_test_harness/tests/providers/test_providers_endpoints.py +11 -11
- codemie_test_harness/tests/search/test_search_assistant.py +1 -1
- codemie_test_harness/tests/search/test_search_datasource.py +5 -5
- codemie_test_harness/tests/search/test_search_integration.py +2 -2
- codemie_test_harness/tests/search/test_search_workflow.py +1 -1
- codemie_test_harness/tests/service/test_assistant_service.py +1 -1
- codemie_test_harness/tests/test_data/codebase_tools_test_data.py +11 -5
- codemie_test_harness/tests/test_data/file_test_data.py +17 -0
- codemie_test_harness/tests/ui/assistants/test_create_assistant.py +11 -11
- codemie_test_harness/tests/ui/datasource/test_create_datasource.py +7 -7
- codemie_test_harness/tests/ui/datasource/test_datasource_page.py +3 -3
- codemie_test_harness/tests/ui/datasource/test_edit_datasource.py +5 -5
- codemie_test_harness/tests/ui/datasource/test_view_datasource.py +5 -5
- codemie_test_harness/tests/ui/integrations/test_create_integration.py +9 -9
- codemie_test_harness/tests/ui/workflows/test_create_workflow.py +12 -12
- codemie_test_harness/tests/ui/workflows/test_edit_workflow.py +15 -15
- codemie_test_harness/tests/ui/workflows/test_workflow_details.py +19 -19
- codemie_test_harness/tests/ui/workflows/test_workflow_executions_page.py +36 -36
- codemie_test_harness/tests/ui/workflows/test_workflow_templates.py +8 -8
- codemie_test_harness/tests/ui/workflows/test_workflows.py +8 -8
- codemie_test_harness/tests/utils/assistant_utils.py +34 -1
- codemie_test_harness/tests/utils/base_utils.py +61 -0
- codemie_test_harness/tests/workflow/assistant_tools/access_management/test_workflow_with_assistant_with_keycloak_tool.py +1 -1
- codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_test_plan_tools.py +2 -2
- codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_wiki_tools.py +2 -2
- codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_work_item_tools.py +2 -2
- codemie_test_harness/tests/workflow/assistant_tools/cloud/test_workflow_with_assistant_cloud_tools.py +1 -1
- codemie_test_harness/tests/workflow/assistant_tools/codebase/test_worfklow_with_assistant_codebase_tools.py +2 -2
- codemie_test_harness/tests/workflow/assistant_tools/data_management/test_workflow_with_assistant_with_data_management_tools.py +2 -2
- codemie_test_harness/tests/workflow/assistant_tools/default_integrations/test_default_integrations_for_tool.py +8 -8
- codemie_test_harness/tests/workflow/assistant_tools/default_integrations/test_default_integrations_for_tool_kit.py +8 -8
- codemie_test_harness/tests/workflow/assistant_tools/default_integrations/test_default_integrations_for_tool_with_datasource.py +8 -8
- codemie_test_harness/tests/workflow/assistant_tools/file_management/test_workflow_with_assistant_with_file_management_tools.py +5 -5
- codemie_test_harness/tests/workflow/assistant_tools/git/test_workflow_with_assistant_git_tools.py +7 -7
- codemie_test_harness/tests/workflow/assistant_tools/mcp/test_workflow_with_assistant_with_mcp_server.py +3 -3
- codemie_test_harness/tests/workflow/assistant_tools/notification/test_workflow_with_assistant_notification_tools.py +2 -2
- codemie_test_harness/tests/workflow/assistant_tools/open_api/test_workflow_with_assistant_with_open_api_tools.py +1 -1
- codemie_test_harness/tests/workflow/assistant_tools/plugin/test_workflow_with_assistant_with_development_plugin.py +2 -2
- codemie_test_harness/tests/workflow/assistant_tools/plugin/test_workflow_with_assistant_with_plugin_and_mcp_servers.py +2 -2
- codemie_test_harness/tests/workflow/assistant_tools/project_management/test_workflow_with_assistant_pm_tools.py +1 -1
- codemie_test_harness/tests/workflow/assistant_tools/report_portal/test_workflow_with_assistant_with_report_portal_tools.py +1 -1
- codemie_test_harness/tests/workflow/assistant_tools/research/test_workflow_with_assistant_research_tools.py +2 -2
- codemie_test_harness/tests/workflow/assistant_tools/servicenow/test_workflow_with_servicenow_tools.py +1 -1
- codemie_test_harness/tests/workflow/assistant_tools/vcs/workflow_with_assistant_vcs_tools.py +1 -1
- codemie_test_harness/tests/workflow/config_validation/test_config_validation.py +1 -1
- codemie_test_harness/tests/workflow/direct_tools_calling/default_integrations/test_default_integrations_for_tool.py +8 -8
- codemie_test_harness/tests/workflow/direct_tools_calling/default_integrations/test_default_integrations_for_tool_kit.py +8 -8
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_access_management_tool.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_ado_test_plan_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_ado_wiki_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_ado_work_item_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_cloud_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_codebase_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_data_management_tools_elastic.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_data_management_tools_sql.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_file_management_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_notification_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_open_api_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_plugin_tools.py +4 -4
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_project_management_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_report_portal_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_research_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_servicenow_tools.py +3 -3
- codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_vcs_tools.py +3 -3
- codemie_test_harness/tests/workflow/test_workflows.py +1 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/access_management/test_workflow_with_keycloak_tool.py +1 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/ado/test_workflow_with_ado_test_plan_tools.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/ado/test_workflow_with_ado_wiki_tools.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/ado/test_workflow_with_ado_work_item_tools.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/cloud/test_workflow_with_cloud_tools.py +4 -4
- codemie_test_harness/tests/workflow/virtual_assistant_tools/codebase/test_workflow_with_codebase_tools.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/data_management/test_workflow_with_data_management_tools.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/default_integrations/test_default_integrations_for_tool.py +8 -8
- codemie_test_harness/tests/workflow/virtual_assistant_tools/default_integrations/test_default_integrations_for_tool_kit.py +8 -8
- codemie_test_harness/tests/workflow/virtual_assistant_tools/default_integrations/test_default_integrations_for_tool_with_datasource.py +8 -8
- codemie_test_harness/tests/workflow/virtual_assistant_tools/file_management/test_workflow_with_file_management_tools.py +5 -5
- codemie_test_harness/tests/workflow/virtual_assistant_tools/git/test_workflow_with_git_tools.py +7 -7
- codemie_test_harness/tests/workflow/virtual_assistant_tools/mcp/test_workflow_with_mcp_server.py +3 -3
- codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/test_workflow_with_notification_tools.py +4 -4
- codemie_test_harness/tests/workflow/virtual_assistant_tools/open_api/test_workflow_with_open_api_tools.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/plugin/test_workflow_with_development_plugin.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/plugin/test_workflow_with_plugin_and_mcp_servers.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/project_management/test_workflow_with_project_management_tools.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/report_portal/test_workflow_with_report_portal_tool.py +1 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/research/test_workflow_with_research_tools.py +2 -2
- codemie_test_harness/tests/workflow/virtual_assistant_tools/servicenow/test_workflow_with_servicenow_tools.py +1 -1
- codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/test_workflow_with_vcs_tools.py +1 -1
- {codemie_test_harness-0.1.179.dist-info → codemie_test_harness-0.1.181.dist-info}/METADATA +8 -8
- {codemie_test_harness-0.1.179.dist-info → codemie_test_harness-0.1.181.dist-info}/RECORD +134 -132
- {codemie_test_harness-0.1.179.dist-info → codemie_test_harness-0.1.181.dist-info}/WHEEL +0 -0
- {codemie_test_harness-0.1.179.dist-info → codemie_test_harness-0.1.181.dist-info}/entry_points.txt +0 -0
codemie_test_harness/cli/cli.py
CHANGED
@@ -35,6 +35,7 @@ from .commands.config_cmd import config_cmd
 from .commands.run_cmd import run_cmd
 from .commands.assistant_cmd import assistant_cmd
 from .commands.workflow_cmd import workflow_cmd
+from .commands.marks_cmd import marks_cmd


 @click.group(context_settings=CONTEXT_SETTINGS)
@@ -130,6 +131,7 @@ cli.add_command(config_cmd)
 cli.add_command(run_cmd)
 cli.add_command(assistant_cmd)
 cli.add_command(workflow_cmd)
+cli.add_command(marks_cmd)


 if __name__ == "__main__":  # pragma: no cover
codemie_test_harness/cli/commands/marks_cmd.py
ADDED
@@ -0,0 +1,39 @@
+"""CLI command for listing pytest marks."""
+
+from __future__ import annotations
+import click
+from ..marks_utils import discover_all_marks, print_marks_list
+from ..constants import CONSOLE
+
+
+@click.command(name="marks")
+@click.option(
+    "--verbose",
+    "-v",
+    is_flag=True,
+    help="Show detailed information in table format with line numbers",
+)
+@click.option(
+    "--count", "-c", is_flag=True, help="Show only the count of available marks"
+)
+def marks_cmd(verbose: bool, count: bool):
+    """List all available pytest marks in the test suite.
+
+    Examples:
+        codemie-test-harness marks            # List all marks
+        codemie-test-harness marks --verbose  # List marks with file details in table format
+        codemie-test-harness marks --count    # Show only count
+    """
+    try:
+        if count:
+            # For count mode, we don't need detailed information
+            marks_info = discover_all_marks(include_details=False)
+            CONSOLE.print(f"[green]{len(marks_info)}[/green] pytest marks available")
+            return
+
+        # Let print_marks_list handle the discovery with appropriate details
+        print_marks_list(marks_info=None, show_files=verbose)
+
+    except Exception as e:
+        CONSOLE.print(f"[red]Error discovering marks: {str(e)}[/red]")
+        raise click.ClickException("Failed to discover pytest marks")
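For reference, the new command can be exercised in isolation with Click's test runner. A minimal sketch, assuming the package is installed; the printed output is illustrative:

# Minimal sketch: invoke the new `marks` command via Click's test runner.
from click.testing import CliRunner

from codemie_test_harness.cli.commands.marks_cmd import marks_cmd

runner = CliRunner()

# Equivalent to `codemie-test-harness marks --count` on the command line.
result = runner.invoke(marks_cmd, ["--count"])
print(result.exit_code)  # 0 on success
print(result.output)     # e.g. "42 pytest marks available" (count is illustrative)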
codemie_test_harness/cli/marks_utils.py
ADDED
@@ -0,0 +1,338 @@
+"""Utilities for handling pytest marks in the test harness."""
+
+import re
+import ast
+from pathlib import Path
+from typing import Set, List, Dict
+from .constants import CONSOLE
+from rich.table import Table
+
+try:
+    from importlib.resources import files as pkg_files
+except Exception:
+    pkg_files = None
+
+
+def extract_marks_from_file(
+    file_path: Path, include_lines: bool = False
+) -> Set[str] | Dict[str, List[int]]:
+    """Extract pytest marks from a Python test file.
+
+    Args:
+        file_path: Path to the Python file to analyze
+        include_lines: If True, return dict with mark names and line numbers
+
+    Returns:
+        Set of mark names found in the file, or dict mapping marks to line numbers
+    """
+    if include_lines:
+        marks_with_lines = {}
+    else:
+        marks = set()
+
+    try:
+        with open(file_path, "r", encoding="utf-8") as f:
+            content = f.read()
+
+        # Parse the file to AST for reliable extraction
+        try:
+            tree = ast.parse(content)
+            for node in ast.walk(tree):
+                if isinstance(node, ast.FunctionDef):
+                    for decorator in node.decorator_list:
+                        mark_name = _extract_mark_from_decorator(decorator)
+                        if mark_name:
+                            if include_lines:
+                                if mark_name not in marks_with_lines:
+                                    marks_with_lines[mark_name] = []
+                                marks_with_lines[mark_name].append(decorator.lineno)
+                            else:
+                                marks.add(mark_name)
+        except SyntaxError:
+            # If AST parsing fails, fall back to regex
+            pass
+
+        # Additional regex-based extraction for edge cases
+        if include_lines:
+            regex_marks_with_lines = _extract_marks_with_regex_and_lines(content)
+            for mark, lines in regex_marks_with_lines.items():
+                if mark not in marks_with_lines:
+                    marks_with_lines[mark] = []
+                marks_with_lines[mark].extend(lines)
+        else:
+            regex_marks = _extract_marks_with_regex(content)
+            marks.update(regex_marks)
+
+    except (IOError, UnicodeDecodeError):
+        # Skip files that can't be read
+        pass
+
+    return marks_with_lines if include_lines else marks
+
+
+def _extract_mark_from_decorator(decorator) -> str | None:
+    """Extract mark name from AST decorator node."""
+    mark_name = None
+
+    if isinstance(decorator, ast.Attribute):
+        # @pytest.mark.some_mark
+        if (
+            isinstance(decorator.value, ast.Attribute)
+            and isinstance(decorator.value.value, ast.Name)
+            and decorator.value.value.id == "pytest"
+            and decorator.value.attr == "mark"
+        ):
+            mark_name = decorator.attr
+    elif isinstance(decorator, ast.Call):
+        # @pytest.mark.some_mark(...) with arguments
+        if (
+            isinstance(decorator.func, ast.Attribute)
+            and isinstance(decorator.func.value, ast.Attribute)
+            and isinstance(decorator.func.value.value, ast.Name)
+            and decorator.func.value.value.id == "pytest"
+            and decorator.func.value.attr == "mark"
+        ):
+            mark_name = decorator.func.attr
+
+    # Filter out pytest built-in decorators that aren't marks
+    if mark_name in {
+        "parametrize",
+        "fixture",
+        "skip",
+        "skipif",
+        "xfail",
+        "usefixtures",
+        "testcase",
+        "todo",
+    }:
+        return None
+
+    return mark_name
+
+
+def _extract_marks_with_regex(content: str) -> Set[str]:
+    """Extract marks using regex as fallback."""
+    marks = set()
+
+    # Pattern to match @pytest.mark.mark_name
+    pattern = r"@pytest\.mark\.(\w+)"
+    matches = re.findall(pattern, content)
+
+    for match in matches:
+        # Filter out pytest built-in decorators that aren't marks
+        if match not in {
+            "parametrize",
+            "fixture",
+            "skip",
+            "skipif",
+            "xfail",
+            "usefixtures",
+            "todo",
+        }:
+            marks.add(match)
+
+    return marks
+
+
+def _extract_marks_with_regex_and_lines(content: str) -> Dict[str, List[int]]:
+    """Extract marks with line numbers using regex as fallback."""
+    marks_with_lines = {}
+    lines = content.split("\n")
+
+    # Pattern to match @pytest.mark.mark_name
+    pattern = r"@pytest\.mark\.(\w+)"
+
+    for line_num, line in enumerate(lines, 1):
+        matches = re.findall(pattern, line)
+        for match in matches:
+            # Filter out pytest built-in decorators that aren't marks
+            if match not in {
+                "parametrize",
+                "fixture",
+                "skip",
+                "skipif",
+                "xfail",
+                "usefixtures",
+                "todo",
+            }:
+                if match not in marks_with_lines:
+                    marks_with_lines[match] = []
+                marks_with_lines[match].append(line_num)
+
+    return marks_with_lines
+
+
+def _resolve_tests_path() -> str:
+    """Resolve tests path for mark discovery.
+
+    Returns:
+        Path to tests directory
+    """
+    # 1) Use importlib.resources to locate installed package "tests"
+    if pkg_files is not None:
+        try:
+            # Try to locate codemie_test_harness.tests package
+            tests_dir = Path(str(pkg_files("codemie_test_harness.tests")))
+            return str(tests_dir)
+        except Exception:
+            pass
+
+    # 2) Fallback to repo layout when running from source
+    # marks_utils.py -> cli -> codemie_test_harness -> <repo_root>
+    codemie_test_harness_root = (
+        Path(__file__).resolve().parents[1]
+    )  # codemie_test_harness directory
+    tests_path = str(codemie_test_harness_root / "tests")
+    return tests_path
+
+
+def discover_all_marks(
+    include_details: bool = False,
+) -> Dict[str, List[str]] | Dict[str, List[Dict]]:
+    """Discover all pytest marks used in the test suite.
+
+    Args:
+        include_details: If True, return detailed info with line numbers
+
+    Returns:
+        Dictionary with mark names as keys and list of files (or detailed info) as values
+    """
+    tests_path = _resolve_tests_path()
+    tests_dir = Path(tests_path)
+
+    if not tests_dir.exists():
+        return {}
+
+    marks_info = {}
+
+    # Find all Python test files
+    for py_file in tests_dir.rglob("*.py"):
+        if py_file.name.startswith("test_") or py_file.name.endswith("_test.py"):
+            rel_path = str(py_file.relative_to(tests_dir))
+
+            if include_details:
+                marks_with_lines = extract_marks_from_file(py_file, include_lines=True)
+
+                for mark, lines in marks_with_lines.items():
+                    if mark not in marks_info:
+                        marks_info[mark] = []
+
+                    for line in lines:
+                        marks_info[mark].append({"file": rel_path, "line": line})
+            else:
+                marks = extract_marks_from_file(py_file)
+
+                for mark in marks:
+                    if mark not in marks_info:
+                        marks_info[mark] = []
+                    # Store relative path for readability
+                    if rel_path not in marks_info[mark]:
+                        marks_info[mark].append(rel_path)
+
+    return marks_info
+
+
+def get_all_available_marks() -> List[str]:
+    """Get sorted list of all available pytest marks."""
+    marks_to_files = discover_all_marks()
+    return sorted(marks_to_files.keys())
+
+
+def is_valid_mark_expression(
+    expression: str, available_marks: List[str]
+) -> tuple[bool, List[str]]:
+    """Validate if a mark expression contains only known marks.
+
+    Args:
+        expression: The pytest mark expression (e.g., "smoke and ui", "not integration")
+        available_marks: List of all available marks
+
+    Returns:
+        Tuple of (is_valid, list_of_unknown_marks)
+    """
+    if not expression or not expression.strip():
+        return True, []
+
+    # Extract mark names from the expression
+    # This regex finds word tokens that could be mark names
+    potential_marks = re.findall(r"\b(\w+)\b", expression)
+
+    # Filter out logical operators and known keywords
+    logical_keywords = {"and", "or", "not", "true", "false"}
+    unknown_marks = []
+
+    available_marks_set = set(available_marks)
+
+    for mark in potential_marks:
+        if mark not in logical_keywords and mark not in available_marks_set:
+            unknown_marks.append(mark)
+
+    return len(unknown_marks) == 0, unknown_marks
+
+
+def print_marks_list(marks_info=None, show_files: bool = False):
+    """Print formatted list of available marks.
+
+    Args:
+        marks_info: Optional pre-computed marks dictionary
+        show_files: Whether to show detailed information in table format
+    """
+    if marks_info is None:
+        marks_info = discover_all_marks(include_details=show_files)
+
+    if not marks_info:
+        CONSOLE.print("[yellow]No pytest marks found in test files.[/yellow]")
+        return
+
+    if show_files:
+        # Verbose mode - show table with detailed information
+        CONSOLE.print(
+            f"\n[bold cyan]Available pytest marks ({len(marks_info)} total):[/bold cyan]\n"
+        )
+
+        table = Table(show_header=True, header_style="bold magenta")
+        table.add_column("Mark", style="bold green", width=25)
+        table.add_column("File", style="cyan", width=110)
+        table.add_column("Usage Count", style="blue", justify="right", width=12)
+
+        for mark in sorted(marks_info.keys()):
+            details = marks_info[mark]
+
+            # Group by file and count occurrences
+            file_counts = {}
+            for detail in details:
+                file_path = detail["file"]
+                if file_path not in file_counts:
+                    file_counts[file_path] = []
+                file_counts[file_path].append(detail["line"])
+
+            # Add rows for each file
+            first_row = True
+            for file_path, lines in sorted(file_counts.items()):
+                mark_display = (
+                    mark if first_row else ""
+                )  # Just the mark name, no @pytest.mark. prefix
+                usage_count = str(len(lines))
+
+                table.add_row(mark_display, file_path, usage_count)
+                first_row = False
+
+            # Add separator row if not the last mark
+            if mark != sorted(marks_info.keys())[-1]:
+                table.add_row("", "", "", style="dim")
+
+        CONSOLE.print(table)
+        CONSOLE.print(f"\n[dim]Total: {len(marks_info)} unique marks found[/dim]")
+    else:
+        # Simple mode - just list marks
+        CONSOLE.print(
+            f"\n[bold cyan]Available pytest marks ({len(marks_info)} total):[/bold cyan]\n"
+        )
+
+        for mark in sorted(marks_info.keys()):
+            files = marks_info[mark]
+            CONSOLE.print(f"[green]• {mark}[/green] [dim]({len(files)} files)[/dim]")
+
+        CONSOLE.print(
+            "\n[dim]Use --verbose to see detailed information in table format.[/dim]"
+        )
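The validation helper above drives the new pre-run check in runner.py (next section). A minimal sketch of its behavior, using only functions defined in this file; the mark names are illustrative:

# Minimal sketch of is_valid_mark_expression; mark names are illustrative.
from codemie_test_harness.cli.marks_utils import is_valid_mark_expression

available = ["api", "smoke", "datasource", "gitlab"]

# Logical operators ("and", "or", "not") are ignored; known marks pass.
ok, unknown = is_valid_mark_expression("smoke and not gitlab", available)
assert ok and unknown == []

# Any unknown token is reported back to the caller.
ok, unknown = is_valid_mark_expression("smoke or regression", available)
assert not ok and unknown == ["regression"]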
codemie_test_harness/cli/runner.py
CHANGED
@@ -10,6 +10,11 @@ except Exception:
     pkg_files = None

 from .constants import CONSOLE
+from .marks_utils import (
+    get_all_available_marks,
+    is_valid_mark_expression,
+    print_marks_list,
+)


 def resolve_tests_path_and_root() -> tuple[str, str]:
@@ -59,9 +64,44 @@ def build_pytest_cmd(
     return cmd, root_dir


+def validate_marks_expression(marks: str) -> None:
+    """Validate that the marks expression contains only known marks.
+
+    Args:
+        marks: The pytest marks expression to validate
+
+    Raises:
+        SystemExit: If unknown marks are found
+    """
+    if not marks or not marks.strip():
+        return
+
+    try:
+        available_marks = get_all_available_marks()
+        is_valid, unknown_marks = is_valid_mark_expression(marks, available_marks)
+
+        if not is_valid:
+            CONSOLE.print(
+                f"[red]Error: Unknown pytest mark(s) found: {', '.join(unknown_marks)}[/red]"
+            )
+            CONSOLE.print(f"\n[yellow]Expression used:[/yellow] {marks}")
+            print_marks_list(show_files=False)
+            CONSOLE.print(
+                "\n[dim]Use 'codemie-test-harness marks' to see all available marks[/dim]"
+            )
+            raise SystemExit(1)
+
+    except Exception as e:
+        # If mark validation fails for any reason, issue a warning but continue
+        CONSOLE.print(f"[yellow]Warning: Could not validate marks: {str(e)}[/yellow]")
+
+
 def run_pytest(
     workers: int, marks: str, reruns: int, extra: Iterable[str] | None = None
 ) -> None:
+    # Validate marks before running pytest
+    validate_marks_expression(marks)
+
     cmd, root_dir = build_pytest_cmd(workers, marks, reruns, extra)
     CONSOLE.print(f"[cyan]Running:[/] {' '.join(cmd)} (cwd={root_dir})")
     raise SystemExit(subprocess.call(cmd, cwd=root_dir))
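With this change, run_pytest fails fast on mistyped mark expressions instead of silently running zero tests. A sketch of the observable behavior, assuming the installed tests tree is discoverable so mark discovery returns the real mark set; note that SystemExit derives from BaseException, so the except Exception inside validate_marks_expression does not swallow the exit:

# Sketch of the new pre-flight check; mark names are illustrative and assume
# "api" is among the discovered marks.
from codemie_test_harness.cli.runner import validate_marks_expression

validate_marks_expression("")          # empty expression: no-op
validate_marks_expression("api")       # known mark: returns None
validate_marks_expression("api_typo")  # unknown mark: prints the available
                                       # mark list and raises SystemExit(1)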
codemie_test_harness/tests/assistant/datasource/test_code_datasource.py
CHANGED
@@ -4,13 +4,18 @@ import pytest
 from hamcrest import assert_that, equal_to
 from requests import HTTPError

+from codemie_test_harness.tests.enums.tools import Default
 from codemie_test_harness.tests.test_data.index_test_data import index_test_data
-from codemie_test_harness.tests.utils.base_utils import
+from codemie_test_harness.tests.utils.base_utils import (
+    get_random_name,
+    assert_response,
+    assert_tool_triggered,
+)


 @pytest.mark.datasource
 @pytest.mark.gitlab
-@pytest.mark.
+@pytest.mark.api
 @pytest.mark.parametrize(
     "embedding_model",
     index_test_data,
@@ -35,22 +40,30 @@ def test_create_index_application_with_embedding_model(
         setting_id=git_integration.id, embeddings_model=embedding_model
     )

-    assistant = assistant(
+    assistant = assistant(
+        context=code_context(datasource), system_prompt="Run tools on each user prompt"
+    )

-    answer = assistant_utils.ask_assistant(
+    answer, triggered_tools = assistant_utils.ask_assistant(
+        assistant, question, minimal_response=False
+    )
+    assert_tool_triggered(Default.GET_REPOSITORY_FILE_TREE, triggered_tools)
     similarity_check.check_similarity(answer, expected_answer)

     datasource_utils.update_code_datasource(
         datasource.id, full_reindex=True, skip_reindex=False
     )

-    answer = assistant_utils.ask_assistant(
+    answer, triggered_tools = assistant_utils.ask_assistant(
+        assistant, question, minimal_response=False
+    )
+    assert_tool_triggered(Default.GET_REPOSITORY_FILE_TREE, triggered_tools)
     similarity_check.check_similarity(answer, expected_answer)


 @pytest.mark.datasource
 @pytest.mark.gitlab
-@pytest.mark.
+@pytest.mark.api
 def test_edit_description_for_code_data_source(
     client,
     gitlab_datasource,
@@ -68,7 +81,7 @@ def test_edit_description_for_code_data_source(

 @pytest.mark.datasource
 @pytest.mark.gitlab
-@pytest.mark.
+@pytest.mark.api
 def test_create_code_datasource_with_existing_name(gitlab_datasource, datasource_utils):
     datasource = datasource_utils.get_datasource(gitlab_datasource.id)

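The diff above calls a new assert_tool_triggered helper whose body is not shown (it lands in the +61 lines of tests/utils/base_utils.py). A hypothetical minimal sketch based only on its call sites, assuming triggered_tools is the collection of tool identifiers returned when ask_assistant is called with minimal_response=False:

# Hypothetical sketch only: the real helper lives in the unshown portion of
# tests/utils/base_utils.py. Assumes `triggered_tools` is an iterable of tool
# identifiers and `expected_tool` is an enums.tools member such as
# Default.GET_REPOSITORY_FILE_TREE.
def assert_tool_triggered(expected_tool, triggered_tools):
    assert expected_tool in triggered_tools, (
        f"Expected tool '{expected_tool}' to be triggered, "
        f"but got: {triggered_tools}"
    )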
codemie_test_harness/tests/assistant/datasource/test_confluence_datasource.py
CHANGED
@@ -13,7 +13,12 @@ from codemie_test_harness.tests.test_data.pm_tools_test_data import (
     RESPONSE_FOR_CONFLUENCE_CLOUD_TOOL,
 )
 from codemie_test_harness.tests.utils.credentials_manager import CredentialsManager
-from codemie_test_harness.tests.utils.base_utils import
+from codemie_test_harness.tests.utils.base_utils import (
+    get_random_name,
+    assert_response,
+    assert_tool_triggered,
+)
+from codemie_test_harness.tests.enums.tools import Default


 @pytest.fixture(scope="function")
@@ -29,7 +34,7 @@ def confluence_cloud_datasource(datasource_utils, confluence_cloud_integration):

 @pytest.mark.datasource
 @pytest.mark.project_management
-@pytest.mark.
+@pytest.mark.api
 @pytest.mark.parametrize(
     "datasource_fixture, prompt, expected_response",
     [
@@ -61,22 +66,30 @@ def test_create_datasource_with_confluence_and_confluence_cloud_integration(
     expected_response,
 ):
     datasource = request.getfixturevalue(datasource_fixture)
-    assistant = assistant(
+    assistant = assistant(
+        context=kb_context(datasource), system_prompt="Run tools on each user prompt"
+    )

-    response = assistant_utils.ask_assistant(
+    response, triggered_tools = assistant_utils.ask_assistant(
+        assistant, prompt, minimal_response=False
+    )
+    assert_tool_triggered(Default.SEARCH_KB, triggered_tools)
     similarity_check.check_similarity(response, expected_response)

     datasource_utils.update_confluence_datasource(
         datasource.id, full_reindex=True, skip_reindex=False
     )

-    response = assistant_utils.ask_assistant(
+    response, triggered_tools = assistant_utils.ask_assistant(
+        assistant, prompt, minimal_response=False
+    )
+    assert_tool_triggered(Default.SEARCH_KB, triggered_tools)
     similarity_check.check_similarity(response, expected_response)


 @pytest.mark.datasource
 @pytest.mark.confluence
-@pytest.mark.
+@pytest.mark.api
 def test_edit_description_for_confluence_data_source(
     client, confluence_datasource, datasource_utils
 ):
@@ -92,7 +105,7 @@ def test_edit_description_for_confluence_data_source(

 @pytest.mark.datasource
 @pytest.mark.confluence
-@pytest.mark.
+@pytest.mark.api
 def test_create_confluence_datasource_with_existing_name(
     datasource_utils, confluence_datasource
 ):
codemie_test_harness/tests/assistant/datasource/test_file_indexing.py
CHANGED
@@ -6,12 +6,17 @@ from hamcrest import (
 )
 from requests import HTTPError

+from codemie_test_harness.tests.enums.tools import Default
 from codemie_test_harness.tests.test_data.file_test_data import (
     file_test_data,
     large_files_test_data,
     RESPONSE_FOR_TWO_FILES_INDEXED,
 )
-from codemie_test_harness.tests.utils.base_utils import
+from codemie_test_harness.tests.utils.base_utils import (
+    get_random_name,
+    assert_response,
+    assert_tool_triggered,
+)
 from codemie_test_harness.tests.test_data.index_test_data import index_test_data
 from codemie_test_harness.tests.utils.client_factory import get_client
 from codemie_test_harness.tests.utils.constants import FILES_PATH
@@ -60,7 +65,7 @@ def pytest_generate_tests(metafunc):

 @pytest.mark.datasource
 @pytest.mark.file
-@pytest.mark.
+@pytest.mark.api
 @pytest.mark.smoke
 def test_create_assistant_with_file_datasource(
     assistant,
@@ -82,7 +87,10 @@ def test_create_assistant_with_file_datasource(
     test_assistant = assistant(context=kb_context(datasource))

     prompt = "Show KB context. Return all information available in the context. Query may be 'Show content of the KB'"
-    response = assistant_utils.ask_assistant(
+    response, triggered_tools = assistant_utils.ask_assistant(
+        test_assistant, prompt, minimal_response=False
+    )
+    assert_tool_triggered(Default.SEARCH_KB, triggered_tools)

     similarity_check.check_similarity(
         response, expected_response, assistant_name=test_assistant.name
@@ -91,7 +99,7 @@ def test_create_assistant_with_file_datasource(

 @pytest.mark.datasource
 @pytest.mark.file
-@pytest.mark.
+@pytest.mark.api
 @pytest.mark.smoke
 def test_edit_description_for_file_datasource(datasource_utils):
     initial_description = "[Autotest] Initial CSV datasource description"
@@ -115,7 +123,7 @@ def test_edit_description_for_file_datasource(datasource_utils):

 @pytest.mark.datasource
 @pytest.mark.file
-@pytest.mark.
+@pytest.mark.api
 @pytest.mark.smoke
 @pytest.mark.parametrize("file_name", large_files_test_data)
 def test_create_file_datasource_with_large_files(datasource_utils, file_name):
@@ -134,7 +142,7 @@ def test_create_file_datasource_with_large_files(datasource_utils, file_name):

 @pytest.mark.datasource
 @pytest.mark.file
-@pytest.mark.
+@pytest.mark.api
 @pytest.mark.smoke
 def test_create_file_datasource_with_big_number_of_files(datasource_utils):
     files = [str(FILES_PATH / "test.txt") for _ in range(11)]
@@ -152,7 +160,7 @@ def test_create_file_datasource_with_big_number_of_files(datasource_utils):

 @pytest.mark.datasource
 @pytest.mark.file
-@pytest.mark.
+@pytest.mark.api
 @pytest.mark.smoke
 def test_create_file_datasource_with_two_files(
     assistant, assistant_utils, datasource_utils, kb_context, similarity_check
@@ -168,8 +176,11 @@ def test_create_file_datasource_with_two_files(

     test_assistant = assistant(context=kb_context(datasource))

-    response = assistant_utils.ask_assistant(
-        test_assistant,
+    response, triggered_tools = assistant_utils.ask_assistant(
+        test_assistant,
+        "What types of data do we have available?",
+        minimal_response=False,
     )
+    assert_tool_triggered(Default.SEARCH_KB, triggered_tools)

     similarity_check.check_similarity(response, RESPONSE_FOR_TWO_FILES_INDEXED)