codemie-test-harness 0.1.184__py3-none-any.whl → 0.1.198__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of codemie-test-harness has been flagged by the registry as possibly problematic.

Files changed (81)
  1. codemie_test_harness/cli/cli.py +42 -6
  2. codemie_test_harness/cli/commands/config_cmd.py +1 -1
  3. codemie_test_harness/cli/commands/run_cmd.py +24 -1
  4. codemie_test_harness/cli/constants.py +1 -0
  5. codemie_test_harness/cli/runner.py +17 -3
  6. codemie_test_harness/cli/utils.py +12 -2
  7. codemie_test_harness/pytest.ini +3 -0
  8. codemie_test_harness/tests/assistant/test_assistants.py +17 -1
  9. codemie_test_harness/tests/assistant/tools/datamanagement/test_assistant_with_data_management_tools.py +47 -6
  10. codemie_test_harness/tests/assistant/tools/mcp/test_cli_mcp_server.py +0 -4
  11. codemie_test_harness/tests/assistant/tools/mcp/test_mcp_servers.py +0 -4
  12. codemie_test_harness/tests/conftest.py +19 -3
  13. codemie_test_harness/tests/enums/environment.py +3 -3
  14. codemie_test_harness/tests/enums/integrations.py +1 -0
  15. codemie_test_harness/tests/enums/model_types.py +1 -0
  16. codemie_test_harness/tests/integrations/project/test_default_integrations.py +41 -15
  17. codemie_test_harness/tests/integrations/project/test_project_integrations.py +42 -0
  18. codemie_test_harness/tests/integrations/user/test_default_integrations.py +41 -15
  19. codemie_test_harness/tests/llm/assistants/test_llm.py +45 -2
  20. codemie_test_harness/tests/test_data/assistant_test_data.py +171 -171
  21. codemie_test_harness/tests/test_data/codebase_tools_test_data.py +2 -0
  22. codemie_test_harness/tests/test_data/data_management_tools_test_data.py +18 -0
  23. codemie_test_harness/tests/test_data/direct_tools/data_management_tools_test_data.py +18 -1
  24. codemie_test_harness/tests/test_data/direct_tools/report_portal_tools_test_data.py +189 -197
  25. codemie_test_harness/tests/test_data/integrations_test_data.py +163 -2
  26. codemie_test_harness/tests/test_data/llm_test_data.py +1 -0
  27. codemie_test_harness/tests/test_data/open_api_tools_test_data.py +22 -1
  28. codemie_test_harness/tests/test_data/report_portal_tools_test_data.py +89 -112
  29. codemie_test_harness/tests/test_data/research_tools_test_data.py +29 -7
  30. codemie_test_harness/tests/utils/assistant_utils.py +22 -12
  31. codemie_test_harness/tests/utils/credentials_manager.py +66 -8
  32. codemie_test_harness/tests/utils/workflow_utils.py +91 -0
  33. codemie_test_harness/tests/workflow/assistant_tools/access_management/test_workflow_with_assistant_with_keycloak_tool.py +7 -0
  34. codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_test_plan_tools.py +50 -1
  35. codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_wiki_tools.py +39 -1
  36. codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_work_item_tools.py +34 -1
  37. codemie_test_harness/tests/workflow/assistant_tools/cloud/test_workflow_with_assistant_cloud_tools.py +6 -0
  38. codemie_test_harness/tests/workflow/assistant_tools/codebase/test_worfklow_with_assistant_codebase_tools.py +11 -0
  39. codemie_test_harness/tests/workflow/assistant_tools/data_management/test_workflow_with_assistant_with_data_management_tools.py +72 -1
  40. codemie_test_harness/tests/workflow/assistant_tools/default_integrations/test_default_integrations_for_tool.py +31 -0
  41. codemie_test_harness/tests/workflow/assistant_tools/default_integrations/test_default_integrations_for_tool_kit.py +31 -0
  42. codemie_test_harness/tests/workflow/assistant_tools/default_integrations/test_default_integrations_for_tool_with_datasource.py +26 -0
  43. codemie_test_harness/tests/workflow/assistant_tools/file_management/test_workflow_with_assistant_with_file_management_tools.py +63 -1
  44. codemie_test_harness/tests/workflow/assistant_tools/git/test_workflow_with_assistant_git_tools.py +82 -7
  45. codemie_test_harness/tests/workflow/assistant_tools/mcp/test_workflow_with_assistant_with_mcp_server.py +23 -4
  46. codemie_test_harness/tests/workflow/assistant_tools/notification/test_workflow_with_assistant_notification_tools.py +12 -0
  47. codemie_test_harness/tests/workflow/assistant_tools/open_api/test_workflow_with_assistant_with_open_api_tools.py +6 -0
  48. codemie_test_harness/tests/workflow/assistant_tools/plugin/test_workflow_with_assistant_with_development_plugin.py +29 -2
  49. codemie_test_harness/tests/workflow/assistant_tools/plugin/test_workflow_with_assistant_with_plugin_and_mcp_servers.py +14 -1
  50. codemie_test_harness/tests/workflow/assistant_tools/project_management/test_workflow_with_assistant_pm_tools.py +7 -0
  51. codemie_test_harness/tests/workflow/assistant_tools/report_portal/test_workflow_with_assistant_with_report_portal_tools.py +7 -0
  52. codemie_test_harness/tests/workflow/assistant_tools/research/test_workflow_with_assistant_research_tools.py +14 -1
  53. codemie_test_harness/tests/workflow/assistant_tools/servicenow/test_workflow_with_servicenow_tools.py +6 -0
  54. codemie_test_harness/tests/workflow/assistant_tools/vcs/workflow_with_assistant_vcs_tools.py +6 -0
  55. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_cloud_tools.py +12 -9
  56. codemie_test_harness/tests/workflow/virtual_assistant_tools/access_management/test_workflow_with_keycloak_tool.py +8 -1
  57. codemie_test_harness/tests/workflow/virtual_assistant_tools/ado/test_workflow_with_ado_test_plan_tools.py +28 -1
  58. codemie_test_harness/tests/workflow/virtual_assistant_tools/ado/test_workflow_with_ado_wiki_tools.py +24 -1
  59. codemie_test_harness/tests/workflow/virtual_assistant_tools/ado/test_workflow_with_ado_work_item_tools.py +20 -1
  60. codemie_test_harness/tests/workflow/virtual_assistant_tools/cloud/test_workflow_with_cloud_tools.py +13 -4
  61. codemie_test_harness/tests/workflow/virtual_assistant_tools/codebase/test_workflow_with_codebase_tools.py +16 -1
  62. codemie_test_harness/tests/workflow/virtual_assistant_tools/data_management/test_workflow_with_data_management_tools.py +73 -1
  63. codemie_test_harness/tests/workflow/virtual_assistant_tools/default_integrations/test_default_integrations_for_tool.py +34 -1
  64. codemie_test_harness/tests/workflow/virtual_assistant_tools/default_integrations/test_default_integrations_for_tool_kit.py +34 -1
  65. codemie_test_harness/tests/workflow/virtual_assistant_tools/default_integrations/test_default_integrations_for_tool_with_datasource.py +34 -1
  66. codemie_test_harness/tests/workflow/virtual_assistant_tools/file_management/test_workflow_with_file_management_tools.py +101 -49
  67. codemie_test_harness/tests/workflow/virtual_assistant_tools/git/test_workflow_with_git_tools.py +42 -3
  68. codemie_test_harness/tests/workflow/virtual_assistant_tools/mcp/test_workflow_with_mcp_server.py +27 -5
  69. codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/test_workflow_with_notification_tools.py +13 -0
  70. codemie_test_harness/tests/workflow/virtual_assistant_tools/open_api/test_workflow_with_open_api_tools.py +10 -1
  71. codemie_test_harness/tests/workflow/virtual_assistant_tools/plugin/test_workflow_with_development_plugin.py +20 -0
  72. codemie_test_harness/tests/workflow/virtual_assistant_tools/plugin/test_workflow_with_plugin_and_mcp_servers.py +14 -1
  73. codemie_test_harness/tests/workflow/virtual_assistant_tools/project_management/test_workflow_with_project_management_tools.py +10 -1
  74. codemie_test_harness/tests/workflow/virtual_assistant_tools/report_portal/test_workflow_with_report_portal_tool.py +10 -1
  75. codemie_test_harness/tests/workflow/virtual_assistant_tools/research/test_workflow_with_research_tools.py +9 -0
  76. codemie_test_harness/tests/workflow/virtual_assistant_tools/servicenow/test_workflow_with_servicenow_tools.py +10 -1
  77. codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/test_workflow_with_vcs_tools.py +9 -1
  78. {codemie_test_harness-0.1.184.dist-info → codemie_test_harness-0.1.198.dist-info}/METADATA +134 -3
  79. {codemie_test_harness-0.1.184.dist-info → codemie_test_harness-0.1.198.dist-info}/RECORD +81 -81
  80. {codemie_test_harness-0.1.184.dist-info → codemie_test_harness-0.1.198.dist-info}/WHEEL +0 -0
  81. {codemie_test_harness-0.1.184.dist-info → codemie_test_harness-0.1.198.dist-info}/entry_points.txt +0 -0
@@ -15,6 +15,8 @@ from .constants import (
     KEY_MARKS,
     KEY_XDIST_N,
     KEY_RERUNS,
+    KEY_COUNT,
+    KEY_TIMEOUT,
     KEY_AUTH_SERVER_URL,
     KEY_AUTH_CLIENT_ID,
     KEY_AUTH_CLIENT_SECRET,
@@ -22,12 +24,10 @@ from .constants import (
     KEY_CODEMIE_API_DOMAIN,
     KEY_AUTH_USERNAME,
     KEY_AUTH_PASSWORD,
-    # integrations
     DEFAULT_MARKS,
     DEFAULT_XDIST_N,
     DEFAULT_RERUNS,
-    AUTH_KEYS,
-    INTEGRATION_KEYS,
+    DEFAULT_TIMEOUT,
 )
 from .utils import get_config_value, ensure_env_from_config
 from .runner import run_pytest
@@ -50,6 +50,18 @@ from .commands.marks_cmd import marks_cmd
 @click.option(
     "--reruns", envvar=KEY_RERUNS, type=int, help="Number of reruns for flaky tests"
 )
+@click.option(
+    "--count",
+    envvar=KEY_COUNT,
+    type=int,
+    help="Number of times to repeat each test (requires pytest-repeat)",
+)
+@click.option(
+    "--timeout",
+    envvar=KEY_TIMEOUT,
+    type=int,
+    help="Per-test timeout in seconds (default: 300)",
+)
 @click.option("--auth-server-url", envvar=KEY_AUTH_SERVER_URL, help="Auth server url")
 @click.option("--auth-client-id", envvar=KEY_AUTH_CLIENT_ID, help="Auth client id")
 @click.option(
@@ -68,6 +80,8 @@ def cli(
     marks: Optional[str],
     workers: Optional[int],
     reruns: Optional[int],
+    count: Optional[int],
+    timeout: Optional[int],
     auth_server_url: Optional[str],
     auth_client_id: Optional[str],
     auth_client_secret: Optional[str],
@@ -99,6 +113,16 @@ def cli(
         if reruns is not None
         else int(get_config_value(KEY_RERUNS, str(DEFAULT_RERUNS)))
     )
+    resolved_count = (
+        count
+        if count is not None
+        else (int(get_config_value(KEY_COUNT)) if get_config_value(KEY_COUNT) else None)
+    )
+    resolved_timeout = (
+        timeout
+        if timeout is not None
+        else int(get_config_value(KEY_TIMEOUT, str(DEFAULT_TIMEOUT)))
+    )
 
     # Ensure env vars. CLI args override env/config.
     provided = {
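The two resolver expressions above implement a three-level precedence for the new options: an explicit CLI flag (click also feeds the matching PYTEST_COUNT/PYTEST_TIMEOUT envvar into it) wins, then the saved config file, then a package default; note that --count has no default, so it stays unset unless requested. A minimal self-contained sketch of that logic — resolve_int_option is a hypothetical helper for illustration, not part of the package:

    from typing import Optional

    def resolve_int_option(
        cli_value: Optional[int],
        config_value: Optional[str],
        default: Optional[int],
    ) -> Optional[int]:
        # CLI flag (or its envvar, mapped by click) takes precedence,
        # then the saved config value, then the default.
        if cli_value is not None:
            return cli_value
        if config_value:
            return int(config_value)
        return default

    assert resolve_int_option(None, None, 300) == 300  # --timeout falls back to the default
    assert resolve_int_option(None, "50", None) == 50  # config value used when flag absent
    assert resolve_int_option(600, "50", 300) == 600   # explicit flag always wins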
@@ -115,15 +139,27 @@ def cli(
         if v is not None and v != "":
             os.environ[k] = str(v)
     # populate any missing values from saved config
-    ensure_env_from_config(AUTH_KEYS + INTEGRATION_KEYS)
+    ensure_env_from_config()
 
     ctx.obj.update(
-        dict(marks=resolved_marks, workers=resolved_workers, reruns=resolved_reruns)
+        dict(
+            marks=resolved_marks,
+            workers=resolved_workers,
+            reruns=resolved_reruns,
+            count=resolved_count,
+            timeout=resolved_timeout,
+        )
     )
 
     # default behavior
     if ctx.invoked_subcommand is None and not ctx.resilient_parsing:
-        run_pytest(resolved_workers, resolved_marks, resolved_reruns)
+        run_pytest(
+            resolved_workers,
+            resolved_marks,
+            resolved_reruns,
+            resolved_count,
+            resolved_timeout,
+        )
 
 
 # Register subcommands
@@ -5,9 +5,9 @@ from typing import Dict, List
 from ..constants import (
     CONSOLE,
     CREDENTIAL_CATEGORIES,
-    INTEGRATION_KEYS,
     mask_sensitive_value,
     is_sensitive_key,
+    INTEGRATION_KEYS,
 )
 from ..utils import (
     load_config,
@@ -10,6 +10,16 @@ from ..runner import run_pytest
     "-n", "workers", type=int, help="Override number of xdist workers for this run"
 )
 @click.option("--reruns", type=int, help="Override number of reruns for this run")
+@click.option(
+    "--count",
+    type=int,
+    help="Number of times to repeat each test (requires pytest-repeat)",
+)
+@click.option(
+    "--timeout",
+    type=int,
+    help="Per-test timeout in seconds (overrides config/default)",
+)
 @click.argument("extra", nargs=-1)
 @click.pass_context
 def run_cmd(
@@ -17,14 +27,27 @@ def run_cmd(
     marks: Optional[str],
     workers: Optional[int],
     reruns: Optional[int],
+    count: Optional[int],
+    timeout: Optional[int],
     extra: Tuple[str, ...],
 ):
     """Run pytest with configured options.
 
     Example: codemie-test-harness run --marks "smoke and not ui" -n 8 --reruns 2 -k keyword
+    Example with repeat: codemie-test-harness run --marks excel_generation --count 50 -n 10
+    Example with timeout: codemie-test-harness run --marks slow --timeout 600 -n 4
     """
     resolved_marks = marks or ctx.obj.get("marks")
     resolved_workers = workers if workers is not None else ctx.obj.get("workers")
     resolved_reruns = reruns if reruns is not None else ctx.obj.get("reruns")
+    resolved_count = count if count is not None else ctx.obj.get("count")
+    resolved_timeout = timeout if timeout is not None else ctx.obj.get("timeout")
 
-    run_pytest(int(resolved_workers), str(resolved_marks), int(resolved_reruns), extra)
+    run_pytest(
+        int(resolved_workers),
+        str(resolved_marks),
+        int(resolved_reruns),
+        resolved_count,
+        resolved_timeout,
+        extra,
+    )
@@ -19,6 +19,7 @@ KEY_CODEMIE_API_DOMAIN = "CODEMIE_API_DOMAIN"
 KEY_MARKS = "PYTEST_MARKS"
 KEY_XDIST_N = "PYTEST_N"
 KEY_RERUNS = "PYTEST_RERUNS"
+KEY_COUNT = "PYTEST_COUNT"
 
 # === COMPLETE INTEGRATION CREDENTIALS KEYS ===
 # Version Control Systems (GitLab, GitHub)
@@ -49,7 +49,12 @@ def resolve_tests_path_and_root() -> tuple[str, str]:
 
 
 def build_pytest_cmd(
-    workers: int, marks: str, reruns: int, extra: Iterable[str] | None = None
+    workers: int,
+    marks: str,
+    reruns: int,
+    count: int | None = None,
+    timeout: int | None = None,
+    extra: Iterable[str] | None = None,
 ) -> tuple[List[str], str]:
     tests_path, root_dir = resolve_tests_path_and_root()
     cmd = [sys.executable, "-m", "pytest", tests_path]
@@ -59,6 +64,10 @@ def build_pytest_cmd(
         cmd += ["-m", str(marks)]
     if reruns and int(reruns) > 0:
         cmd += ["--reruns", str(reruns)]
+    if count and int(count) > 0:
+        cmd += ["--count", str(count)]
+    if timeout and int(timeout) > 0:
+        cmd += ["--timeout", str(timeout)]
     if extra:
         cmd += list(extra)
     return cmd, root_dir
@@ -97,11 +106,16 @@ def validate_marks_expression(marks: str) -> None:
 
 
 def run_pytest(
-    workers: int, marks: str, reruns: int, extra: Iterable[str] | None = None
+    workers: int,
+    marks: str,
+    reruns: int,
+    count: int | None = None,
+    timeout: int | None = None,
+    extra: Iterable[str] | None = None,
 ) -> None:
     # Validate marks before running pytest
     validate_marks_expression(marks)
 
-    cmd, root_dir = build_pytest_cmd(workers, marks, reruns, extra)
+    cmd, root_dir = build_pytest_cmd(workers, marks, reruns, count, timeout, extra)
     CONSOLE.print(f"[cyan]Running:[/] {' '.join(cmd)} (cwd={root_dir})")
     raise SystemExit(subprocess.call(cmd, cwd=root_dir))
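Taken together, the runner changes simply append two more optional flags to the assembled command. A hedged sketch of the result (the "tests" path and the -n handling are assumptions for illustration; only the -m/--reruns/--count/--timeout branches appear in this diff):

    import sys

    workers, marks, reruns, count, timeout = 10, "excel_generation", 2, 50, 600

    cmd = [sys.executable, "-m", "pytest", "tests"]
    if workers and int(workers) > 0:
        cmd += ["-n", str(workers)]         # pytest-xdist (assumed, not shown above)
    if marks:
        cmd += ["-m", str(marks)]
    if reruns and int(reruns) > 0:
        cmd += ["--reruns", str(reruns)]    # pytest-rerunfailures
    if count and int(count) > 0:
        cmd += ["--count", str(count)]      # pytest-repeat
    if timeout and int(timeout) > 0:
        cmd += ["--timeout", str(timeout)]  # pytest-timeout
    print(" ".join(cmd))
    # e.g. .../python -m pytest tests -n 10 -m excel_generation --reruns 2 --count 50 --timeout 600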
@@ -37,9 +37,19 @@ def set_config_value(key: str, value: Any) -> None:
     save_config(cfg)
 
 
-def ensure_env_from_config(keys: list[str]) -> None:
-    # Populate missing env vars from config file if present
+def ensure_env_from_config(keys: Optional[list[str]] = None) -> None:
+    """
+    Populate missing environment variables from config file.
+
+    Args:
+        keys: Optional list of specific keys to load. If None, loads all keys from config.
+    """
     cfg = load_config()
+
+    # If no specific keys provided, load all keys from config
+    if keys is None:
+        keys = list(cfg.keys())
+
     for k in keys:
         if k not in os.environ and k in cfg:
             os.environ[k] = str(cfg[k])
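The practical effect of the new default: called with no arguments, every key saved in the config file becomes eligible for export, not just the former AUTH_KEYS + INTEGRATION_KEYS set, while environment variables that are already set are never overwritten. A small self-contained demonstration (the config contents here are illustrative stand-ins for load_config()):

    import os

    cfg = {"PYTEST_TIMEOUT": "600", "AUTH_CLIENT_ID": "ci-bot"}  # stand-in for load_config()

    def ensure_env_from_config(keys=None):
        keys = list(cfg.keys()) if keys is None else keys
        for k in keys:
            if k not in os.environ and k in cfg:
                os.environ[k] = str(cfg[k])

    os.environ["PYTEST_TIMEOUT"] = "120"             # pre-set env var
    ensure_env_from_config()                         # no keys: all saved keys eligible
    assert os.environ["PYTEST_TIMEOUT"] == "120"     # existing value wins
    assert os.environ["AUTH_CLIENT_ID"] == "ci-bot"  # gap filled from config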
@@ -1,5 +1,8 @@
 [pytest]
 addopts = -v
+timeout = 300
+timeout_method = signal
+timeout_func_only = true
 filterwarnings =
     ignore::pytest.PytestUnknownMarkWarning
    ignore::urllib3.exceptions.InsecureRequestWarning
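These pytest-timeout settings give every test a 300-second budget, enforced with SIGALRM (timeout_method = signal, Unix only) and measured over the test function alone (timeout_func_only = true), so slow session fixtures don't consume it. Individual tests can still widen or disable the limit with the standard marker; a sketch with hypothetical test names:

    import time
    import pytest

    @pytest.mark.timeout(600)  # widen the limit for one known-slow test
    def test_long_running_export():
        time.sleep(1)          # stand-in for real work

    @pytest.mark.timeout(0)    # 0 disables the timeout for this test entirely
    def test_unbounded():
        assert True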
@@ -436,7 +436,9 @@ def test_excel_tool_extended_functionality(
     - Multi-sheet comprehensive analysis
 
     """
-    assistant_instance = assistant()
+    assistant_instance = assistant(
+        system_prompt="You have all required information in initial prompt. Do not ask additional questions and proceed with request."
+    )
 
     uploaded_file = assistant_utils.upload_file_to_chat(
         FILES_PATH / "test_extended.xlsx"
@@ -490,3 +492,17 @@ def test_docx_tool_extended_functionality(
 
     assert_tool_triggered(Default.DOCX_TOOL, triggered_tools)
     similarity_check.check_similarity(response, expected_response)
+
+
+@pytest.mark.assistant
+# @pytest.mark.api
+@pytest.mark.smoke
+@pytest.mark.excel_generation
+def test_excel_file_generation(assistant_utils, assistant, similarity_check):
+    assistant_instance = assistant()
+
+    response = assistant_utils.ask_assistant(
+        assistant_instance, "Generate excel with 5 sheets with random data"
+    )
+
+    similarity_check.check_similarity(response, "expected_response_here")
@@ -3,6 +3,7 @@ import uuid
 import pytest
 from codemie_sdk.models.integration import CredentialTypes
 
+from codemie_test_harness.tests.enums.integrations import DataBaseDialect
 from codemie_test_harness.tests.enums.tools import (
     Toolkit,
     DataManagementTool,
@@ -16,10 +17,12 @@ from codemie_test_harness.tests.test_data.data_management_tools_test_data import (
     SQL_TOOL_INSERT_TABLE_TASK,
     SQL_TOOL_QUERY_TABLE_TASK,
     RESPONSE_FOR_SQL,
+    INFLUXDB_QUERY_MEASUREMENT_TASK,
+    RESPONSE_FOR_INFLUXDB,
 )
+from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
 from codemie_test_harness.tests.utils.credentials_manager import CredentialsManager
 from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
-from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
 
 pytestmark = pytest.mark.skipif(
     EnvironmentResolver.is_localhost(),
@@ -70,12 +73,15 @@ def test_create_assistant_with_sql_tool(
     )
 
     assistant = assistant(
-        Toolkit.DATA_MANAGEMENT, DataManagementTool.SQL, settings=settings
+        Toolkit.DATA_MANAGEMENT,
+        DataManagementTool.SQL,
+        settings=settings,
+        system_prompt="Always run tools for user prompt",
     )
 
     conversation_id = str(uuid.uuid4())
 
-    response, triggered_tools = assistant_utils.ask_assistant(
+    _, triggered_tools = assistant_utils.ask_assistant(
         assistant,
         SQL_TOOL_CREATE_TABLE_TASK,
         conversation_id=conversation_id,
@@ -83,7 +89,7 @@ def test_create_assistant_with_sql_tool(
     )
     assert_tool_triggered(DataManagementTool.SQL, triggered_tools)
 
-    response, triggered_tools = assistant_utils.ask_assistant(
+    _, triggered_tools = assistant_utils.ask_assistant(
         assistant,
         SQL_TOOL_INSERT_TABLE_TASK,
         conversation_id=conversation_id,
@@ -98,8 +104,9 @@ def test_create_assistant_with_sql_tool(
         minimal_response=False,
     )
     assert_tool_triggered(DataManagementTool.SQL, triggered_tools)
+    similarity_check.check_similarity(response, RESPONSE_FOR_SQL)
 
-    response, triggered_tools = assistant_utils.ask_assistant(
+    _, triggered_tools = assistant_utils.ask_assistant(
         assistant,
         SQL_TOOL_DELETE_TABLE_TASK,
         conversation_id=conversation_id,
@@ -107,4 +114,38 @@ def test_create_assistant_with_sql_tool(
     )
     assert_tool_triggered(DataManagementTool.SQL, triggered_tools)
 
-    similarity_check.check_similarity(response, RESPONSE_FOR_SQL)
+
+@pytest.mark.assistant
+@pytest.mark.sql
+@pytest.mark.influx
+@pytest.mark.api
+@pytest.mark.testcase("EPMCDME-6132")
+@pytest.mark.skipif(
+    not EnvironmentResolver.is_sandbox(),
+    reason="InfluxDB is only available in sandbox environments",
+)
+def test_create_assistant_with_influxdb_tool(
+    integration_utils, assistant_utils, assistant, similarity_check
+):
+    """Test creating assistant with InfluxDB tool and performing time-series operations."""
+
+    credential_values = CredentialsManager.sql_credentials(DataBaseDialect.INFLUX)
+    settings = integration_utils.create_integration(
+        CredentialTypes.SQL, credential_values
+    )
+
+    assistant = assistant(
+        Toolkit.DATA_MANAGEMENT, DataManagementTool.SQL, settings=settings
+    )
+
+    conversation_id = str(uuid.uuid4())
+
+    # Query the measurement
+    response, triggered_tools = assistant_utils.ask_assistant(
+        assistant,
+        INFLUXDB_QUERY_MEASUREMENT_TASK,
+        conversation_id=conversation_id,
+        minimal_response=False,
+    )
+    assert_tool_triggered(DataManagementTool.SQL, triggered_tools)
+    similarity_check.check_similarity(response, RESPONSE_FOR_INFLUXDB)
@@ -17,10 +17,6 @@ from codemie_test_harness.tests.test_data.mcp_server_test_data import (
     CLI_MCP_SERVER,
 )
 
-# pytestmark = pytest.mark.skipif(
-#     EnvironmentResolver.is_localhost(), reason="Skipping this test on local environment"
-# )
-
 
 @pytest.mark.assistant
 @pytest.mark.mcp
@@ -12,10 +12,6 @@ from codemie_test_harness.tests.test_data.mcp_server_test_data import (
     fetch_server_prompt,
 )
 
-# pytestmark = pytest.mark.skipif(
-#     EnvironmentResolver.is_localhost(), reason="Skipping this test on local environment"
-# )
-
 
 @pytest.mark.assistant
 @pytest.mark.mcp
@@ -25,6 +25,7 @@ from codemie_sdk.models.integration import (
 from codemie_sdk.models.workflow import WorkflowCreateRequest, WorkflowMode, Workflow
 
 from codemie_test_harness.tests import PROJECT, autotest_entity_prefix
+from codemie_test_harness.tests.test_data.file_test_data import file_test_data
 from codemie_test_harness.tests.test_data.google_datasource_test_data import (
     GOOGLE_DOC_URL,
 )
@@ -32,7 +33,7 @@ from codemie_test_harness.tests.utils.assistant_utils import AssistantUtils
 from codemie_test_harness.tests.utils.credentials_manager import CredentialsManager
 from codemie_test_harness.tests.utils.base_utils import get_random_name, wait_for_entity
 from codemie_test_harness.tests.utils.client_factory import get_client
-from codemie_test_harness.tests.utils.constants import TESTS_PATH
+from codemie_test_harness.tests.utils.constants import TESTS_PATH, FILES_PATH
 from codemie_test_harness.tests.utils.conversation_utils import ConversationUtils
 from codemie_test_harness.tests.utils.datasource_utils import DataSourceUtils
 from codemie_test_harness.tests.utils.gitbud_utils import GitBudUtils
@@ -458,6 +459,21 @@ def code_datasource(
         datasource_utils.delete_datasource(datasource)
 
 
+@pytest.fixture(scope="session")
+def file_datasource(datasource_utils, default_embedding_llm):
+    file_name = file_test_data[2][0]
+
+    datasource = datasource_utils.create_file_datasource(
+        name=get_random_name(),
+        description=f"[Autotest] {file_name} with {default_embedding_llm.base_name} embedding model",
+        files=[str(FILES_PATH / file_name)],
+        embeddings_model=default_embedding_llm.base_name,
+    )
+    yield datasource
+    if datasource:
+        datasource_utils.delete_datasource(datasource)
+
+
 @pytest.fixture(scope="session")
 def gitlab_datasource(datasource_utils, gitlab_integration, default_embedding_llm):
     datasource = datasource_utils.create_gitlab_datasource(
@@ -845,8 +861,8 @@ def ado_integration(integration_utils):
 
 
 @pytest.fixture(scope="function")
-def cloud_integration(integration_utils):
-    """Create Cloud integration"""
+def integration(integration_utils):
+    """Create integration with custom credentials"""
 
     created_integration: Optional[Integration] = None
 
@@ -81,7 +81,7 @@ class Environment(Enum):
         Returns:
             List of Environment enums: [PREVIEW, AZURE, LOCALHOST]
         """
-        return [cls.PREVIEW, cls.AZURE, cls.LOCALHOST]
+        return [cls.PREVIEW, cls.AZURE, cls.LOCALHOST, cls.PRODUCTION]
 
     @classmethod
     def get_gcp_environments(cls) -> List["Environment"]:
@@ -90,7 +90,7 @@ class Environment(Enum):
         Returns:
             List of Environment enums: [PREVIEW, GCP, LOCALHOST]
         """
-        return [cls.PREVIEW, cls.GCP, cls.LOCALHOST]
+        return [cls.PREVIEW, cls.GCP, cls.LOCALHOST, cls.PRODUCTION]
 
     @classmethod
     def get_aws_environments(cls) -> List["Environment"]:
@@ -99,4 +99,4 @@ class Environment(Enum):
         Returns:
             List of Environment enums: [PREVIEW, AWS, LOCALHOST]
         """
-        return [cls.PREVIEW, cls.AWS, cls.LOCALHOST]
+        return [cls.PREVIEW, cls.AWS, cls.LOCALHOST, cls.PRODUCTION]
@@ -7,3 +7,4 @@ class DataBaseDialect(str, Enum):
     MS_SQL = "mssql"
     MY_SQL = "mysql"
     POSTGRES = "postgres"
+    INFLUX = "influxdb"
@@ -33,6 +33,7 @@ class ModelTypes(str, Enum):
     CLAUDE_35_SONNET_V2 = "claude-3-5-sonnet-v2"
     CLAUDE_37_SONNET_V1 = "claude-3-7"
     CLAUDE_4_SONNET = "claude-4-sonnet"
+    CLAUDE_4_5_SONNET = "claude-4-5-sonnet"
     CLAUDE_4_OPUS = "claude-4-opus"
     CLAUDE_4_1_OPUS = "claude-4-1-opus"
     CLAUDE_4_SONNET_1M = "claude-4-sonnet-1m"
@@ -37,15 +37,9 @@ from codemie_test_harness.tests.test_data.pm_tools_test_data import (
 from codemie_test_harness.tests.test_data.report_portal_tools_test_data import (
     rp_test_data,
 )
+from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
 from codemie_test_harness.tests.utils.credentials_manager import CredentialsManager
-from codemie_test_harness.tests.enums.environment import Environment
 from codemie_test_harness.tests.utils.constants import test_project_name
-from codemie_test_harness.tests.utils.env_resolver import get_environment
-
-pytestmark = pytest.mark.skipif(
-    get_environment() in [Environment.LOCALHOST, Environment.GCP],
-    reason="Skipping this test on local environment",
-)
 
 
 @pytest.mark.assistant
@@ -87,7 +81,11 @@ def test_assistant_with_default_integration_cloud(
     # create an assistant
     cloud_assistant = assistant(toolkit, tool_name, project_name=test_project_name)
 
-    response = assistant_utils.ask_assistant(cloud_assistant, prompt)
+    response, triggered_tools = assistant_utils.ask_assistant(
+        cloud_assistant, prompt, minimal_response=False
+    )
+
+    assert_tool_triggered(tool_name, triggered_tools)
 
     similarity_check.check_similarity(response, expected_response)
 
@@ -126,7 +124,11 @@ def test_assistant_with_default_integration_ado(
     # create an assistant
     ado_assistant = assistant(toolkit, tool_name, project_name=test_project_name)
 
-    response = assistant_utils.ask_assistant(ado_assistant, prompt)
+    response, triggered_tools = assistant_utils.ask_assistant(
+        ado_assistant, prompt, minimal_response=False
+    )
+
+    assert_tool_triggered(tool_name, triggered_tools)
 
     similarity_check.check_similarity(response, expected_response)
 
@@ -170,7 +172,11 @@ def test_assistant_with_default_integration_codebase(
         toolkit, CodeBaseTool.SONAR, project_name=test_project_name
     )
 
-    response = assistant_utils.ask_assistant(sonar_assistant, prompt)
+    response, triggered_tools = assistant_utils.ask_assistant(
+        sonar_assistant, prompt, minimal_response=False
+    )
+
+    assert_tool_triggered(CodeBaseTool.SONAR, triggered_tools)
 
     similarity_check.check_similarity(response, expected_response)
 
@@ -223,7 +229,11 @@ def test_assistant_with_default_integration_git(
         project_name=test_project_name,
     )
 
-    response = assistant_utils.ask_assistant(git_assistant, prompt)
+    response, triggered_tools = assistant_utils.ask_assistant(
+        git_assistant, prompt, minimal_response=False
+    )
+
+    assert_tool_triggered(tool_name, triggered_tools)
 
     similarity_check.check_similarity(response, expected_response)
 
@@ -262,7 +272,11 @@ def test_assistant_with_default_integration_jira(
         project_name=test_project_name,
     )
 
-    response = assistant_utils.ask_assistant(jira_assistant, JIRA_TOOL_PROMPT)
+    response, triggered_tools = assistant_utils.ask_assistant(
+        jira_assistant, JIRA_TOOL_PROMPT, minimal_response=False
+    )
+
+    assert_tool_triggered(ProjectManagementTool.JIRA, triggered_tools)
 
     similarity_check.check_similarity(response, RESPONSE_FOR_JIRA_TOOL)
 
@@ -299,7 +313,11 @@ def test_assistant_with_default_integration_email(
         Toolkit.NOTIFICATION, NotificationTool.EMAIL, project_name=test_project_name
     )
 
-    response = assistant_utils.ask_assistant(email_assistant, EMAIL_TOOL_PROMPT)
+    response, triggered_tools = assistant_utils.ask_assistant(
+        email_assistant, EMAIL_TOOL_PROMPT, minimal_response=False
+    )
+
+    assert_tool_triggered(NotificationTool.EMAIL, triggered_tools)
 
     similarity_check.check_similarity(response, EMAIL_RESPONSE)
 
@@ -336,7 +354,11 @@ def test_assistant_with_default_integration_keycloak(
         project_name=test_project_name,
     )
 
-    response = assistant_utils.ask_assistant(keycloak_assistant, KEYCLOAK_TOOL_PROMPT)
+    response, triggered_tools = assistant_utils.ask_assistant(
+        keycloak_assistant, KEYCLOAK_TOOL_PROMPT, minimal_response=False
+    )
+
+    assert_tool_triggered(AccessManagementTool.KEYCLOAK, triggered_tools)
 
     similarity_check.check_similarity(response, KEYCLOAK_TOOL_RESPONSE)
 
@@ -376,6 +398,10 @@ def test_assistant_with_default_integration_report_portal(
         project_name=test_project_name,
     )
 
-    response = assistant_utils.ask_assistant(report_portal_assistant, prompt)
+    response, triggered_tools = assistant_utils.ask_assistant(
+        report_portal_assistant, prompt, minimal_response=False
+    )
+
+    assert_tool_triggered(ReportPortalTool.GET_DASHBOARD_DATA, triggered_tools)
 
     similarity_check.check_similarity(response, expected_response)
@@ -13,10 +13,12 @@ from codemie_test_harness.tests.test_data.integrations_test_data import (
     valid_integrations,
     invalid_integrations,
     testable_integrations,
+    empty_credentials_integrations,
 )
 from codemie_test_harness.tests.utils.base_utils import (
     get_random_name,
     assert_error_details,
+    assert_tool_triggered,
 )
 
@@ -250,3 +252,43 @@ def test_delete_integration(
         equal_to("Specified credential removed"),
         "Integration delete response is not as expected.",
     )
+
+
+@pytest.mark.assistant
+@pytest.mark.integration
+@pytest.mark.api
+@pytest.mark.parametrize(
+    "empty_credentials, credential_type, toolkit, tool_name, prompt, expected_response",
+    empty_credentials_integrations,
+)
+def test_assistant_with_empty_credentials(
+    assistant_utils,
+    assistant,
+    integration,
+    similarity_check,
+    empty_credentials,
+    credential_type,
+    toolkit,
+    tool_name,
+    prompt,
+    expected_response,
+):
+    # Create integration with empty credentials
+    empty_integration = integration(
+        credential_type=credential_type,
+        credential_values=empty_credentials,
+    )
+
+    assistant_instance = assistant(
+        toolkit,
+        tool_name,
+        settings=empty_integration,
+    )
+
+    response, triggered_tools = assistant_utils.ask_assistant(
+        assistant_instance, prompt, minimal_response=False, extract_failed_tools=True
+    )
+
+    assert_tool_triggered(tool_name, triggered_tools)
+
+    similarity_check.check_similarity(response, expected_response)