codemie-test-harness 0.1.218__py3-none-any.whl → 0.1.221__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

@@ -0,0 +1,371 @@
+"""
+Tests for assistant functionality with sub-assistants.
+
+This module tests the ability to create assistants with sub-assistants,
+delegate tasks between parent and child assistants, and manage hierarchical
+assistant structures.
+"""
+
+import pytest
+from codemie_sdk.models.assistant import (
+    AssistantUpdateRequest,
+)
+from hamcrest import (
+    assert_that,
+    equal_to,
+    has_items,
+)
+
+from codemie_test_harness.tests import PROJECT
+from codemie_test_harness.tests.enums.tools import (
+    ProjectManagementTool,
+    GitTool,
+    FileManagementTool,
+    Toolkit,
+)
+from codemie_test_harness.tests.test_data.file_management_tools_test_data import (
+    CODE_INTERPRETER_TOOL_TASK,
+    RESPONSE_FOR_CODE_INTERPRETER,
+)
+from codemie_test_harness.tests.test_data.git_tools_test_data import (
+    list_branches_set_active_branch_test_data,
+)
+from codemie_test_harness.tests.test_data.pm_tools_test_data import (
+    JIRA_TOOL_PROMPT,
+    RESPONSE_FOR_JIRA_TOOL,
+)
+from codemie_test_harness.tests.utils.base_utils import assert_error_details
+
+
+@pytest.mark.assistant
+@pytest.mark.sub_assistant
+@pytest.mark.api
+@pytest.mark.smoke
+def test_create_assistant_with_single_sub_assistant(assistant, assistant_utils):
+    """
+    Test creating an assistant with a single sub-assistant.
+
+    Verifies that:
+    - Parent assistant can be created with a sub-assistant reference
+    - Sub-assistant is properly linked to parent assistant
+    - The assistant_ids field contains the sub-assistant ID
+    """
+    # Create sub-assistant first
+    sub_assistant = assistant(
+        system_prompt="You are a specialized sub-assistant for calculations",
+    )
+
+    # Create parent assistant with sub-assistant
+    parent_assistant = assistant(
+        system_prompt="You are a parent assistant that delegates to sub-assistants",
+        sub_assistants_ids=[sub_assistant.id],
+    )
+
+    # Verify parent has sub-assistant
+    assert_that(
+        assistant_utils.get_assistant_by_id(parent_assistant.id).assistant_ids,
+        has_items(sub_assistant.id),
+        "Parent assistant should contain sub-assistant ID",
+    )
+
+
+@pytest.mark.assistant
+@pytest.mark.sub_assistant
+@pytest.mark.api
+@pytest.mark.smoke
+def test_create_assistant_with_multiple_sub_assistants(assistant, assistant_utils):
+    """
+    Test creating an assistant with multiple sub-assistants.
+
+    Verifies that:
+    - Parent assistant can reference multiple sub-assistants
+    - All sub-assistant IDs are properly stored
+    - Multiple sub-assistants can coexist
+    """
+    # Create multiple sub-assistants
+    sub_assistant_1 = assistant(
+        system_prompt="You are a specialized sub-assistant for math",
+    )
+
+    sub_assistant_2 = assistant(
+        system_prompt="You are a specialized sub-assistant for text analysis",
+    )
+
+    sub_assistant_3 = assistant(
+        system_prompt="You are a specialized sub-assistant for data processing",
+    )
+
+    # Create parent assistant with multiple sub-assistants
+    parent_assistant = assistant(
+        system_prompt="You are a parent assistant coordinating multiple sub-assistants",
+        sub_assistants_ids=[sub_assistant_1.id, sub_assistant_2.id, sub_assistant_3.id],
+    )
+
+    # Verify parent has all sub-assistants
+    parent_assistant = assistant_utils.get_assistant_by_id(parent_assistant.id)
+    assert_that(
+        len(parent_assistant.assistant_ids),
+        equal_to(3),
+        "Parent assistant should have 3 sub-assistants",
+    )
+    assert_that(
+        parent_assistant.assistant_ids,
+        has_items(sub_assistant_1.id, sub_assistant_2.id, sub_assistant_3.id),
+        "Parent assistant should contain all sub-assistant IDs",
+    )
+
+
+@pytest.mark.assistant
+@pytest.mark.sub_assistant
+@pytest.mark.api
+def test_update_assistant_to_add_sub_assistant(assistant, assistant_utils):
+    """
+    Test updating an existing assistant to add a sub-assistant.
+
+    Verifies that:
+    - Existing assistant can be updated to include sub-assistants
+    - Sub-assistant list can be modified after creation
+    """
+    # Create assistants
+    sub_assistant = assistant(
+        system_prompt="You are a specialized sub-assistant",
+    )
+
+    parent_assistant = assistant(
+        system_prompt="You are a parent assistant",
+    )
+
+    # Verify parent initially has no sub-assistants
+    parent_assistant = assistant_utils.get_assistant_by_id(parent_assistant.id)
+    assert_that(
+        len(parent_assistant.assistant_ids),
+        equal_to(0),
+        "Parent should initially have no sub-assistants",
+    )
+
+    # Update parent to include sub-assistant
+    update_request = AssistantUpdateRequest(
+        name=parent_assistant.name,
+        description=parent_assistant.description,
+        shared=False,
+        system_prompt=parent_assistant.system_prompt,
+        project=PROJECT,
+        llm_model_type=parent_assistant.llm_model_type,
+        assistant_ids=[sub_assistant.id],
+    )
+
+    assistant_utils.update_assistant(parent_assistant.id, update_request)
+
+    # Verify sub-assistant was added
+    updated_parent = assistant_utils.get_assistant_by_id(parent_assistant.id)
+    assert_that(
+        updated_parent["assistant_ids"],
+        has_items(sub_assistant.id),
+        "Parent assistant should now contain sub-assistant ID",
+    )
+
+
+@pytest.mark.assistant
+@pytest.mark.sub_assistant
+@pytest.mark.api
+def test_update_assistant_to_remove_sub_assistant(assistant, assistant_utils):
+    """
+    Test updating an assistant to remove a sub-assistant.
+
+    Verifies that:
+    - Sub-assistants can be removed from parent assistant
+    - Sub-assistant list can be cleared via update
+    """
+    # Create sub-assistant
+    sub_assistant = assistant(
+        system_prompt="You are a specialized sub-assistant",
+    )
+
+    # Create parent with sub-assistant
+    parent_assistant = assistant(
+        system_prompt="You are a parent assistant",
+        sub_assistants_ids=[sub_assistant.id],
+    )
+
+    # Verify sub-assistant was added
+    parent_assistant = assistant_utils.get_assistant_by_id(parent_assistant.id)
+    assert_that(
+        len(parent_assistant.assistant_ids),
+        equal_to(1),
+        "Parent should have one sub-assistant",
+    )
+
+    # Remove sub-assistant
+    update_request = AssistantUpdateRequest(
+        name=parent_assistant.name,
+        slug=parent_assistant.slug,
+        description=parent_assistant.description,
+        shared=False,
+        system_prompt=parent_assistant.system_prompt,
+        project=PROJECT,
+        llm_model_type=parent_assistant.llm_model_type,
+        assistant_ids=[],
+    )
+    assistant_utils.update_assistant(parent_assistant.id, update_request)
+
+    # Verify sub-assistant was removed
+    updated_parent = assistant_utils.get_assistant_by_id(parent_assistant.id)
+    assert_that(
+        len(updated_parent["assistant_ids"]),
+        equal_to(0),
+        "Parent should have no sub-assistants after removal",
+    )
+
+
+@pytest.mark.assistant
+@pytest.mark.sub_assistant
+@pytest.mark.api
+def test_nested_sub_assistants_not_allowed(assistant, assistant_utils):
+    """
+    Test that nested sub-assistant hierarchies are not allowed.
+
+    Verifies that:
+    - System prevents or rejects nested sub-assistant structures
+    - An assistant that is already a sub-assistant cannot be added as a parent
+    - Only flat (single-level) sub-assistant relationships are supported
+    """
+
+    assistant_c = assistant(
+        system_prompt="You are assistant C",
+    )
+
+    # First, make C a sub-assistant of B (B -> C)
+    assistant_b = assistant(
+        system_prompt="You are assistant B", sub_assistants_ids=[assistant_c.id]
+    )
+
+    # Verify B has C as sub-assistant
+    assistant_b = assistant_utils.get_assistant_by_id(assistant_b.id)
+    assert_that(
+        assistant_b["assistant_ids"],
+        has_items(assistant_c.id),
+        "Assistant B should have C as sub-assistant",
+    )
+
+    # Now try to create assistant with sub-assistant B (which has sub-assistant)
+    with pytest.raises(Exception) as exec_info:
+        assistant_utils.send_create_assistant_request(
+            system_prompt="You are assistant A",
+            assistant_ids=[assistant_b.id],
+        )
+    assert_error_details(
+        exec_info.value.response,
+        400,
+        f"Nested assistants not supported. Inner assistants ({assistant_b.name}) cannot contain their own inner assistants",
+    )
+
+    assistant_a = assistant(
+        system_prompt="You are assistant A",
+    )
+
+    assistant_a = assistant_utils.get_assistant_by_id(assistant_a.id)
+
+    update_a = AssistantUpdateRequest(
+        name=assistant_a.name,
+        description=assistant_a.description,
+        shared=False,
+        system_prompt=assistant_a.system_prompt,
+        project=PROJECT,
+        llm_model_type=assistant_a.llm_model_type,
+        assistant_ids=[assistant_b.id],  # Try to add B (which has C) as sub-assistant
+    )
+
+    with pytest.raises(Exception) as exec_info:
+        assistant_utils.update_assistant(assistant_a.id, update_a)
+    assert_error_details(
+        exec_info.value.response,
+        400,
+        f"Nested assistants not supported. Inner assistants ({assistant_b.name}) cannot contain their own inner assistants",
+    )
+
+
+@pytest.mark.assistant
+@pytest.mark.sub_assistant
+@pytest.mark.api
+def test_chat_with_assistant_having_different_sub_assistants(
+    assistant,
+    assistant_utils,
+    similarity_check,
+    jira_integration,
+    gitlab_integration,
+    code_datasource,
+    code_context,
+):
+    """
+    Test chatting with an assistant that has sub-assistants with different tools.
+
+    Verifies that:
+    - Parent assistant can delegate to sub-assistants with different capabilities
+    - Sub-assistants with Jira, Git, and Code-executor tools work correctly
+    - Chat functionality properly routes requests to appropriate sub-assistants
+    """
+
+    # 1. Create sub-assistant with Jira tool
+    jira_assistant = assistant(
+        Toolkit.PROJECT_MANAGEMENT,
+        ProjectManagementTool.JIRA,
+        description="Jira expert. Helps with searching and managing Jira issues.",
+        settings=jira_integration,
+        system_prompt="You are a Jira specialist assistant. You help with searching and managing Jira issues.",
+    )
+
+    # 2. Create sub-assistant with Git tool
+    git_assistant = assistant(
+        Toolkit.GIT,
+        GitTool.LIST_BRANCHES_IN_REPO,
+        description="Git expert. Helps with Git repository operations.",
+        settings=gitlab_integration,
+        context=code_context(code_datasource),
+        system_prompt="You are a Git specialist assistant. You help with Git repository operations.",
+    )
+
+    # 3. Create sub-assistant with Code-executor tool (Python REPL)
+    code_executor_assistant = assistant(
+        Toolkit.FILE_MANAGEMENT,
+        FileManagementTool.PYTHON_CODE_INTERPRETER,
+        description="Python code execution expert. Helps with running Python code.",
+        system_prompt="You are a Python code execution specialist. You help with running Python code.",
+    )
+
+    # 4. Create parent assistant with all sub-assistants
+    parent_assistant = assistant(
+        system_prompt=(
+            "You are a coordinator assistant that delegates tasks to specialized sub-assistants. "
+            "You have access to sub-assistants for Jira operations, Git operations, and Python code execution. "
+            f"Use {jira_assistant.name} for Jira related requests."
+            f"Use {git_assistant.name} for Git related requests."
+            f"Use {code_executor_assistant.name} for Code execution requests."
+        ),
+        sub_assistants_ids=[
+            jira_assistant.id,
+            git_assistant.id,
+            code_executor_assistant.id,
+        ],
+    )
+
+    # 5. Send a chat request to parent assistant asking Jira-related question
+    response = assistant_utils.ask_assistant(
+        parent_assistant, JIRA_TOOL_PROMPT, minimal_response=True
+    )
+    similarity_check.check_similarity(response, RESPONSE_FOR_JIRA_TOOL)
+
+    # 6. Send a chat request to parent assistant asking Git-related question
+    git_tool_prompt = list_branches_set_active_branch_test_data[0][2]
+    git_tool_answer = list_branches_set_active_branch_test_data[0][3]
+    response = assistant_utils.ask_assistant(
+        parent_assistant, git_tool_prompt, minimal_response=True
+    )
+
+    similarity_check.check_similarity(response, git_tool_answer)
+
+    # 7. Send a chat request to parent assistant asking Code-executor question
+    response = assistant_utils.ask_assistant(
+        parent_assistant, CODE_INTERPRETER_TOOL_TASK, minimal_response=True
+    )
+
+    similarity_check.check_similarity(response, RESPONSE_FOR_CODE_INTERPRETER)
@@ -1,12 +1,8 @@
 import pytest
+from codemie_sdk.models.integration import CredentialTypes
 from hamcrest import assert_that, equal_to
 
-from codemie_sdk.models.integration import CredentialTypes
 from codemie_test_harness.tests.enums.tools import Toolkit, NotificationTool
-from codemie_test_harness.tests.utils.credentials_manager import CredentialsManager
-from codemie_test_harness.tests.enums.environment import Environment
-from codemie_test_harness.tests.utils.env_resolver import get_environment
-from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
 from codemie_test_harness.tests.test_data.notification_tools_test_data import (
     EMAIL_TOOL_PROMPT,
     EMAIL_RESPONSE,
@@ -15,6 +11,9 @@ from codemie_test_harness.tests.test_data.notification_tools_test_data import (
     TELEGRAM_TOOL_PROMPT,
     TELEGRAM_RESPONSE,
 )
+from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
+from codemie_test_harness.tests.utils.credentials_manager import CredentialsManager
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 
 @pytest.mark.assistant
@@ -22,7 +21,7 @@ from codemie_test_harness.tests.test_data.notification_tools_test_data import (
 @pytest.mark.email
 @pytest.mark.api
 @pytest.mark.skipif(
-    get_environment() in [Environment.LOCALHOST, Environment.GCP],
+    EnvironmentResolver.is_localhost(),
     reason="Skipping this test on local environment",
 )
 def test_assistant_with_email_tool(
@@ -595,6 +595,7 @@ def assistant(default_llm, assistant_utils, conversation_utils):
         project_name: str = None,
         description: str = None,
         system_prompt="You are a helpful integration test assistant",
+        sub_assistants_ids: List[str] = None,
     ):
         nonlocal created_assistant
         # Correctly handle empty `tool_names`
@@ -622,6 +623,7 @@ def assistant(default_llm, assistant_utils, conversation_utils):
             system_prompt=system_prompt,
             project_name=project_name,
             description=description,
+            assistant_ids=sub_assistants_ids if sub_assistants_ids else [],
         )
         return created_assistant
 
@@ -1,6 +1,9 @@
+from typing import Sequence
+
 import pytest
 from codemie_sdk.models.assistant import ToolKitDetails, ToolDetails
 from hamcrest import assert_that, has_item
+
 from codemie_test_harness.tests.enums.model_types import ModelTypes
 from codemie_test_harness.tests.enums.tools import Toolkit, FileManagementTool
 from codemie_test_harness.tests.test_data.llm_test_data import MODEL_RESPONSES
@@ -8,6 +11,13 @@ from codemie_test_harness.tests.utils.client_factory import get_client
 from codemie_test_harness.tests.utils.env_resolver import get_environment
 from codemie_test_harness.tests.utils.pytest_utils import check_mark
 
+SIMPLE_GREETING_PROMPT = "Just say one word: 'Hello'"
+
+
+def get_model_names(llm_utils) -> Sequence[str]:
+    """Get list of available model names."""
+    return [row.base_name for row in llm_utils.list_llm_models()]
+
 
 def pytest_generate_tests(metafunc):
     if "model_type" in metafunc.fixturenames:
@@ -41,7 +51,7 @@ def test_assistant_with_different_models(
     llm_utils, assistant_utils, model_type, similarity_check, filesystem_integration
 ):
     assert_that(
-        [row.base_name for row in llm_utils.list_llm_models()],
+        get_model_names(llm_utils),
         has_item(model_type),
         f"{model_type} is missing in backend response",
     )
@@ -56,8 +66,8 @@
         settings=filesystem_integration,
     )
 
-    assistant = assistant_utils.create_assistant(model_type, toolkits=[tool])
-    response = assistant_utils.ask_assistant(assistant, "Just say one word: 'Hello'")
+    _assistant = assistant_utils.create_assistant(model_type, toolkits=[tool])
+    response = assistant_utils.ask_assistant(_assistant, SIMPLE_GREETING_PROMPT)
 
     if model_type in [ModelTypes.DEEPSEEK_R1, ModelTypes.RLAB_QWQ_32B]:
         response = "\n".join(response.split("\n")[-3:])
@@ -73,12 +83,12 @@ def test_assistant_with_different_models_with_top_p_parameter(
     llm_utils, assistant_utils, model_type, similarity_check
 ):
     assert_that(
-        [row.base_name for row in llm_utils.list_llm_models()],
+        get_model_names(llm_utils),
         has_item(model_type),
         f"{model_type} is missing in backend response",
     )
-    assistant = assistant_utils.create_assistant(model_type, top_p=0.5)
-    response = assistant_utils.ask_assistant(assistant, "Just say one word: 'Hello'")
+    _assistant = assistant_utils.create_assistant(model_type, top_p=0.5)
+    response = assistant_utils.ask_assistant(_assistant, SIMPLE_GREETING_PROMPT)
 
     if model_type in [ModelTypes.DEEPSEEK_R1, ModelTypes.RLAB_QWQ_32B]:
         response = "\n".join(response.split("\n")[-3:])
@@ -94,12 +104,12 @@ def test_assistant_with_different_models_with_temperature_parameter(
     llm_utils, assistant_utils, model_type, similarity_check
 ):
     assert_that(
-        [row.base_name for row in llm_utils.list_llm_models()],
+        get_model_names(llm_utils),
         has_item(model_type),
         f"{model_type} is missing in backend response",
     )
-    assistant = assistant_utils.create_assistant(model_type, temperature=0.5)
-    response = assistant_utils.ask_assistant(assistant, "Just say one word: 'Hello'")
+    _assistant = assistant_utils.create_assistant(model_type, temperature=0.5)
+    response = assistant_utils.ask_assistant(_assistant, SIMPLE_GREETING_PROMPT)
 
     if model_type in [ModelTypes.DEEPSEEK_R1, ModelTypes.RLAB_QWQ_32B]:
         response = "\n".join(response.split("\n")[-3:])
@@ -121,15 +131,49 @@ def test_assistant_with_different_models_with_datasource_attached(
     file_datasource,
 ):
     assert_that(
-        [row.base_name for row in llm_utils.list_llm_models()],
+        get_model_names(llm_utils),
         has_item(model_type),
         f"{model_type} is missing in backend response",
     )
 
-    assistant = assistant_utils.create_assistant(
+    _assistant = assistant_utils.create_assistant(
         model_type, context=[kb_context(file_datasource)]
     )
-    response = assistant_utils.ask_assistant(assistant, "Just say one word: 'Hello'")
+    response = assistant_utils.ask_assistant(_assistant, SIMPLE_GREETING_PROMPT)
+
+    if model_type in [ModelTypes.DEEPSEEK_R1, ModelTypes.RLAB_QWQ_32B]:
+        response = "\n".join(response.split("\n")[-3:])
+    similarity_check.check_similarity(response, "Hello")
+
+
+@pytest.mark.assistant
+@pytest.mark.llm
+@pytest.mark.sub_assistant
+@pytest.mark.api
+@pytest.mark.smoke
+def test_assistant_with_different_models_with_sub_assistant(
+    llm_utils, assistant_utils, model_type, similarity_check
+):
+    assert_that(
+        get_model_names(llm_utils),
+        has_item(model_type),
+        f"{model_type} is missing in backend response",
+    )
+
+    # Create sub-assistant with default model
+    sub_assistant = assistant_utils.create_assistant(
+        system_prompt="You are a specialized sub-assistant.",
+    )
+
+    # Create parent assistant with the parametrized model that delegates to sub-assistant
+    parent_assistant = assistant_utils.create_assistant(
+        llm_model_type=model_type,
+        system_prompt="You are a coordinator assistant.",
+        assistant_ids=[sub_assistant.id],
+    )
+
+    # Test parent assistant that should delegate to sub-assistant
+    response = assistant_utils.ask_assistant(parent_assistant, SIMPLE_GREETING_PROMPT)
 
     if model_type in [ModelTypes.DEEPSEEK_R1, ModelTypes.RLAB_QWQ_32B]:
         response = "\n".join(response.split("\n")[-3:])
@@ -1,6 +1,3 @@
-from codemie_test_harness.tests.enums.environment import Environment
-from codemie_test_harness.tests.utils.env_resolver import get_environment
-
 import pytest
 
 from codemie_test_harness.tests.enums.tools import NotificationTool
@@ -9,6 +6,7 @@ from codemie_test_harness.tests.test_data.notification_tools_test_data import (
     EMAIL_BODY,
     EMAIL_RESPONSE,
 )
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 notification_tools_test_data = [
     pytest.param(
@@ -22,7 +20,7 @@ notification_tools_test_data = [
         marks=[
             pytest.mark.email,
             pytest.mark.skipif(
-                get_environment() in [Environment.LOCALHOST, Environment.GCP],
+                EnvironmentResolver.is_localhost(),
                 reason="Skipping this test on local environment",
             ),
         ],
@@ -2,7 +2,7 @@ from codemie_test_harness.tests.utils.confluence_utils import CONFLUENCE_SPACE_K
 from codemie_test_harness.tests.utils.jira_utils import JIRA_PROJECT_KEYS
 
 JIRA_TOOL_PROMPT = (
-    "Get a title for ticket EPMCDME-222 ticket."
+    "Get a title for EPMCDME-222 ticket."
     "For generic jira tool use exactly the same parameters:"
     "relative_url=/rest/api/2/issue/EPMCDME-222"
     "method=GET"
@@ -30,6 +30,7 @@ class AssistantUtils(BaseUtils):
         project_name=None,
         top_p=None,
         temperature=None,
+        assistant_ids=(),
     ):
         # Generate a random name if assistant_name is not provided
         assistant_name = assistant_name if assistant_name else get_random_name()
@@ -50,6 +51,7 @@ class AssistantUtils(BaseUtils):
             mcp_servers=mcp_servers,
             top_p=top_p,
             temperature=temperature,
+            assistant_ids=list(assistant_ids) if assistant_ids else [],
         )
 
         response = self.client.assistants.create(request)
@@ -70,6 +72,7 @@ class AssistantUtils(BaseUtils):
         top_p=None,
         temperature=None,
         description=None,
+        assistant_ids=(),
     ):
         # Generate a random name if assistant_name is not provided
         assistant_name = assistant_name if assistant_name else get_random_name()
@@ -91,6 +94,7 @@ class AssistantUtils(BaseUtils):
             top_p=top_p,
             temperature=temperature,
             description=description,
+            assistant_ids=assistant_ids,
         )
 
         return wait_for_entity(
@@ -1,6 +1,3 @@
-from codemie_test_harness.tests.enums.environment import Environment
-from codemie_test_harness.tests.utils.env_resolver import get_environment
-
 import pytest
 from hamcrest import assert_that, equal_to
 
@@ -14,6 +11,7 @@ from codemie_test_harness.tests.test_data.notification_tools_test_data import (
     TELEGRAM_RESPONSE,
 )
 from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 
 @pytest.mark.workflow
@@ -22,7 +20,7 @@ from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
 @pytest.mark.email
 @pytest.mark.api
 @pytest.mark.skipif(
-    get_environment() in [Environment.LOCALHOST, Environment.GCP],
+    EnvironmentResolver.is_localhost(),
    reason="Skipping this test on local environment",
 )
 def test_workflow_with_email_tool(
@@ -39,7 +39,7 @@ def test_workflow_with_notification_tools_direct(
         _workflow.id, tool_and_state_name, user_input=json.dumps(prompt)
     )
 
-    similarity_check.check_similarity(response, expected_response, 80)
+    similarity_check.check_similarity(response, expected_response, 75)
 
 
 @pytest.mark.api
@@ -65,7 +65,7 @@ def test_workflow_with_notification_tools_with_hardcoded_args(
     )
     response = workflow_utils.execute_workflow(_workflow.id, tool_and_state_name)
 
-    similarity_check.check_similarity(response, expected_response, 80)
+    similarity_check.check_similarity(response, expected_response, 75)
 
 
 @pytest.mark.api
@@ -2,7 +2,6 @@ import pytest
 from hamcrest import assert_that, equal_to
 
 from codemie_test_harness.tests.enums.tools import NotificationTool
-from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
 from codemie_test_harness.tests.test_data.notification_tools_test_data import (
     EMAIL_TOOL_PROMPT,
     EMAIL_RESPONSE,
@@ -11,9 +10,9 @@ from codemie_test_harness.tests.test_data.notification_tools_test_data import (
     TELEGRAM_TOOL_PROMPT,
     TELEGRAM_RESPONSE,
 )
+from codemie_test_harness.tests.utils.base_utils import assert_tool_triggered
 from codemie_test_harness.tests.utils.base_utils import get_random_name
-from codemie_test_harness.tests.enums.environment import Environment
-from codemie_test_harness.tests.utils.env_resolver import get_environment
+from codemie_test_harness.tests.utils.env_resolver import EnvironmentResolver
 
 
 @pytest.mark.workflow
@@ -23,7 +22,7 @@ from codemie_test_harness.tests.utils.env_resolver import get_environment
 @pytest.mark.api
 @pytest.mark.testcase("EPMCDME-6652")
 @pytest.mark.skipif(
-    get_environment() in [Environment.LOCALHOST, Environment.GCP],
+    EnvironmentResolver.is_localhost(),
     reason="Skipping this test on local environment",
 )
 def test_workflow_with_notification_email_tool(
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: codemie-test-harness
-Version: 0.1.218
+Version: 0.1.221
 Summary: Autotest for CodeMie backend and UI
 Author: Anton Yeromin
 Author-email: anton_yeromin@epam.com
@@ -13,7 +13,7 @@ Requires-Dist: aws-assume-role-lib (>=2.10.0,<3.0.0)
 Requires-Dist: boto3 (>=1.39.8,<2.0.0)
 Requires-Dist: click (>=8.1.7,<9.0.0)
 Requires-Dist: codemie-plugins (>=0.1.123,<0.2.0)
-Requires-Dist: codemie-sdk-python (==0.1.218)
+Requires-Dist: codemie-sdk-python (==0.1.221)
 Requires-Dist: pytest (>=8.4.1,<9.0.0)
 Requires-Dist: pytest-playwright (>=0.7.0,<0.8.0)
 Requires-Dist: pytest-repeat (>=0.9.3,<0.10.0)
@@ -26,6 +26,7 @@ codemie_test_harness/tests/assistant/default_integrations/test_default_integrati
 codemie_test_harness/tests/assistant/default_integrations/test_default_integrations_for_tool_kit.py,sha256=k455DvSPG-mIyk2vuJfp-hJVUsUUyWlOBlb-igMJi2E,8511
 codemie_test_harness/tests/assistant/default_integrations/test_default_integrations_for_tool_with_datasource.py,sha256=knJ86qJLvMozHPLXMeFzeXrJJVIleqNQte9zSw6b0oI,10831
 codemie_test_harness/tests/assistant/test_assistants.py,sha256=pFcTM8BjuOYnOKu_qwai1NVdsG0ztWYYNQuOkivPS7A,16609
+codemie_test_harness/tests/assistant/test_sub_assistants.py,sha256=BY-kbq2I1I3vSvf0pDBU8LLKwGqyQnuhc4KqlMx0nvA,12657
 codemie_test_harness/tests/assistant/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/assistant/tools/access_management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/assistant/tools/access_management/test_keycloak_tool.py,sha256=FVCb9VBUtwApiBkrqDCSpelZvPRXRR4LmbrNr1wk13s,941
@@ -47,7 +48,7 @@ codemie_test_harness/tests/assistant/tools/mcp/__init__.py,sha256=47DEQpj8HBSa-_
 codemie_test_harness/tests/assistant/tools/mcp/test_cli_mcp_server.py,sha256=SCEf5koVICJYRv5Du3TT0viAPmWojgHO8PX5ZVGPCrE,2761
 codemie_test_harness/tests/assistant/tools/mcp/test_mcp_servers.py,sha256=omH5BVtyOZqlqX9NOfWelJ93geLSTVQXropCMAhAkiA,1568
 codemie_test_harness/tests/assistant/tools/notification/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-codemie_test_harness/tests/assistant/tools/notification/test_assistant_notification_tools.py,sha256=4XrHYvUtbm8bp2bmajOuSSgvSuwXI8etHK6vRhIWkDM,2962
+codemie_test_harness/tests/assistant/tools/notification/test_assistant_notification_tools.py,sha256=ils26CN9ZUh6s5D48uuz3IGEDtilR7uRl64cQFCa61U,2870
 codemie_test_harness/tests/assistant/tools/openapi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/assistant/tools/openapi/test_assistant_with_open_api_tools.py,sha256=DXyCNON0EmrTqDbg-su9pPe46dS4EB5UasuHBNwMn-s,1262
 codemie_test_harness/tests/assistant/tools/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -64,7 +65,7 @@ codemie_test_harness/tests/assistant/tools/servicenow/__init__.py,sha256=47DEQpj
 codemie_test_harness/tests/assistant/tools/servicenow/test_servicenow_tools.py,sha256=aUjfZ4773WoQJjcHx3JqH5e8ckaKB-aIMO-OZWTm0Ek,888
 codemie_test_harness/tests/assistant/tools/vcs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/assistant/tools/vcs/test_assistant_with_vcs_tools.py,sha256=qOPr4XOh2rgUV2MXMxkRzRGkAKl9ViwQGCZ-dMEtscU,1145
-codemie_test_harness/tests/conftest.py,sha256=o2A8_JrhYCIqPzBJwPtlORQexLjv8k6HpZmJNPy7An0,33807
+codemie_test_harness/tests/conftest.py,sha256=FH6xBlPTjU6wlHNZSJAxbNvYCjICZq3SRWMCEAKvF2c,33929
 codemie_test_harness/tests/conversations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/conversations/test_conversations_endpoints.py,sha256=HQ2nu9lXfRNkyJhA0rzar7Rmv6pMe-te0rFYAy-X5UA,4128
 codemie_test_harness/tests/e2e/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -84,7 +85,7 @@ codemie_test_harness/tests/integrations/user/test_user_integrations.py,sha256=SO
 codemie_test_harness/tests/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/llm/assistants/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/llm/assistants/test_lite_llm.py,sha256=jYtf_J7lBSmB1LmmXWjGHkMWmpcqAJ3l_IJovgYxmZM,3683
-codemie_test_harness/tests/llm/assistants/test_llm.py,sha256=b5HhrDkz1lwCaSZH5kdPdacmLXH_Bxnj5vO_A5ho3k8,4838
+codemie_test_harness/tests/llm/assistants/test_llm.py,sha256=APpyNpdcs0ID-IYy8Yxip_y954AqEz4zQzVgpeVYa2M,6130
 codemie_test_harness/tests/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/providers/test_providers_endpoints.py,sha256=0lHGRO3mh4uVK-GUPasPc05nrL4HfaYZPUPu9TZzw04,7946
 codemie_test_harness/tests/scheduler/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -123,7 +124,7 @@ codemie_test_harness/tests/test_data/direct_tools/data_management_tools_test_dat
 codemie_test_harness/tests/test_data/direct_tools/direct_tools_test_data.py,sha256=y9awl1IA6EXGXyz05QzcNdt5z7Rk9J5LzIbfi4CFE3s,3233
 codemie_test_harness/tests/test_data/direct_tools/file_management_tools_test_data.py,sha256=HD55qI37_koSBzSJBkafUI1CrgnL5hnWoA9n0cjOkmY,1622
 codemie_test_harness/tests/test_data/direct_tools/keycloak_tool_test_data.py,sha256=6lU1YC6DSV6e_3VNQEVvtA4o3_lSFgOQik4T0u-TF1g,2979
-codemie_test_harness/tests/test_data/direct_tools/notification_tools_test_data.py,sha256=Xo76Bd4R9cmfhEHUaUGh2O1qPzeoLG1El0m7VLRwP5M,1880
+codemie_test_harness/tests/test_data/direct_tools/notification_tools_test_data.py,sha256=EF8L0c7XjbueooF0Px4tdAqWje16VfXv39tNvfn3AoQ,1787
 codemie_test_harness/tests/test_data/direct_tools/open_api_tools_test_data.py,sha256=UwXVrwD3MnOIS2wwikNDk1EZNYR14p_IjhOOUwjYHOI,497
 codemie_test_harness/tests/test_data/direct_tools/project_management_tools_test_data.py,sha256=3s2biYoy5Nv8nwaWYbp1AoMpZ0lrUvlqHuFP9BlOHwg,21084
 codemie_test_harness/tests/test_data/direct_tools/report_portal_tools_test_data.py,sha256=C_3eqdhBh9bAl23fXXP0P23aN7xwMvvBVgaPC6_-ksA,53557
@@ -163,7 +164,7 @@ codemie_test_harness/tests/test_data/open_api_tools_test_data.py,sha256=mvf0legy
 codemie_test_harness/tests/test_data/openapi.json,sha256=X4uqtfjpTUuMifefQRf8mHI1k8pspp8-L0rpJlhLOI4,10459
 codemie_test_harness/tests/test_data/output_schema_test_data.py,sha256=4l7AvXbMl9hIvoFxu1LPPSGz9hb5Uz2_is4zTm77ARY,261
 codemie_test_harness/tests/test_data/plugin_tools_test_data.py,sha256=bVamztyQ4bAVo1CRSrtu6f5H-gkjhAN2nq5Jbc0erqM,4168
-codemie_test_harness/tests/test_data/pm_tools_test_data.py,sha256=yjEG24F10Rb5Br5dEPMIO74WMi2p3huZFgfRMYfzG7c,5150
+codemie_test_harness/tests/test_data/pm_tools_test_data.py,sha256=HNPxPQtHPjC0I02LmxF8dDr5orpqwtKOQ1KoHD_4_9c,5143
 codemie_test_harness/tests/test_data/project_management_test_data.py,sha256=2RWzrJmdlrOuJQKcmlWOfYz2daw_Oc2RkDU5XyM-w6U,3242
 codemie_test_harness/tests/test_data/report_portal_tools_test_data.py,sha256=YZdmfEwrwOdCduNxs768LOB8OHfL8sfNI-R2k-koKTk,14555
 codemie_test_harness/tests/test_data/research_tools_test_data.py,sha256=zwpzm-VSnrLZEfG97AE9Ms7z7j3xmqxiNd1EmZyWCSk,9102
@@ -269,7 +270,7 @@ codemie_test_harness/tests/ui/workflows/test_workflow_executions_page.py,sha256=
 codemie_test_harness/tests/ui/workflows/test_workflow_templates.py,sha256=u3EK7FkwjdGLK2JoDhc4gcVFtVXT-2-1UKBlFL-XlR0,4656
 codemie_test_harness/tests/ui/workflows/test_workflows.py,sha256=zRBFiQYhJ_MWKGGxUgGNsiTbs8P8Zw6HCuJXzkABvj0,3817
 codemie_test_harness/tests/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-codemie_test_harness/tests/utils/assistant_utils.py,sha256=2s1MikCfIcM3UlkcuwaedyxI2hePuWT3h8F3SDjBCtk,8022
+codemie_test_harness/tests/utils/assistant_utils.py,sha256=McT6EYZVQELYeYqjkgcJR08Pk4wXEXPKoT-z8olmcVY,8187
 codemie_test_harness/tests/utils/aws_parameters_store.py,sha256=YAVpvwElkKZJZvzSVxtOue1Gjs-kvSBS2y5QvIlz484,3267
 codemie_test_harness/tests/utils/base_utils.py,sha256=Yyj9HUk8-3Wf0hpWrMXiWDjvuw7ZRt89wiYUkoDghLk,7203
 codemie_test_harness/tests/utils/client_factory.py,sha256=xGta0ZaVYzWfwJ4cu3f89KkGc_R5Bq-9lqnhr57x_2w,972
@@ -324,7 +325,7 @@ codemie_test_harness/tests/workflow/assistant_tools/git/test_workflow_with_assis
 codemie_test_harness/tests/workflow/assistant_tools/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/workflow/assistant_tools/mcp/test_workflow_with_assistant_with_mcp_server.py,sha256=_tj5IP5Qy0tNI69hdDd5Cuecwu4Xz5dyrMCdvfnRQDs,3330
 codemie_test_harness/tests/workflow/assistant_tools/notification/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-codemie_test_harness/tests/workflow/assistant_tools/notification/test_workflow_with_assistant_notification_tools.py,sha256=0LztefJikX9looSkNzHgCW_ocj6B6EWQsSlooTBntl8,3120
+codemie_test_harness/tests/workflow/assistant_tools/notification/test_workflow_with_assistant_notification_tools.py,sha256=zwbWIOVxKnkoTGJTq39gpJmUKqJMQOwzbqgJds72QMA,3027
 codemie_test_harness/tests/workflow/assistant_tools/open_api/__init__.py,sha256=s6X-VOfd5UR7yxRL90VvL21YdYs1rPd6wkGFSfOI3Qs,46
 codemie_test_harness/tests/workflow/assistant_tools/open_api/test_workflow_with_assistant_with_open_api_tools.py,sha256=CtvQx6K1Nr821KusEfM1bqCjpm6IglCHQD7iaHgCXQw,1448
 codemie_test_harness/tests/workflow/assistant_tools/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -355,7 +356,7 @@ codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_code
 codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_data_management_tools_elastic.py,sha256=NzPxNdxLYAySIDzsP1ttF4BiDuk9kibHipFpqavKHAA,3271
 codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_data_management_tools_sql.py,sha256=8Clmo81Kd6Y_sEn532f4nhsIYvGy8REDGCPiFZyvPno,4161
 codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_file_management_tools.py,sha256=Ii7b65srfwk16jYALy5BzMhVErE3GPBWZ2M3JLkjqo0,2880
-codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_notification_tools.py,sha256=YzhsmmfidBYsGymBoO3q2WM9zw2zZfq7uOj90-bPN0c,2983
+codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_notification_tools.py,sha256=AUHfqOFHDmYZlMeSx1sQ-V1ia2caQ8JExqb-xHLka3M,2983
 codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_open_api_tools.py,sha256=CkF5VikbCMQyGoeibF9Al50AN-GW83F6k1usow17YlY,3208
 codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_plugin_tools.py,sha256=k8k0WlR0nJwr7OP61vHZ3a2Zvn9g4Gy2W1boiuiBXG0,4106
 codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_project_management_tools.py,sha256=BiMmDw5f7KNoYH1IfNX-7syV6CJV-4kP1U4OTSRJ6u4,3405
@@ -388,7 +389,7 @@ codemie_test_harness/tests/workflow/virtual_assistant_tools/git/test_workflow_wi
 codemie_test_harness/tests/workflow/virtual_assistant_tools/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/workflow/virtual_assistant_tools/mcp/test_workflow_with_mcp_server.py,sha256=qIIrKKIa0copWSUOcP87_YEFV1QpKs7OqsAAcVXF2uM,3560
 codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/__init__.py,sha256=hUDnGT_v3FoV6qsGpdACg_DfBFlfuubj8FjZLiuMrt0,50
-codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/test_workflow_with_notification_tools.py,sha256=UvULHw9k-GhrwBHwj3_s_2UVwh0nZgzu0YG0ZzlZMow,3193
+codemie_test_harness/tests/workflow/virtual_assistant_tools/notification/test_workflow_with_notification_tools.py,sha256=SMHB1EPJ6aqsZsJgLzzk7OjT7UL_X20Or5l5bpYnsJE,3101
 codemie_test_harness/tests/workflow/virtual_assistant_tools/open_api/__init__.py,sha256=s6X-VOfd5UR7yxRL90VvL21YdYs1rPd6wkGFSfOI3Qs,46
 codemie_test_harness/tests/workflow/virtual_assistant_tools/open_api/test_workflow_with_open_api_tools.py,sha256=JHZAtbWXLsgI6R0IYzw1yrZ8Pn5KtdD2KgbpokVcJjE,1474
 codemie_test_harness/tests/workflow/virtual_assistant_tools/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -404,7 +405,7 @@ codemie_test_harness/tests/workflow/virtual_assistant_tools/servicenow/__init__.
 codemie_test_harness/tests/workflow/virtual_assistant_tools/servicenow/test_workflow_with_servicenow_tools.py,sha256=D835gaRbCnB4va5mi9TdA_u9StSpGXQ_fgzwW0S2pwo,1173
 codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/test_workflow_with_vcs_tools.py,sha256=Se9imIiBYuJU78m1pLu0g4ZmHygKZjr6JjIWkGXTy1Q,1364
-codemie_test_harness-0.1.218.dist-info/METADATA,sha256=EmA3TMXO9FmRr4mF2hWy9Mcsu4AoEiA0tgjKDJUsdPM,27184
-codemie_test_harness-0.1.218.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
-codemie_test_harness-0.1.218.dist-info/entry_points.txt,sha256=n98t-EOM5M1mnMl_j2X4siyeO9zr0WD9a5LF7JyElIM,73
-codemie_test_harness-0.1.218.dist-info/RECORD,,
+codemie_test_harness-0.1.221.dist-info/METADATA,sha256=OuLbVODiKmBvq1nHcaT-popN7ecY22G6Mcci97oQCOc,27184
+codemie_test_harness-0.1.221.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+codemie_test_harness-0.1.221.dist-info/entry_points.txt,sha256=n98t-EOM5M1mnMl_j2X4siyeO9zr0WD9a5LF7JyElIM,73
+codemie_test_harness-0.1.221.dist-info/RECORD,,