codemie-test-harness 0.1.215__py3-none-any.whl → 0.1.217__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Potentially problematic release.
This version of codemie-test-harness might be problematic.
- codemie_test_harness/tests/conftest.py +35 -0
- codemie_test_harness/tests/scheduler/__init__.py +0 -0
- codemie_test_harness/tests/scheduler/test_scheduler_service.py +264 -0
- codemie_test_harness/tests/utils/base_utils.py +8 -6
- codemie_test_harness/tests/utils/datasource_utils.py +41 -0
- codemie_test_harness/tests/utils/workflow_utils.py +8 -4
- {codemie_test_harness-0.1.215.dist-info → codemie_test_harness-0.1.217.dist-info}/METADATA +2 -2
- {codemie_test_harness-0.1.215.dist-info → codemie_test_harness-0.1.217.dist-info}/RECORD +10 -8
- {codemie_test_harness-0.1.215.dist-info → codemie_test_harness-0.1.217.dist-info}/WHEEL +0 -0
- {codemie_test_harness-0.1.215.dist-info → codemie_test_harness-0.1.217.dist-info}/entry_points.txt +0 -0
codemie_test_harness/tests/conftest.py
@@ -439,6 +439,41 @@ def webhook_integration(integration_utils):
         pass


+@pytest.fixture(scope="module")
+def scheduler_integration(integration_utils):
+    created_integrations = []
+
+    def _create(
+        alias,
+        resource_type,
+        resource_id,
+        prompt: str = None,
+        schedule: str = "* * * * *",  # every minute
+        is_enabled: bool = True,
+    ):
+        credential_values = [
+            CredentialValues(key="url", value=CredentialsManager.AUTO_GENERATED),
+            CredentialValues(key="is_enabled", value=is_enabled),
+            CredentialValues(key="schedule", value=schedule),
+            CredentialValues(key="resource_type", value=resource_type),
+            CredentialValues(key="resource_id", value=resource_id),
+            CredentialValues(key="prompt", value=prompt),
+        ]
+        integration = integration_utils.create_integration(
+            CredentialTypes.SCHEDULER, credential_values, integration_alias=alias
+        )
+        created_integrations.append(integration)
+        return integration
+
+    yield _create
+
+    for integration in created_integrations:
+        try:
+            integration_utils.delete_integration(integration)
+        except HTTPError:
+            pass
+
+
 @pytest.fixture(scope="function")
 def general_integration(integration_utils):
     created_integration: Optional[Integration] = None
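The factory returned by this fixture is exercised by the new test module below. As a quick illustration of the one option the tests do not show, a disabled scheduler could be created roughly as in the sketch below; only the fixture signature comes from this diff, the test function and alias are hypothetical.

# Illustrative sketch only - not part of the package. It reuses the
# `assistant` and `scheduler_integration` fixtures visible in this diff.
def test_disabled_scheduler_sketch(assistant, scheduler_integration):
    created_assistant = assistant()
    integration = scheduler_integration(
        alias="disabled-scheduler-example",  # hypothetical alias
        resource_type="assistant",
        resource_id=created_assistant.id,
        prompt="should never be sent",
        schedule="* * * * *",
        is_enabled=False,  # scheduler is created but stays inactive
    )
    # The factory returns the created integration and deletes it at module teardown.
    assert integration is not None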
codemie_test_harness/tests/scheduler/__init__.py
File without changes
codemie_test_harness/tests/scheduler/test_scheduler_service.py
@@ -0,0 +1,264 @@
+"""Scheduler integration tests for CodeMie.
+
+This module contains tests for scheduler functionality including:
+- Basic scheduler operations with different resource types (assistant, workflow, datasource)
+- Error handling and validation for invalid cron expressions
+- Error handling for invalid resource IDs
+- Disabled scheduler verification
+"""
+
+import pytest
+
+from hamcrest import (
+    assert_that,
+    equal_to,
+    has_item,
+    any_of,
+)
+from requests import HTTPError
+from codemie_test_harness.tests.utils.base_utils import get_random_name, wait_for_entity
+from codemie_sdk.models.datasource import DataSourceStatus
+
+
+@pytest.mark.scheduler
+@pytest.mark.api
+class TestSchedulerValidIntegrations:
+    """Tests for valid scheduler integrations - verify scheduler creation with different resource types."""
+
+    def test_scheduler_with_assistant(
+        self, assistant, scheduler_integration, conversation_utils
+    ):
+        """Test scheduler can be created with assistant resource type.
+
+        This test verifies:
+        1. Scheduler integration can be created with an assistant
+        2. Integration stores correct configuration (schedule, resource type, resource ID)
+        3. Assistant can be triggered manually to verify the resource is valid
+        """
+        scheduler_alias = get_random_name()
+        message = f"Test message for scheduler {scheduler_alias}"
+
+        # Create assistant
+        assistant = assistant()
+
+        # Create scheduler integration with assistant
+        scheduler_integration(
+            alias=scheduler_alias,
+            resource_type="assistant",
+            resource_id=assistant.id,
+            prompt=message,
+            is_enabled=True,
+        )
+
+        # Wait for conversation
+        conversation = wait_for_entity(
+            lambda: conversation_utils.list_conversations(),
+            entity_name=message,
+            timeout=80,
+        )
+
+        # Verify conversation details
+        assert_that(conversation.name, equal_to(message))
+        assert_that(conversation.initial_assistant_id, equal_to(assistant.id))
+        assert_that(conversation.assistant_ids, has_item(assistant.id))
+
+    def test_scheduler_with_workflow(
+        self,
+        workflow_with_virtual_assistant,
+        scheduler_integration,
+        workflow_utils,
+    ):
+        """Test scheduler can be created with workflow resource type.
+
+        This test verifies:
+        1. Scheduler integration can be created with a workflow
+        2. Integration stores correct configuration
+        3. Workflow can be executed manually to verify the resource is valid
+        """
+        scheduler_alias = get_random_name()
+        message = f"Test message for scheduler {scheduler_alias}"
+
+        # Create a simple workflow
+        workflow = workflow_with_virtual_assistant(scheduler_alias)
+
+        # Create scheduler integration with workflow
+        scheduler_integration(
+            alias=scheduler_alias,
+            resource_type="workflow",
+            resource_id=workflow.id,
+            prompt=message,
+            is_enabled=True,
+        )
+
+        # Wait for execution with specific prompt
+        execution = wait_for_entity(
+            lambda: workflow_utils.get_executions(workflow),
+            entity_name=message,
+            timeout=80,
+        )
+
+        # Verify execution details
+        assert_that(execution.prompt.strip('""'), equal_to(message))
+        assert_that(execution.workflow_id, equal_to(workflow.id))
+
+    @pytest.mark.parametrize(
+        "datasource_fixture",
+        [
+            "jira_datasource",
+            "confluence_datasource",
+            "code_datasource",
+        ],
+    )
+    def test_scheduler_with_datasource(
+        self,
+        request,
+        datasource_fixture,
+        scheduler_integration,
+        datasource_utils,
+    ):
+        """Test scheduler can be created with datasource resource type.
+
+        This test verifies:
+        1. Scheduler integration can be created with different datasource types
+        2. Integration stores correct configuration
+        3. Datasource indexing can be triggered manually to verify the resource is valid
+
+        Test is parametrized to work with different datasource types:
+        - jira_datasource
+        - confluence_datasource
+        - code_datasource
+        """
+        # Get the datasource from the fixture
+        datasource = request.getfixturevalue(datasource_fixture)
+        scheduler_alias = get_random_name()
+
+        # Create scheduler integration with datasource
+        scheduler_integration(
+            alias=scheduler_alias,
+            resource_type="datasource",
+            resource_id=datasource.id,
+            is_enabled=True,
+        )
+
+        triggered_datasource = datasource_utils.wait_for_update_date_change(
+            datasource_id=datasource.id, timeout=80
+        )
+
+        # Verify datasource exists and check its status (indexing should be in progress)
+        assert_that(triggered_datasource.id, equal_to(datasource.id))
+        assert_that(triggered_datasource.name, equal_to(datasource.name))
+        assert_that(triggered_datasource.status, DataSourceStatus.FETCHING)
+
+
+@pytest.mark.scheduler
+@pytest.mark.api
+class TestSchedulerInvalidIntegrations:
+    """Tests for invalid scheduler integrations - verify proper error handling."""
+
+    @pytest.mark.parametrize(
+        "invalid_cron",
+        [
+            ("invalid_cron_expression"),
+            ("* * * *"),
+            ("60 0 * * *"),
+            ("0 25 * * *"),
+            ("0 0 32 * *"),
+            ("0 0 * 13 *"),
+            ("0 0 * * 8"),
+            (""),
+        ],
+    )
+    def test_scheduler_with_invalid_cron(
+        self, assistant, scheduler_integration, invalid_cron
+    ):
+        """Test scheduler creation fails with invalid cron expressions.
+
+        This test verifies that the system properly validates cron expressions
+        and rejects invalid formats.
+
+        Parametrized with various invalid cron expressions:
+        - Completely invalid format
+        - Incomplete expressions
+        - Out-of-range values for each field
+        - Empty expressions
+        """
+        scheduler_alias = get_random_name()
+        created_assistant = assistant()
+
+        with pytest.raises(HTTPError) as exc_info:
+            scheduler_integration(
+                alias=scheduler_alias,
+                resource_type="assistant",
+                resource_id=created_assistant.id,
+                schedule=invalid_cron,
+                prompt=scheduler_alias,
+                is_enabled=True,
+            )
+
+        assert_that(exc_info.value.response.status_code, equal_to(422))
+        assert_that(
+            exc_info.value.response.json()["error"]["message"],
+            any_of(
+                equal_to("Invalid cron expression"), equal_to("Invalid schedule format")
+            ),
+        )
+
+    @pytest.mark.parametrize(
+        "resource_type, error",
+        [
+            ("assistant", "Assistant not found"),
+            ("workflow", "Workflow not found"),
+            ("datasource", "Datasource not found"),
+        ],
+    )
+    def test_scheduler_with_invalid_resource_id(
+        self, scheduler_integration, resource_type, error
+    ):
+        """Test scheduler creation with non-existent resource ID.
+
+        This test verifies that the system handles invalid resource IDs properly,
+        either by rejecting them during creation or handling them gracefully during execution.
+        """
+        scheduler_alias = get_random_name()
+        invalid_resource_id = "invalid_resource_" + get_random_name()
+
+        with pytest.raises(HTTPError) as exc_info:
+            scheduler_integration(
+                alias=scheduler_alias,
+                resource_type=resource_type,
+                resource_id=invalid_resource_id,
+                prompt=scheduler_alias,
+                is_enabled=True,
+            )
+
+        assert_that(exc_info.value.response.status_code, equal_to(404))
+        assert_that(
+            exc_info.value.response.json()["error"]["message"],
+            equal_to(error),
+        )
+
+    def test_scheduler_with_invalid_resource_type(
+        self, assistant, scheduler_integration
+    ):
+        """Test scheduler creation with invalid resource type.
+
+        This test verifies that the system properly validates resource types.
+        """
+        scheduler_alias = get_random_name()
+        created_assistant = assistant()
+
+        with pytest.raises(HTTPError) as exc_info:
+            scheduler_integration(
+                alias=scheduler_alias,
+                resource_type="invalid_resource_type",
+                resource_id=created_assistant.id,
+                prompt=scheduler_alias,
+                is_enabled=True,
+            )
+
+        # Verify error has 422 status code (Unprocessable Entity)
+        assert_that(exc_info.value.response.status_code, equal_to(422))
+        assert_that(
+            exc_info.value.response.json()["error"]["message"],
+            equal_to("Invalid resource type"),
+        )
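Both test classes rely on the custom `scheduler` and `api` markers. Whether the harness already registers them is not visible in this diff; if it does not, a conftest.py hook along these lines (a sketch, not the package's actual configuration) would let `pytest -m scheduler` select the new tests without unknown-marker warnings.

# Sketch of marker registration in a conftest.py; the marker names mirror the
# ones used above, the description strings are made up.
def pytest_configure(config):
    config.addinivalue_line("markers", "scheduler: scheduler integration tests")
    config.addinivalue_line("markers", "api: API-level tests")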
codemie_test_harness/tests/utils/base_utils.py
@@ -104,12 +104,14 @@ def wait_for_entity(get_entity_callable, entity_name, timeout=10, poll_interval=
     start_time = time.time()

     while time.time() - start_time < timeout:
-
-
-
-
-
-
+        entities = [
+            raw
+            for raw in get_entity_callable()
+            if (hasattr(raw, "name") and entity_name == raw.name)
+            or (hasattr(raw, "alias") and entity_name == raw.alias)
+            or (hasattr(raw, "prompt") and entity_name == raw.prompt)
+        ]
+
         if len(entities) > 0:
             return entities[0]
         time.sleep(poll_interval)
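With this change, `wait_for_entity` accepts an entity as soon as `entity_name` equals its `name`, `alias`, or `prompt` attribute, which is what lets the scheduler tests above poll conversations and workflow executions by the prompt text. A minimal, self-contained sketch of that matching behavior (the `_Stub` class and sample values are illustrative only):

# Illustrative only: stand-in objects to show which attributes are matched.
from dataclasses import dataclass

from codemie_test_harness.tests.utils.base_utils import wait_for_entity


@dataclass
class _Stub:
    name: str = None
    alias: str = None
    prompt: str = None


candidates = [
    _Stub(name="something else"),
    _Stub(prompt="Test message for scheduler abc123"),
]

# Returns the second stub: its prompt attribute equals entity_name.
entity = wait_for_entity(
    lambda: candidates,
    entity_name="Test message for scheduler abc123",
    timeout=5,
)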
codemie_test_harness/tests/utils/datasource_utils.py
@@ -208,6 +208,47 @@ class DataSourceUtils(BaseUtils):
                 raise ApiError(f"Datasource {datasource_id} indexing failed")
         raise TimeoutError("Datasource was not indexed within the timeout period.")

+    def wait_for_update_date_change(
+        self,
+        datasource_id: str,
+        timeout: int = 80,
+        pool_interval: int = 3,
+    ) -> DataSource:
+        """Wait for datasource update_date to become greater than created_date.
+
+        This method polls the datasource and waits until the update_date is greater
+        than the created_date, indicating that the datasource has been updated/re-indexed.
+
+        Args:
+            datasource_id: The ID of the datasource to monitor
+            timeout: Maximum time to wait in seconds (default from DEFAULT_TIMEOUT)
+            pool_interval: Time between polling attempts in seconds (default 3)
+
+        Returns:
+            DataSource: The updated datasource object with update_date > created_date
+
+        Raises:
+            TimeoutError: If update_date doesn't change within the timeout period
+        """
+        start_time = time.time()
+
+        # Get initial datasource state
+        datasource = self.client.datasources.get(datasource_id)
+        created_date = datasource.created_date
+
+        while time.time() - start_time < timeout:
+            sleep(pool_interval)
+            datasource = self.client.datasources.get(datasource_id)
+
+            # Check if update_date is greater than created_date
+            if datasource.update_date and datasource.update_date > created_date:
+                return datasource
+
+        raise TimeoutError(
+            f"Datasource {datasource_id} update_date did not change within the timeout period. "
+            f"Created: {created_date}, Last checked update: {datasource.update_date}"
+        )
+
     def get_datasource(self, datasource_id):
         return self.client.datasources.get(datasource_id)

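For reference, the datasource test above calls the new helper roughly as in the sketch below; `datasource_utils` is assumed to be a `DataSourceUtils` instance (the fixture used by the scheduler tests) and the datasource ID is a placeholder.

# Illustrative usage; "my-datasource-id" is a placeholder, not a real ID.
updated = datasource_utils.wait_for_update_date_change(
    datasource_id="my-datasource-id",
    timeout=80,        # leave room for the every-minute cron to fire
    pool_interval=3,
)
# On success the returned datasource has been re-indexed at least once.
assert updated.update_date > updated.created_date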
codemie_test_harness/tests/utils/workflow_utils.py
@@ -123,11 +123,15 @@ class WorkflowUtils(BaseUtils):
     ):
         self.client.workflows.run(workflow, user_input=user_input, file_name=file_name)
         executions_service = self.client.workflows.executions(workflow)
-
-
-
-
+
+        # Wait for execution to appear (returns execution object with prompt attribute)
+        execution = wait_for_entity(
+            lambda: executions_service.list(),
+            entity_name=user_input,
         )
+        execution_id = execution.execution_id
+
+        # Wait for state to appear
         states_service = executions_service.states(execution_id)
         state = wait_for_entity(
             lambda: states_service.list(),
{codemie_test_harness-0.1.215.dist-info → codemie_test_harness-0.1.217.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: codemie-test-harness
-Version: 0.1.215
+Version: 0.1.217
 Summary: Autotest for CodeMie backend and UI
 Author: Anton Yeromin
 Author-email: anton_yeromin@epam.com
@@ -13,7 +13,7 @@ Requires-Dist: aws-assume-role-lib (>=2.10.0,<3.0.0)
 Requires-Dist: boto3 (>=1.39.8,<2.0.0)
 Requires-Dist: click (>=8.1.7,<9.0.0)
 Requires-Dist: codemie-plugins (>=0.1.123,<0.2.0)
-Requires-Dist: codemie-sdk-python (==0.1.
+Requires-Dist: codemie-sdk-python (==0.1.217)
 Requires-Dist: pytest (>=8.4.1,<9.0.0)
 Requires-Dist: pytest-playwright (>=0.7.0,<0.8.0)
 Requires-Dist: pytest-repeat (>=0.9.3,<0.10.0)
{codemie_test_harness-0.1.215.dist-info → codemie_test_harness-0.1.217.dist-info}/RECORD
@@ -64,7 +64,7 @@ codemie_test_harness/tests/assistant/tools/servicenow/__init__.py,sha256=47DEQpj
 codemie_test_harness/tests/assistant/tools/servicenow/test_servicenow_tools.py,sha256=aUjfZ4773WoQJjcHx3JqH5e8ckaKB-aIMO-OZWTm0Ek,888
 codemie_test_harness/tests/assistant/tools/vcs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/assistant/tools/vcs/test_assistant_with_vcs_tools.py,sha256=qOPr4XOh2rgUV2MXMxkRzRGkAKl9ViwQGCZ-dMEtscU,1145
-codemie_test_harness/tests/conftest.py,sha256=
+codemie_test_harness/tests/conftest.py,sha256=o2A8_JrhYCIqPzBJwPtlORQexLjv8k6HpZmJNPy7An0,33807
 codemie_test_harness/tests/conversations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/conversations/test_conversations_endpoints.py,sha256=HQ2nu9lXfRNkyJhA0rzar7Rmv6pMe-te0rFYAy-X5UA,4128
 codemie_test_harness/tests/e2e/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -87,6 +87,8 @@ codemie_test_harness/tests/llm/assistants/test_lite_llm.py,sha256=jYtf_J7lBSmB1L
 codemie_test_harness/tests/llm/assistants/test_llm.py,sha256=b5HhrDkz1lwCaSZH5kdPdacmLXH_Bxnj5vO_A5ho3k8,4838
 codemie_test_harness/tests/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/providers/test_providers_endpoints.py,sha256=iV9pxFOxTPVbk8aH8RGFjVDUCUDMUiRWcDMrvwqoTqk,8043
+codemie_test_harness/tests/scheduler/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+codemie_test_harness/tests/scheduler/test_scheduler_service.py,sha256=CRPoqANVErsTunJ3i9d-zqroEbBLL5R3Ks6udtlPK6g,9039
 codemie_test_harness/tests/search/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 codemie_test_harness/tests/search/test_search_assistant.py,sha256=dMTH619tOWKSCLpEVTnrClEEDEo10tqe8o4rSfs4SXs,3269
 codemie_test_harness/tests/search/test_search_datasource.py,sha256=qFxopY4w8U7EgY_vh74V0ra315iW-7u16r9APzgw5W8,6871
@@ -269,13 +271,13 @@ codemie_test_harness/tests/ui/workflows/test_workflows.py,sha256=zRBFiQYhJ_MWKGG
 codemie_test_harness/tests/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/utils/assistant_utils.py,sha256=2s1MikCfIcM3UlkcuwaedyxI2hePuWT3h8F3SDjBCtk,8022
 codemie_test_harness/tests/utils/aws_parameters_store.py,sha256=YAVpvwElkKZJZvzSVxtOue1Gjs-kvSBS2y5QvIlz484,3267
-codemie_test_harness/tests/utils/base_utils.py,sha256=
+codemie_test_harness/tests/utils/base_utils.py,sha256=WIuwbnmA5xZhb8C40zH59tf4HhjSrOh5cHWDASLTQEU,7044
 codemie_test_harness/tests/utils/client_factory.py,sha256=xGta0ZaVYzWfwJ4cu3f89KkGc_R5Bq-9lqnhr57x_2w,972
 codemie_test_harness/tests/utils/confluence_utils.py,sha256=auhip1ntqSDsHWAoWCxQxfuNv05BinS6TWXyg_F2dfc,4544
 codemie_test_harness/tests/utils/constants.py,sha256=aGs0gdHB38Ozd-UyCKNpWRoQXMy3D0bjmfxiPcdZdqY,1165
 codemie_test_harness/tests/utils/conversation_utils.py,sha256=SWj6TBWOQoX5Yh6Wk63yHQFveRXgK1mpLb3PUKAa57A,648
 codemie_test_harness/tests/utils/credentials_manager.py,sha256=xF7fjQbT4b1rPrOOQfo3ie5c06FLjUzppvTaJDVOg2s,55252
-codemie_test_harness/tests/utils/datasource_utils.py,sha256=
+codemie_test_harness/tests/utils/datasource_utils.py,sha256=1QthGUSAUPaA9N_pIZkrR0wK4S5r9e4y7ILjQ_0z2Oc,14318
 codemie_test_harness/tests/utils/env_resolver.py,sha256=25776Aq9oIDcDzGtfFs07lj7eldeFgmsocxeS3RUclE,4280
 codemie_test_harness/tests/utils/env_utils.py,sha256=9tyVgxKfYqdtSoo9dRTScOZWjAUm82_65JjaKggcwCg,3999
 codemie_test_harness/tests/utils/file_utils.py,sha256=hY-kwnyzvtd1BQif8r5NhvRTGfpKLmQKyRsq1Tuflhg,585
@@ -293,7 +295,7 @@ codemie_test_harness/tests/utils/search_utils.py,sha256=SrXiB2d9wiI5ka9bgg0CD73G
 codemie_test_harness/tests/utils/similarity_check.py,sha256=2URqvD3Ft7efwLmhh2iYVsXrYboP9f-_B_ekZmJn0ac,1527
 codemie_test_harness/tests/utils/user_utils.py,sha256=zJNrmL3Fb7iGuaVRobUMwJ2Og6NqEPcM_9lw60m18T8,242
 codemie_test_harness/tests/utils/webhook_utils.py,sha256=YjyLwAqQjR12vYFOUmYhJCJIyZvKm4SvU-1oIjIYNqg,340
-codemie_test_harness/tests/utils/workflow_utils.py,sha256=
+codemie_test_harness/tests/utils/workflow_utils.py,sha256=bHxPQkblRPF1fZp_AXI36elMFm14nzqGYoU5Eqz4MWY,11553
 codemie_test_harness/tests/utils/yaml_utils.py,sha256=iIdEl-rUUh1LgzAmD_mjfftthhvlzXyCuA37yBoH0Gw,1617
 codemie_test_harness/tests/webhook/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/webhook/test_webhook_service.py,sha256=POmxQG0tpcNW9-yKQ62CcnQpUEFYlTOs0_4H9MijIHY,8127
@@ -402,7 +404,7 @@ codemie_test_harness/tests/workflow/virtual_assistant_tools/servicenow/__init__.
 codemie_test_harness/tests/workflow/virtual_assistant_tools/servicenow/test_workflow_with_servicenow_tools.py,sha256=D835gaRbCnB4va5mi9TdA_u9StSpGXQ_fgzwW0S2pwo,1173
 codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/test_workflow_with_vcs_tools.py,sha256=Se9imIiBYuJU78m1pLu0g4ZmHygKZjr6JjIWkGXTy1Q,1364
-codemie_test_harness-0.1.
-codemie_test_harness-0.1.
-codemie_test_harness-0.1.
-codemie_test_harness-0.1.
+codemie_test_harness-0.1.217.dist-info/METADATA,sha256=KKZZdTwepcYnWr2XOgnNVVTr6lk6YGg64_GmO3gDkuk,27184
+codemie_test_harness-0.1.217.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+codemie_test_harness-0.1.217.dist-info/entry_points.txt,sha256=n98t-EOM5M1mnMl_j2X4siyeO9zr0WD9a5LF7JyElIM,73
+codemie_test_harness-0.1.217.dist-info/RECORD,,
{codemie_test_harness-0.1.215.dist-info → codemie_test_harness-0.1.217.dist-info}/WHEEL
RENAMED
File without changes

{codemie_test_harness-0.1.215.dist-info → codemie_test_harness-0.1.217.dist-info}/entry_points.txt
RENAMED
File without changes