codemie-test-harness 0.1.200__py3-none-any.whl → 0.1.202__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of codemie-test-harness might be problematic.
- codemie_test_harness/tests/utils/workflow_utils.py +14 -2
- codemie_test_harness/tests/workflow/test_workflows.py +199 -1
- {codemie_test_harness-0.1.200.dist-info → codemie_test_harness-0.1.202.dist-info}/METADATA +109 -3
- {codemie_test_harness-0.1.200.dist-info → codemie_test_harness-0.1.202.dist-info}/RECORD +6 -6
- {codemie_test_harness-0.1.200.dist-info → codemie_test_harness-0.1.202.dist-info}/WHEEL +0 -0
- {codemie_test_harness-0.1.200.dist-info → codemie_test_harness-0.1.202.dist-info}/entry_points.txt +0 -0
codemie_test_harness/tests/utils/workflow_utils.py

```diff
@@ -1,10 +1,13 @@
 import os
+from pathlib import Path
+from typing import Optional
 
 from codemie_sdk.models.workflow import (
     WorkflowCreateRequest,
     WorkflowUpdateRequest,
     WorkflowMode,
 )
+
 from codemie_test_harness.tests import PROJECT
 from codemie_test_harness.tests.utils.base_utils import (
     BaseUtils,
@@ -97,8 +100,14 @@ class WorkflowUtils(BaseUtils):
             entity_name=response[1],
         )
 
-    def execute_workflow(
-        self
+    def execute_workflow(
+        self,
+        workflow,
+        execution_name,
+        user_input="",
+        file_name: Optional[str] = None,
+    ):
+        self.client.workflows.run(workflow, user_input=user_input, file_name=file_name)
         executions = self.client.workflows.executions(workflow)
         execution_id = next(
             row.execution_id for row in executions.list() if row.prompt == user_input
@@ -232,6 +241,9 @@ class WorkflowUtils(BaseUtils):
 
         return triggered_tools
 
+    def upload_file(self, file_path: Path):
+        return self.client.files.bulk_upload([file_path])
+
     @staticmethod
    def _extract_tools_from_thoughts(thoughts_data):
         """
```
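For orientation, the two additions above are designed to work together: `upload_file` pushes a local file through the SDK's bulk upload, and the extended `execute_workflow` signature forwards the resulting URL via the new `file_name` parameter. A minimal sketch of that flow, assuming an already configured `WorkflowUtils` instance; the workflow id and execution name below are placeholders:

```python
from codemie_test_harness.tests.utils.constants import FILES_PATH


def run_workflow_with_attachment(workflow_utils, workflow_id, execution_name):
    """Sketch of the file-attachment flow enabled by this release.

    `workflow_utils` is assumed to be a configured WorkflowUtils instance;
    the workflow id, execution name, and file choice are illustrative only.
    """
    # Upload the file and take the URL returned by the bulk upload
    upload_response = workflow_utils.upload_file(FILES_PATH / "test_extended.xlsx")
    file_url = upload_response.files[0].file_url

    # The new file_name parameter attaches the uploaded file to the workflow run
    return workflow_utils.execute_workflow(
        workflow_id,
        execution_name,
        user_input="Please provide a summary about file content.",
        file_name=file_url,
    )
```

The new tests in test_workflows.py below exercise exactly this pattern through the `workflow_utils` fixture.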
codemie_test_harness/tests/workflow/test_workflows.py

```diff
@@ -3,8 +3,18 @@ import json
 import pytest
 from hamcrest import assert_that, equal_to
 
+from codemie_test_harness.tests.enums.tools import Default
+from codemie_test_harness.tests.test_data.assistant_test_data import (
+    EXCEL_TOOL_TEST_DATA,
+    DOCX_TOOL_TEST_DATA,
+)
+from codemie_test_harness.tests.test_data.file_test_data import file_test_data
 from codemie_test_harness.tests.test_data.output_schema_test_data import output_schema
-from codemie_test_harness.tests.utils.base_utils import
+from codemie_test_harness.tests.utils.base_utils import (
+    get_random_name,
+    assert_tool_triggered,
+)
+from codemie_test_harness.tests.utils.constants import FILES_PATH
 from codemie_test_harness.tests.utils.yaml_utils import AssistantModel, StateModel
 
 
@@ -37,3 +47,191 @@ def test_workflow_with_json_output_schema(default_llm, workflow, workflow_utils)
     )
 
     assert_that(json.loads(response)["results"][0], equal_to(2))
+
+
+@pytest.mark.workflow
+@pytest.mark.api
+@pytest.mark.smoke
+@pytest.mark.file
+@pytest.mark.parametrize(
+    "file_name, expected_response, expected_tool",
+    file_test_data,
+    ids=[f"{row[0]}" for row in file_test_data],
+)
+def test_workflow_with_user_input_and_file_attachment(
+    workflow_with_virtual_assistant,
+    workflow_utils,
+    assistant_utils,
+    file_name,
+    expected_response,
+    expected_tool,
+    similarity_check,
+):
+    """
+    Test workflow execution with user input that includes file attachment.
+
+    This test demonstrates how workflows can handle file attachments by:
+    1. Uploading a file
+    2. Creating a workflow with file processing capabilities
+    3. Executing the workflow with user input that references the uploaded file
+    4. Verifying that file processing tools are triggered
+    """
+    assistant_and_state_name = get_random_name()
+
+    # Upload file to get file URL
+    upload_response = workflow_utils.upload_file(FILES_PATH / file_name)
+    file_url = upload_response.files[0].file_url
+
+    # Create workflow with virtual assistant that has file analysis capabilities
+    # Note: File analysis tools are automatically available when files are processed
+    system_prompt = "You are a helpful assistant that can analyze and process files. "
+
+    workflow_instance = workflow_with_virtual_assistant(
+        assistant_and_state_name=assistant_and_state_name,
+        system_prompt=system_prompt,
+        task=(
+            "Analyze the uploaded file from the provided file URL and give a detailed summary. "
+            "Use the appropriate file analysis tools based on the file type to extract and process the content."
+        ),
+    )
+
+    # Prepare user input that includes file reference
+    user_input = "Please provide a summary about file content."
+
+    # Execute workflow
+    response = workflow_utils.execute_workflow(
+        workflow_instance.id,
+        assistant_and_state_name,
+        user_input=user_input,
+        file_name=file_url,
+    )
+
+    # Extract triggered tools from execution
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        workflow_instance
+    )
+
+    assert_tool_triggered(expected_tool, triggered_tools)
+    similarity_check.check_similarity(response, expected_response)
+
+
+@pytest.mark.workflow
+@pytest.mark.file
+@pytest.mark.api
+@pytest.mark.smoke
+@pytest.mark.parametrize("prompt, expected_response", EXCEL_TOOL_TEST_DATA)
+def test_workflow_excel_tool_extended_functionality(
+    workflow_with_virtual_assistant,
+    workflow_utils,
+    similarity_check,
+    prompt,
+    expected_response,
+):
+    """
+    Test extended Excel tool functionality with various scenarios in workflow.
+
+    This test covers:
+    - Data extraction from visible sheets only
+    - All data including hidden sheets
+    - Sheet name listing functionality
+    - File statistics and structure analysis
+    - Single sheet extraction by index and name
+    - Data cleaning and normalization
+    - Hidden sheet visibility control
+    - Column structure and data type analysis
+    - Tabular structure normalization
+    - Multi-sheet comprehensive analysis
+
+    """
+    assistant_and_state_name = get_random_name()
+
+    # Upload file to get file URL
+    upload_response = workflow_utils.upload_file(FILES_PATH / "test_extended.xlsx")
+    file_url = upload_response.files[0].file_url
+
+    # Create workflow with virtual assistant that has Excel file processing capabilities
+    system_prompt = "You have all required information in initial prompt. Do not ask additional questions and proceed with request."
+
+    workflow_instance = workflow_with_virtual_assistant(
+        assistant_and_state_name=assistant_and_state_name,
+        system_prompt=system_prompt,
+        task=prompt,
+    )
+
+    # Execute workflow with the file URL
+    response = workflow_utils.execute_workflow(
+        workflow_instance.id,
+        assistant_and_state_name,
+        user_input="Process the uploaded Excel file as requested.",
+        file_name=file_url,
+    )
+
+    # Extract triggered tools from execution
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        workflow_instance
+    )
+
+    assert_tool_triggered(Default.EXCEL_TOOL, triggered_tools)
+    similarity_check.check_similarity(response, expected_response)
+
+
+@pytest.mark.workflow
+@pytest.mark.file
+@pytest.mark.api
+@pytest.mark.smoke
+@pytest.mark.parametrize("prompt, expected_response", DOCX_TOOL_TEST_DATA)
+def test_workflow_docx_tool_extended_functionality(
+    workflow_with_virtual_assistant,
+    workflow_utils,
+    similarity_check,
+    prompt,
+    expected_response,
+):
+    """
+    Test extended Docx tool functionality with various scenarios in workflow.
+
+    This test covers:
+    - Extract plain text using 'text' query
+    - Extract text with metadata using 'text_with_metadata' query
+    - Extract document structure using 'structure_only' query
+    - Extract tables using 'table_extraction' query
+    - Generate summary using 'summary' query
+    - Perform analysis with custom instructions using 'analyze' query
+    - Process specific pages '1-3' using pages parameter
+    - Process specific pages '1,5,10' using pages parameter
+    - Extract images using 'image_extraction' query
+    - Extract text with OCR from images using 'text_with_images' query
+
+    """
+    assistant_and_state_name = get_random_name()
+
+    # Upload file to get file URL
+    upload_response = workflow_utils.upload_file(FILES_PATH / "test_extended.docx")
+    file_url = upload_response.files[0].file_url
+
+    # Create workflow with virtual assistant that has DOCX file processing capabilities
+    system_prompt = """You are a helpful assistant that can analyze and process DOCX files.
+    You have all required information in initial prompt.
+    Do not ask additional questions and proceed with request."""
+
+    workflow_instance = workflow_with_virtual_assistant(
+        assistant_and_state_name=assistant_and_state_name,
+        system_prompt=system_prompt,
+        task=prompt,
+    )
+
+    # Execute workflow with the file URL
+    response = workflow_utils.execute_workflow(
+        workflow_instance.id,
+        assistant_and_state_name,
+        user_input="Process the uploaded DOCX file as requested.",
+        file_name=file_url,
+    )
+
+    # Extract triggered tools from execution
+    triggered_tools = workflow_utils.extract_triggered_tools_from_execution(
+        workflow_instance
+    )
+
+    assert_tool_triggered(Default.DOCX_TOOL, triggered_tools)
+    similarity_check.check_similarity(response, expected_response)
```
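The parametrizations above pull their rows from the test_data modules, which are not part of this diff. Based on the argument lists and the `ids=[f"{row[0]}" for row in file_test_data]` expression, `file_test_data` appears to be a sequence of `(file_name, expected_response, expected_tool)` tuples, while the Excel and DOCX data sets look like `(prompt, expected_response)` pairs. A purely hypothetical sketch of those shapes:

```python
# Hypothetical rows for illustration only; the real values live in
# codemie_test_harness/tests/test_data/ and may differ.
file_test_data = [
    # (file_name, expected_response, expected_tool)
    ("test_extended.xlsx", "A short summary of the spreadsheet contents", "excel_tool"),
    ("test_extended.docx", "A short summary of the document contents", "docx_tool"),
]

EXCEL_TOOL_TEST_DATA = [
    # (prompt, expected_response)
    ("List the sheet names in the uploaded Excel file.", "The workbook contains the sheets ..."),
]
```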
{codemie_test_harness-0.1.200.dist-info → codemie_test_harness-0.1.202.dist-info}/METADATA

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: codemie-test-harness
-Version: 0.1.200
+Version: 0.1.202
 Summary: Autotest for CodeMie backend and UI
 Author: Anton Yeromin
 Author-email: anton_yeromin@epam.com
@@ -13,7 +13,7 @@ Requires-Dist: aws-assume-role-lib (>=2.10.0,<3.0.0)
 Requires-Dist: boto3 (>=1.39.8,<2.0.0)
 Requires-Dist: click (>=8.1.7,<9.0.0)
 Requires-Dist: codemie-plugins (>=0.1.123,<0.2.0)
-Requires-Dist: codemie-sdk-python (==0.1.
+Requires-Dist: codemie-sdk-python (==0.1.202)
 Requires-Dist: pytest (>=8.4.1,<9.0.0)
 Requires-Dist: pytest-playwright (>=0.7.0,<0.8.0)
 Requires-Dist: pytest-repeat (>=0.9.3,<0.10.0)
@@ -59,6 +59,7 @@ The CLI now provides four main command groups:
 2. **`run`** - Enhanced test execution with flexible parameters
 3. **`assistant`** - Direct assistant interaction and chat capabilities
 4. **`workflow`** - Workflow execution
+5. **`marks`** - List all available pytest marks in the test suite.
 
 Each command group includes extensive help and validation features.
 
@@ -101,6 +102,49 @@ codemie-test-harness config set PYTEST_RERUNS 2
 codemie-test-harness config set PYTEST_COUNT 10 # For performance testing (optional)
 ```
 
+#### Running Tests Locally (Minimal Configuration)
+
+If you're running tests against a **local CodeMie instance** and want to use integration settings already stored in AWS Parameter Store (instead of configuring each integration manually), you only need to set these **4 minimal keys**:
+
+```shell
+# Minimal local setup - uses AWS Parameter Store for all integrations
+codemie-test-harness config set AWS_ACCESS_KEY <your_aws_access_key>
+codemie-test-harness config set AWS_SECRET_KEY <your_aws_secret_key>
+codemie-test-harness config set CODEMIE_API_DOMAIN http://localhost:8080
+codemie-test-harness config set TEST_USER_FULL_NAME "dev-codemie-user"
+```
+
+Then run your tests:
+
+```shell
+# Run all tests locally
+codemie-test-harness run
+
+# Run specific test categories
+codemie-test-harness run --marks smoke
+codemie-test-harness run --marks "api and not not_for_parallel_run" -n 8
+```
+
+**How it works:**
+- The AWS credentials allow the test harness to automatically fetch integration credentials (GitLab, JIRA, Confluence, etc.) from AWS Parameter Store
+- You don't need to manually configure individual integrations unless you want to override specific values
+- All 86+ integration variables are automatically pulled from Parameter Store as needed
+- This is ideal for local development and testing against your local CodeMie backend
+
+**To override specific integrations locally:**
+
+If you need to use your own tokens instead of shared Parameter Store values:
+
+```shell
+# Override with personal GitLab token
+codemie-test-harness config set GITLAB_TOKEN <your_personal_token>
+
+# Override with personal JIRA credentials
+codemie-test-harness config set JIRA_TOKEN <your_personal_token>
+```
+
+Values set explicitly in the config take priority over AWS Parameter Store values.
+
 #### Integration Categories & Management
 
 The CLI supports **10 major integration categories** with comprehensive credential management:
@@ -296,6 +340,35 @@ codemie-test-harness --git-env github run --marks git
 codemie-test-harness run --marks ui --headless
 ```
 
+#### Advanced Marks Usage (Logical Operators)
+
+Combine multiple markers using `and`, `or`, and `not` keywords for fine-grained test selection:
+
+```shell
+# OR operator - run tests with either marker
+codemie-test-harness run --marks "smoke or gitlab" -n 8
+codemie-test-harness run --marks "jira_kb or confluence_kb" -n 6
+
+# AND operator - run tests with both markers
+codemie-test-harness run --marks "api and smoke" -n 10
+codemie-test-harness run --marks "gitlab and code_kb" -n 4
+
+# NOT operator - exclude specific markers
+codemie-test-harness run --marks "api and not ui" -n 10
+codemie-test-harness run --marks "not not_for_parallel_run" -n 12
+
+# Complex combinations with parentheses
+codemie-test-harness run --marks "(smoke or api) and not ui" -n 8
+codemie-test-harness run --marks "(gitlab or github) and not not_for_parallel_run" -n 10
+codemie-test-harness run --marks "smoke and (jira_kb or confluence_kb)" -n 6
+
+# Exclude multiple markers
+codemie-test-harness run --marks "api and not (ui or not_for_parallel_run)" -n 10
+
+# Run all knowledge base tests except code
+codemie-test-harness run --marks "(jira_kb or confluence_kb) and not code_kb" -n 8
+```
+
 #### Performance and Load Testing
 
 Run tests multiple times in parallel to simulate load and measure performance:
@@ -542,7 +615,7 @@ The credentials manager supports **86+ environment variables** across **10 categ
 2) Create a .env file in the project root. If you provide AWS credentials, the suite will fetch additional values from AWS Systems Manager Parameter Store and recreate .env accordingly.
 
 ```properties
-
+CODEMIE_API_DOMAIN=http://localhost:8080
 
 AWS_ACCESS_KEY=<aws_access_token>
 AWS_SECRET_KEY=<aws_secret_key>
@@ -594,9 +667,42 @@ pytest -n 10 --count 50 -m excel_generation # Run 50 times with 10 workers
 pytest -n 20 --count 100 -m smoke --reruns 2 # Heavy load with retries
 ```
 
+**Advanced Marks Usage with Logical Operators:**
+
+Combine markers using `and`, `or`, and `not` for precise test selection:
+
+```shell
+# OR operator - run tests with either marker
+pytest -n 8 -m "smoke or gitlab" --reruns 2
+pytest -n 6 -m "jira_kb or confluence_kb" --reruns 2
+
+# AND operator - run tests with both markers
+pytest -n 10 -m "api and smoke" --reruns 2
+pytest -n 4 -m "gitlab and code_kb" --reruns 2
+
+# NOT operator - exclude specific markers
+pytest -n 10 -m "api and not ui" --reruns 2
+pytest -n 12 -m "not not_for_parallel_run" --reruns 2
+
+# Complex combinations with parentheses
+pytest -n 8 -m "(smoke or api) and not ui" --reruns 2
+pytest -n 10 -m "(gitlab or github) and not not_for_parallel_run" --reruns 2
+pytest -n 6 -m "smoke and (jira_kb or confluence_kb)" --reruns 2
+
+# Exclude multiple markers
+pytest -n 10 -m "api and not (ui or not_for_parallel_run)" --reruns 2
+
+# Run all knowledge base tests except code KB
+pytest -n 8 -m "(jira_kb or confluence_kb) and not code_kb" --reruns 2
+
+# Run all Git-related tests (GitLab or GitHub)
+pytest -n 8 -m "gitlab or github or git" --reruns 2
+```
+
 **Notes:**
 - `--reruns 2` uses pytest-rerunfailures to improve resiliency in flaky environments
 - `--count N` uses pytest-repeat to run each test N times (useful for performance/load testing)
+- Use quotes around marker expressions containing spaces or special characters
 
 #### Test Timeout Configuration
 
````
{codemie_test_harness-0.1.200.dist-info → codemie_test_harness-0.1.202.dist-info}/RECORD

```diff
@@ -284,7 +284,7 @@ codemie_test_harness/tests/utils/pytest_utils.py,sha256=k-mEjX2qpnh37sqKpJqYhZT6
 codemie_test_harness/tests/utils/search_utils.py,sha256=SrXiB2d9wiI5ka9bgg0CD73GOX_1mqi2Hz5FBm5DsEU,1435
 codemie_test_harness/tests/utils/similarity_check.py,sha256=1U66NGh6esISKABodtVobE2WnuFt0f6vcK3qUri6ZqU,1485
 codemie_test_harness/tests/utils/user_utils.py,sha256=zJNrmL3Fb7iGuaVRobUMwJ2Og6NqEPcM_9lw60m18T8,242
-codemie_test_harness/tests/utils/workflow_utils.py,sha256=
+codemie_test_harness/tests/utils/workflow_utils.py,sha256=Kz4F4Xzdx42PNmye--ENpjSakdB1Y-QL_nwSfKhMAXE,9359
 codemie_test_harness/tests/utils/yaml_utils.py,sha256=y9fUf4u4G4SoCktPOwaC5x71iaDKhktbz_XUfI9kNis,1661
 codemie_test_harness/tests/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/workflow/assistant_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -350,7 +350,7 @@ codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_repo
 codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_research_tools.py,sha256=lQzzWV6mcKH1_8k4-OxXoI9_hb48JJbYAmDT9bj-WQ0,2558
 codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_servicenow_tools.py,sha256=meR9c8BgMTgM6LpwqE0n7ygTCIHGVooMvYfb6kGF7UE,2377
 codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_vcs_tools.py,sha256=dbfjHnIhjneW3_JZyElvmswE7omOp0A0EgzoztZ7vow,3098
-codemie_test_harness/tests/workflow/test_workflows.py,sha256=
+codemie_test_harness/tests/workflow/test_workflows.py,sha256=HxwY2VKGLvLW2sl3CwR_0xFOmXrLwj_nfGdqnC39CSw,8129
 codemie_test_harness/tests/workflow/virtual_assistant_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/workflow/virtual_assistant_tools/access_management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/workflow/virtual_assistant_tools/access_management/test_workflow_with_keycloak_tool.py,sha256=eLM804LRG-Rd5ZIgrO_W53FGoSJ_TxWPKQPtWG598CY,1203
@@ -391,7 +391,7 @@ codemie_test_harness/tests/workflow/virtual_assistant_tools/servicenow/__init__.
 codemie_test_harness/tests/workflow/virtual_assistant_tools/servicenow/test_workflow_with_servicenow_tools.py,sha256=D835gaRbCnB4va5mi9TdA_u9StSpGXQ_fgzwW0S2pwo,1173
 codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 codemie_test_harness/tests/workflow/virtual_assistant_tools/vcs/test_workflow_with_vcs_tools.py,sha256=Se9imIiBYuJU78m1pLu0g4ZmHygKZjr6JjIWkGXTy1Q,1364
-codemie_test_harness-0.1.
-codemie_test_harness-0.1.
-codemie_test_harness-0.1.
-codemie_test_harness-0.1.
+codemie_test_harness-0.1.202.dist-info/METADATA,sha256=9xYAqqEciTJKycdxV0HID1vYNmZFI-qLDaRKtFkC1lE,27184
+codemie_test_harness-0.1.202.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+codemie_test_harness-0.1.202.dist-info/entry_points.txt,sha256=n98t-EOM5M1mnMl_j2X4siyeO9zr0WD9a5LF7JyElIM,73
+codemie_test_harness-0.1.202.dist-info/RECORD,,
```
File without changes
|
{codemie_test_harness-0.1.200.dist-info → codemie_test_harness-0.1.202.dist-info}/entry_points.txt
RENAMED
|
File without changes
|