codemie-test-harness 0.1.157__py3-none-any.whl → 0.1.159__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of codemie-test-harness might be problematic.

Files changed (27)
  1. codemie_test_harness/tests/assistant/tools/datamanagement/test_assistant_with_data_management_tools.py +8 -4
  2. codemie_test_harness/tests/assistant/tools/report_portal/__init__.py +0 -0
  3. codemie_test_harness/tests/assistant/tools/report_portal/test_assistant_report_portal_tools.py +32 -0
  4. codemie_test_harness/tests/conftest.py +11 -0
  5. codemie_test_harness/tests/enums/tools.py +14 -0
  6. codemie_test_harness/tests/integrations/project/test_default_integrations.py +44 -1
  7. codemie_test_harness/tests/integrations/project/test_project_integrations.py +7 -0
  8. codemie_test_harness/tests/integrations/user/test_default_integrations.py +44 -0
  9. codemie_test_harness/tests/integrations/user/test_user_integrations.py +7 -0
  10. codemie_test_harness/tests/test_data/data_management_tools_test_data.py +4 -37
  11. codemie_test_harness/tests/test_data/direct_tools/data_management_tools_test_data.py +1 -1
  12. codemie_test_harness/tests/test_data/direct_tools/report_portal_tools_test_data.py +1235 -0
  13. codemie_test_harness/tests/test_data/integrations_test_data.py +16 -0
  14. codemie_test_harness/tests/test_data/report_portal_tools_test_data.py +520 -0
  15. codemie_test_harness/tests/utils/aws_parameters_store.py +28 -0
  16. codemie_test_harness/tests/workflow/assistant_tools/data_management/test_workflow_with_assistant_with_data_management_tools.py +4 -5
  17. codemie_test_harness/tests/workflow/assistant_tools/report_portal/__init__.py +0 -0
  18. codemie_test_harness/tests/workflow/assistant_tools/report_portal/test_workflow_with_assistant_with_report_portal_tools.py +38 -0
  19. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_report_portal_tools.py +115 -0
  20. codemie_test_harness/tests/workflow/virtual_assistant_tools/data_management/test_workflow_with_data_management_tools.py +4 -5
  21. codemie_test_harness/tests/workflow/virtual_assistant_tools/report_portal/__init__.py +0 -0
  22. codemie_test_harness/tests/workflow/virtual_assistant_tools/report_portal/test_workflow_with_report_portal_tool.py +39 -0
  23. {codemie_test_harness-0.1.157.dist-info → codemie_test_harness-0.1.159.dist-info}/METADATA +2 -2
  24. {codemie_test_harness-0.1.157.dist-info → codemie_test_harness-0.1.159.dist-info}/RECORD +27 -18
  25. codemie_test_harness/tests/workflow/direct_tools_calling/{test_workflow_with_data_management_tools.py → test_workflow_with_data_management_tools_sql.py} +1 -1
  26. {codemie_test_harness-0.1.157.dist-info → codemie_test_harness-0.1.159.dist-info}/WHEEL +0 -0
  27. {codemie_test_harness-0.1.157.dist-info → codemie_test_harness-0.1.159.dist-info}/entry_points.txt +0 -0
@@ -157,6 +157,11 @@ valid_integrations = [
  ),
  ],
  ),
+ pytest.param(
+ CredentialTypes.REPORT_PORTAL,
+ CredentialsUtil.report_portal_credentials(),
+ marks=pytest.mark.report_portal,
+ ),
  ]

  testable_integrations = [
@@ -261,6 +266,11 @@ testable_integrations = [
  ),
  ],
  ),
+ pytest.param(
+ CredentialTypes.REPORT_PORTAL,
+ CredentialsUtil.report_portal_credentials(),
+ marks=pytest.mark.report_portal,
+ ),
  ]

  invalid_integrations = [
@@ -349,4 +359,10 @@ invalid_integrations = [
  pytest.mark.cloud,
  ],
  ),
+ pytest.param(
+ CredentialTypes.REPORT_PORTAL,
+ CredentialsUtil.invalid_report_portal_credentials(),
+ "401 Client Error: for url: https://report-portal.core.kuberocketci.io/api/v1/epm-cdme/launch?page.page=1",
+ marks=pytest.mark.report_portal,
+ ),
  ]
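
Each of the new parameter sets above is wrapped in `pytest.param(..., marks=pytest.mark.report_portal)`, so the Report Portal cases can be selected or deselected as a group (for example with `pytest -m report_portal` or `pytest -m "not report_portal"`). A minimal, self-contained sketch of that gating pattern, assuming only that the `report_portal` marker is registered in the harness's pytest configuration; the test name and parameter values below are illustrative and not taken from the diff:

```python
import pytest

# Illustrative parameter list: only the first case carries the report_portal
# marker, mirroring how the diff attaches marks to the new Report Portal entries.
cases = [
    pytest.param("report_portal", marks=pytest.mark.report_portal),
    pytest.param("jira"),
]


@pytest.mark.parametrize("integration", cases)
def test_integration_selection(integration):
    # Running `pytest -m report_portal` collects only the marked case;
    # `pytest -m "not report_portal"` skips it, e.g. when no credentials exist.
    assert integration
```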
@@ -0,0 +1,520 @@
+ from codemie_test_harness.tests.enums.tools import Toolkit, ReportPortalTool
+
+ rp_test_data = [
+ (
+ Toolkit.REPORT_PORTAL,
+ ReportPortalTool.GET_EXTENDED_LAUNCH_DATA_AS_RAW,
+ "Get details for the launch with ID 23 in a raw HTML format",
+ """
+ The launch with ID 23 has been successfully retrieved in HTML format. Here is the raw HTML content for your reference:
+
+ ```html
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+ <html>
+ <head>
+ <title></title>
+ <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
+ <style type="text/css">
+ a {text-decoration: none}
+ </style>
+ </head>
+ <body text="#000000" link="#000000" alink="#000000" vlink="#000000">
+ <table role="none" width="100%" cellpadding="0" cellspacing="0" border="0">
+ <tr>
+ <td width="50%">&nbsp;</td><td align="center">
+ <table id="JR_PAGE_ANCHOR_0_1" role="none" class="jrPage" data-jr-height="57605" cellpadding="0" cellspacing="0" border="0" style="empty-cells: show; width: 842px; border-collapse: collapse;">
+ <style type="text/css">
+ #JR_PAGE_ANCHOR_0_1 th {font-weight: normal;}
+ #JR_PAGE_ANCHOR_0_1 ul {list-style-type: disc; padding-inline-start: 40px; margin: 0px;}
+ #JR_PAGE_ANCHOR_0_1 ol {list-style-type: decimal; padding-inline-start: 40px; margin: 0px;}
+ </style>
+ <tr role="none" valign="top" style="height:0">
+ <td style="width:20px"></td>
+ <!-- Additional table cells with widths defined -->
+ </tr>
+ <tr valign="top" style="height:20px">
+ <td colspan="26">
+ <!-- Detailed information about the launch -->
+ </td>
+ </tr>
+ <!-- Additional rows containing more detailed information about steps, types, and statistics -->
+ </table>
+ </td>
+ </tr>
+ <tr>
+ <td width="50%">&nbsp;</td>
+ </tr>
+ </table>
+ </body>
+ </html>
+ ```
+ This HTML document contains structured details about the launch, including metadata, statistics, and step-by-step information.
+ If you need to reformat the data or extract specific information, please let me know how I can assist further!,
+ """,
+ ),
+ (
+ Toolkit.REPORT_PORTAL,
+ ReportPortalTool.GET_LAUNCH_DETAILS,
+ "Get details about launch with ID 23",
+ """
+ Launch Details: Pytest Regression (Launch 2)
+ Overview
+ ID: 23
+ Name: Pytest Regression
+ Number: 2
+ Status: STOPPED
+ Owner: anton_yeromin
+ Environment: azure
+ Timing Information
+ Start Time: September 8, 2025, 08:46:01
+ End Time: September 8, 2025, 09:11:14
+ Duration: ~25 minutes
+ Test Execution Statistics
+ Total Tests: 888
+ Passed: 743 (83.7%)
+ Failed: 4 (0.5%)
+ Skipped: 141 (15.9%)
+ Defect Information
+ To Investigate: 145 issues
+ This launch appears to have been manually stopped, as indicated by its status and description.
+ Despite being stopped, it shows a good pass rate of 83.7%, with only 4 failed tests.
+ However, there are 145 items marked as "to investigate" which might require attention.
+ """,
+ ),
+ (
+ Toolkit.REPORT_PORTAL,
+ ReportPortalTool.GET_ALL_LAUNCHES,
+ "Get all Report Portal launches",
+ """
+ ReportPortal Launches Summary
+ Here are all the recent launches in ReportPortal:
+ 1. Pytest Regression - Launch 1
+ Status: STOPPED
+ Owner: anton_yeromin
+ Environment: azure
+ Duration: September 8, 2025, 08:17 - 08:46 (29 minutes)
+ Test Results: 888 total tests (728 passed, 19 failed, 141 skipped)
+ Issues: 160 marked "to investigate"
+ 2. Pytest Regression - Launch 2
+ Status: STOPPED
+ Owner: anton_yeromin
+ Environment: azure
+ Duration: September 8, 2025, 08:46 - 09:11 (25 minutes)
+ Test Results: 888 total tests (743 passed, 4 failed, 141 skipped)
+ Issues: 145 marked "to investigate"
+ 3. Pytest Regression - Launch 4
+ Status: FAILED
+ Owner: anton_yeromin
+ Environment: preview
+ Duration: September 8, 2025, 11:49 - 12:06 (17 minutes)
+ Test Results: 888 total tests (846 passed, 0 failed, 42 skipped)
+ Issues: 42 marked "to investigate"
+ 4. Pytest Regression - Launch 5
+ Status: STOPPED
+ Owner: anton_yeromin
+ Environment: azure
+ Duration: September 8, 2025, 12:43 - 13:16 (33 minutes)
+ Test Results: 888 total tests (744 passed, 3 failed, 141 skipped)
+ Issues: 144 marked "to investigate"
+ 5. Pytest Regression - Launch 6
+ Status: STOPPED
+ Owner: anton_yeromin
+ Environment: preview
+ Duration: September 8, 2025, 13:50 - 13:52 (2 minutes)
+ Test Results: 15 total tests (1 passed, 14 failed, 0 skipped)
+ Issues: None reported
+ 6. Pytest Regression - Launch 7
+ Status: FAILED
+ Owner: anton_yeromin
+ Environment: preview
+ Duration: September 8, 2025, 13:58 - 14:16 (18 minutes)
+ Test Results: 888 total tests (843 passed, 3 failed, 42 skipped)
+ Issues: 45 marked "to investigate"
+ 7. Pytest Regression - Launch 8
+ Status: FAILED
+ Owner: anton_yeromin
+ Environment: preview
+ Duration: September 8, 2025, 15:00 - 15:17 (17 minutes)
+ Test Results: 888 total tests (845 passed, 1 failed, 42 skipped)
+ Issues: 43 marked "to investigate"
+ 8. Pytest Regression - Launch 9
+ Status: INTERRUPTED
+ Owner: anton_yeromin
+ Environment: aws
+ Duration: September 9, 2025, 06:36 - 09:55 (3 hours, 19 minutes)
+ Test Results: 888 total tests (770 passed, 8 failed, 110 skipped)
+ Issues: 118 marked "to investigate"
+ 9. Pytest Regression - Launch 10
+ Status: INTERRUPTED
+ Owner: anton_yeromin
+ Environment: azure
+ Duration: September 9, 2025, 06:38 - 09:55 (3 hours, 17 minutes)
+ Test Results: 888 tReportPortal Launches Summary
+ Here are all the recent launches in ReportPortal:
+ 1. Pytest Regression - Launch 1
+ Status: STOPPED
+ Owner: anton_yeromin
+ Environment: azure
+ Duration: September 8, 2025, 08:17 - 08:46 (29 minutes)
+ Test Results: 888 total tests (728 passed, 19 failed, 141 skipped)
+ Issues: 160 marked "to investigate"
+ 2. Pytest Regression - Launch 2
+ Status: STOPPED
+ Owner: anton_yeromin
+ Environment: azure
+ Duration: September 8, 2025, 08:46 - 09:11 (25 minutes)
+ Test Results: 888 total tests (743 passed, 4 failed, 141 skipped)
+ Issues: 145 marked "to investigate"
+ 3. Pytest Regression - Launch 4
+ Status: FAILED
+ Owner: anton_yeromin
+ Environment: preview
+ Duration: September 8, 2025, 11:49 - 12:06 (17 minutes)
+ Test Results: 888 total tests (846 passed, 0 failed, 42 skipped)
+ Issues: 42 marked "to investigate"
+ 4. Pytest Regression - Launch 5
+ Status: STOPPED
+
+ Owner: anton_yeromin
+
+ Environment: azure
+
+ Duration: September 8, 2025, 12:43 - 13:16 (33 minutes)
+
+ Test Results: 888 total tests (744 passed, 3 failed, 141 skipped)
+
+ Issues: 144 marked "to investigate"
+
+ 5. Pytest Regression - Launch 6
+ Status: STOPPED
+
+ Owner: anton_yeromin
+
+ Environment: preview
+
+ Duration: September 8, 2025, 13:50 - 13:52 (2 minutes)
+
+ Test Results: 15 total tests (1 passed, 14 failed, 0 skipped)
+
+ Issues: None reported
+
+ 6. Pytest Regression - Launch 7
+ Status: FAILED
+
+ Owner: anton_yeromin
+
+ Environment: preview
+
+ Duration: September 8, 2025, 13:58 - 14:16 (18 minutes)
+
+ Test Results: 888 total tests (843 passed, 3 failed, 42 skipped)
+
+ Issues: 45 marked "to investigate"
+
+ 7. Pytest Regression - Launch 8
+ Status: FAILED
+
+ Owner: anton_yeromin
+
+ Environment: preview
+
+ Duration: September 8, 2025, 15:00 - 15:17 (17 minutes)
+
+ Test Results: 888 total tests (845 passed, 1 failed, 42 skipped)
+
+ Issues: 43 marked "to investigate"
+
+ 8. Pytest Regression - Launch 9
+ Status: INTERRUPTED
+
+ Owner: anton_yeromin
+
+ Environment: aws
+
+ Duration: September 9, 2025, 06:36 - 09:55 (3 hours, 19 minutes)
+
+ Test Results: 888 total tests (770 passed, 8 failed, 110 skipped)
+
+ Issues: 118 marked "to investigate"
+
+ 9. Pytest Regression - Launch 10
+ Status: INTERRUPTED
+
+ Owner: anton_yeromin
+
+ Environment: azure
+
+ Duration: September 9, 2025, 06:38 - 09:55 (3 hours, 17 minutes)
+
+ Test Results: 888 total tests (300 passed, 447 failed, 141 skipped)
+
+ Issues: 588 marked "to investigate"
+
+ 10. Pytest Regression - Launch 11
+ Status: INTERRUPTED
+
+ Owner: anton_yeromin
+
+ Environment: azure
+
+ Duration: September 9, 2025, 07:00 - 10:55 (3 hours, 55 minutes)
+
+ Test Results: 79 total tests (5 passed, 55 failed, 19 skipped)
+
+ Issues: 66 marked "to investigate"
+
+ 11. Pytest Regression - Launch 12
+ Status: INTERRUPTED
+
+ Owner: anton_yeromin
+
+ Environment: gcp
+
+ Duration: September 9, 2025, 07:21 - 10:55 (3 hours, 34 minutes)
+
+ Test Results: 888 total tests (732 passed, 9 failed, 147 skipped)
+
+ Issues: 156 marked "to investigate"
+
+ 12. Pytest Regression - Launch 13
+ Status: IN_PROGRESS
+
+ Owner: anton_yeromin
+
+ Environment: azure
+
+ Start Time: September 9, 2025, 10:00
+
+ Test Results (so far): 888 total tests (309 passed, 438 failed, 141 skipped)
+
+ Issues: 579 marked "to investigate"
+
+ 13. Pytest Regression - Launch 14
+ Status: FAILED
+
+ Owner: anton_yeromin
+
+ Environment: preview
+
+ Duration: September 9, 2025, 10:01 - 10:40 (39 minutes)
+
+ Test Results: 888 total tests (811 passed, 35 failed, 42 skipped)
+
+ Issues: 77 marked "to investigate"
+ """,
+ ),
+ (
+ Toolkit.REPORT_PORTAL,
+ ReportPortalTool.FIND_TEST_ITEM_BY_ID,
+ "Get test item by ID 2884",
+ """
+ Test Item Details: ID 2884
+ Test Information
+ Name: test_workflow_with_assistant_with_ado_test_plan_tools
+ Status: FAILED
+ Type: STEP
+ Path: workflow/assistant_tools/ado
+ Launch ID: 23
+ Launch Number: 2
+ Location
+ File: codemie_test_harness/tests/workflow/assistant_tools/ado/test_workflow_with_assistant_with_ado_test_plan_tools.py
+ Timing
+ Start Time: September 8, 2025, 08:51:45
+ End Time: September 8, 2025, 08:52:54
+ Duration: 1 minute, 9 seconds
+ Test Statistics
+ Executions: Failed: 1
+ Defects: To Investigate: 1 (ti001)
+ Attributes
+ Type: Regression
+ Issue Analysis
+ This test attempts to create an assistant with Azure DevOps Test Plan tools and create a test plan in the "CodemieAnton" project.
+ The test appears to have failed, though the logs show that the assistant was created successfully and the test plan was created with ID 3.
+ Without seeing explicit error messages in the logs, the failure might be related to:
+ Assertion failures that weren't logged
+ Issues during test teardown
+ Expected vs. actual state differences
+ Timeout issues
+ Would you like me to retrieve the logs for this test item to investigate the failure further?
+ """,
+ ),
+ (
+ Toolkit.REPORT_PORTAL,
+ ReportPortalTool.GET_TEST_ITEMS_FOR_LAUNCH,
+ "Get test items for launch ID 23",
+ """
+ Here are some test items for launch ID 23:
+
+ 1. **Test Item:** `test_cli_mcp_server[cat file.txt]`
+ - **File:** `codemie_test_harness/tests/assistant/tools/mcp/test_cli_mcp_server.py`
+ - **Status:** PASSED
+ - **Attributes:** Regression, MCP
+ - **Start Time:** 2025-09-08T08:46:34.564Z
+ - **End Time:** 2025-09-08T08:46:41.799Z
+
+ 2. **Test Item:** `test_create_assistant_and_prompt_with_file[test.csv]`
+ - **File:** `codemie_test_harness/tests/assistant/test_assistants.py`
+ - **Status:** PASSED
+ - **Attributes:** Regression, Smoke, Testcase EPMCDME-4001, EPMCDME-4002, EPMCDME-2527
+ - **Start Time:** 2025-09-08T08:46:34.565Z
+ - **End Time:** 2025-09-08T08:46:50.522Z
+
+ 3. **Test Item:** `test_assistant_with_codebase_tools[Toolkit.CODEBASE_TOOLS_CodeBaseTool.SEARCH_CODE_REPO_V2]`
+ - **File:** `codemie_test_harness/tests/assistant/tools/codebase/test_codebase_tools.py`
+ - **Status:** PASSED
+ - **Attributes:** Regression
+ - **Start Time:** 2025-09-08T08:46:34.566Z
+ - **End Time:** 2025-09-08T08:47:21.503Z
+
+ 4. **Test Item:** `test_edit_description_for_file_datasource`
+ - **File:** `codemie_test_harness/tests/assistant/datasource/test_file_indexing.py`
+ - **Status:** PASSED
+ - **Attributes:** Regression, Smoke
+ - **Start Time:** 2025-09-08T08:46:34.606Z
+ - **End Time:** 2025-09-08T08:46:41.655Z
+
+ 5. **Test Item:** `test_create_assistant_with_file_datasource[gecko-test.ini]`
+ - **File:** `codemie_test_harness/tests/assistant/datasource/test_file_indexing.py`
+ - **Status:** SKIPPED
+ - **Attributes:** Regression, Smoke
+ - **Start Time:** 2025-09-08T08:46:34.564Z
+ - **End Time:** 2025-09-08T08:46:34.627Z
+ """,
+ ),
+ (
+ Toolkit.REPORT_PORTAL,
+ ReportPortalTool.GET_LOGS_FOR_TEST_ITEM,
+ "Get logs for test item ID 2884",
+ """
+ Here are the logs for test item ID 2884:
+
+ 1. **Time:** 2025-09-08T08:51:45.164Z
+ **Level:** DEBUG
+ **Message:** Making /v1/assistants request to https://codemie-azure.eks-sandbox.aws.main.edp.projects.epam.com/code-assistant-api<class 'dict'>
+
+ 2. **Time:** 2025-09-08T08:51:45.164Z
+ **Level:** DEBUG
+ **Message:** Request body: {'name': 'sdp_xjwyipzcypfocwq', 'description': 'Integration test assistant', 'system_prompt': 'You are a helpful integration test assistant', 'project': 'codemie', 'context': [], 'llm_model_type': 'gpt-4o-2024-08-06', 'toolkits': [{'toolkit': 'Azure DevOps Test Plan', 'tools': [{'name': 'create_test_plan', ...}]
+
+ 3. **Time:** 2025-09-08T08:51:45.165Z
+ **Level:** DEBUG
+ **Message:** Starting new HTTPS connection (1): codemie-azure.eks-sandbox.aws.main.edp.projects.epam.com:443
+
+ 4. **Time:** 2025-09-08T08:51:45.226Z
+ **Level:** DEBUG
+ **Message:** https://codemie-azure.eks-sandbox.aws.main.edp.projects.epam.com:443 "POST /code-assistant-api/v1/assistants HTTP/1.1" 200 39
+
+ 5. **Time:** 2025-09-08T08:51:45.226Z
+ **Level:** DEBUG
+ **Message:** Received response with status 200
+
+ 6. **Time:** 2025-09-08T08:51:45.226Z
+ **Level:** DEBUG
+ **Message:** Response datasource_type: <class 'dict'>
+
+ 7. **Time:** 2025-09-08T08:51:45.228Z
+ **Level:** INFO
+ **Message:** Successfully processed /v1/assistants request to <class 'dict'>
+
+ 8. **Time:** 2025-09-08T08:51:45.228Z
+ **Level:** DEBUG
+ **Message:** Making /v1/assistants request to https://codemie-azure.eks-sandbox.aws.main.edp.projects.epam.com/code-assistant-apityping.List[codemie_sdk.models.assistant.AssistantBase]
+
+ 9. **Time:** 2025-09-08T08:51:45.228Z
+ **Level:** DEBUG
+ **Message:** Request params: {'page': 0, 'per_page': 200, 'scope': 'visible_to_user', 'minimal_response': True}
+
+ 10. **Time:** 2025-09-08T08:51:45.229Z
+ **Level:** DEBUG
+ **Message:** Starting new HTTPS connection (1): codemie-azure.eks-sandbox.aws.main.edp.projects.epam.com:443
+
+ 11. **Time:** 2025-09-08T08:51:45.299Z
+ **Level:** DEBUG
+ **Message:** https://codemie-azure.eks-sandbox.aws.main.edp.projects.epam.com:443 "GET /code-assistant-api/v1/assistants?page=0&per_page=200&scope=visible_to_user&minimal_response=True HTTP/1.1" 200 69989
+
+ 12. **Time:** 2025-09-08T08:51:45.310Z
+ **Level:** DEBUG
+ **Message:** Received response with status 200
+
+ 13. **Time:** 2025-09-08T08:51:45.311Z
+ **Level:** DEBUG
+ **Message:** Response datasource_type: <class 'dict'>
+
+ 14. **Time:** 2025-09-08T08:51:45.312Z
+ **Level:** INFO
+ **Message:** Successfully processed /v1/assistants request to typing.List[codemie_sdk.models.assistant.AssistantBase]
+
+ 15. **Time:** 2025-09-08T08:51:45.314Z
+ **Level:** DEBUG
+ **Message:** Making /v1/workflows request to https://codemie-azure.eks-sandbox.aws.main.edp.projects.epam.com/code-assistant-api<class 'dict'>
+
+ 16. **Time:** 2025-09-08T08:51:45.314Z
+ **Level:** DEBUG
+ **Message:** Request body: {'project': 'codemie', 'name': 'sdp_xpgivblpavtdeza', 'description': 'Test Workflow', 'yaml_config': 'enable_summarization_node: false\\ntools: []\\nassistants:\\n- id: sdp_xjwyipzcypfocwq\\n model: gpt-4o-2024-08-06\\n assistant_id: 6ef6a01d-70ef-4211-a385-8c99b87eee72\\nstates:\\n- id: ...
+
+ 17. **Time:** 2025-09-08T08:51:45.315Z
+ **Level:** DEBUG
+ **Message:** Starting new HTTPS connection (1): codemie-azure.eks-sandbox.aws.main.edp.projects.epam.com:443
+
+ 18. **Time:** 2025-09-08T08:51:45.394Z
+ **Level:** DEBUG
+ **Message:** https://codemie-azure.eks-sandbox.aws.main.edp.projects.epam.com:443 "POST /code-assistant-api/v1/workflows HTTP/1.1" 200 2119
+
+ 19. **Time:** 2025-09-08T08:51:45.394Z
+ **Level:** DEBUG
+ **Message:** Received response with status 200
+
+ 20. **Time:** 2025-09-08T08:51:45.394Z
+ **Level:** DEBUG
+ **Message:** Response datasource_type: <class 'dict'>
+ """,
+ ),
+ (
+ Toolkit.REPORT_PORTAL,
+ ReportPortalTool.GET_USER_INFORMATION,
+ "Get user information from reportal with username anton_yeromin",
+ """
+ User Information: Anton Yeromin
+ Personal Details
+ Full Name: Anton Yeromin
+ Email: anton_yeromin@epam.com
+ Username: anton_yeromin
+ User ID: 4
+ Account Type: SAML
+ Role: USER
+ Status: Active
+ Photo: Loaded
+ Login Information
+ Last Login: September 9, 2025
+ Synchronization Date: September 8, 2025
+ Project Assignments
+ EPM-CDME Project
+ Project ID: 4
+ Role: PROJECT_MANAGER
+ Entry Type: INTERNAL
+ Personal Project
+ Project ID: 6
+ Name: anton_yeromin_personal
+ Role: PROJECT_MANAGER
+ Entry Type: PERSONAL
+ This user has project management responsibilities in both an internal project (EPM-CDME) and their personal project space.
+ """,
+ ),
+ (
+ Toolkit.REPORT_PORTAL,
+ ReportPortalTool.GET_DASHBOARD_DATA,
+ "Get dashboard data from report portal where ID = 2",
+ """
+ Dashboard Details: test-dashboard
+ I was able to retrieve the dashboard data for ID 2:
+ Dashboard Information
+ ID: 2
+ Name: test-dashboard
+ Owner: dmytro_adamtsev
+ Description: dashboard for testing purposes
+ Dashboard Content
+ This dashboard does not contain any widgets.
+ It appears to be an empty dashboard that was likely created for testing purposes, as suggested by its name and description.
+ """,
+ ),
+ ]
@@ -592,6 +592,34 @@ class CredentialsUtil:
  cred.value = "wrong_secret"
  return credentials

+ @staticmethod
+ def report_portal_credentials() -> List[CredentialValues]:
+ report_portal_creds = AwsParameterStore.get_cloud_provider_credentials(
+ "reportportal"
+ )
+ return [
+ CredentialValues(
+ key="url",
+ value=report_portal_creds.get("url"),
+ ),
+ CredentialValues(
+ key="project",
+ value=report_portal_creds.get("project"),
+ ),
+ CredentialValues(
+ key="api_key",
+ value=report_portal_creds.get("api_key"),
+ ),
+ ]
+
+ @staticmethod
+ def invalid_report_portal_credentials() -> List[CredentialValues]:
+ credentials = CredentialsUtil.report_portal_credentials()
+ for cred in credentials:
+ if cred.key == "api_key":
+ cred.value = "wrong_key"
+ return credentials
+
  @staticmethod
  def jira_cloud_jql() -> str:
  jira_creds = AwsParameterStore.get_cloud_provider_credentials("jira")
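
The two new helpers differ only in the `api_key` value, which the invalid variant overwrites with `wrong_key`. A hypothetical usage sketch (not part of the package) that checks this relationship, assuming the AWS parameter store is reachable and `reportportal` credentials are provisioned there:

```python
from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil

# Both helpers return List[CredentialValues] with the keys url, project and api_key.
valid = {cred.key: cred.value for cred in CredentialsUtil.report_portal_credentials()}
invalid = {cred.key: cred.value for cred in CredentialsUtil.invalid_report_portal_credentials()}

# The invalid variant reuses the real url and project but swaps in a bogus API key.
assert invalid["url"] == valid["url"]
assert invalid["project"] == valid["project"]
assert invalid["api_key"] == "wrong_key"
```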
@@ -1,7 +1,6 @@
  import os

  import pytest
-
  from codemie_sdk.models.integration import CredentialTypes

  from codemie_test_harness.tests.enums.integrations import DataBaseDialect
@@ -18,12 +17,15 @@ from codemie_test_harness.tests.test_data.data_management_tools_test_data import
  )
  from codemie_test_harness.tests.utils.aws_parameters_store import CredentialsUtil

+ pytestmark = pytest.mark.skipif(
+ os.getenv("ENV") == "local", reason="Skipping this tests on local environment"
+ )
+

  @pytest.mark.workflow
  @pytest.mark.workflow_with_assistant
  @pytest.mark.elastic
  @pytest.mark.regression
- @pytest.mark.skip(reason="Credentials for Elastic are not available")
  def test_workflow_with_assistant_with_elastic_tools(
  assistant,
  workflow_with_assistant,
@@ -56,9 +58,6 @@ def test_workflow_with_assistant_with_elastic_tools(
  sql_tools_test_data,
  ids=[DataBaseDialect.MY_SQL, DataBaseDialect.POSTGRES, DataBaseDialect.MS_SQL],
  )
- @pytest.mark.skipif(
- os.getenv("ENV") == "local", reason="Skipping this test on local environment"
- )
  def test_workflow_with_assistant_with_sql_tools(
  assistant,
  workflow_with_assistant,
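
The per-test `@pytest.mark.skipif` on the SQL test is removed in favour of the module-level `pytestmark` added above, which applies the same `ENV == "local"` condition to every test in the file. A minimal sketch of that pattern, assuming the same `ENV` variable convention; the test names below are placeholders, not functions from the package:

```python
import os

import pytest

# A module-level pytestmark applies the skip condition to every test in this
# module, so individual functions no longer need their own skipif decorator.
pytestmark = pytest.mark.skipif(
    os.getenv("ENV") == "local", reason="Skipping these tests on local environment"
)


def test_sql_tools_placeholder():
    assert True  # skipped entirely when ENV=local


def test_elastic_tools_placeholder():
    assert True  # also skipped; no per-test decorator required
```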
@@ -0,0 +1,38 @@
+ import pytest
+
+ from codemie_test_harness.tests.test_data.report_portal_tools_test_data import (
+ rp_test_data,
+ )
+
+
+ @pytest.mark.workflow
+ @pytest.mark.workflow_with_assistant
+ @pytest.mark.report_portal
+ @pytest.mark.regression
+ @pytest.mark.parametrize(
+ "toolkit,tool_name,prompt,expected_response",
+ rp_test_data,
+ ids=[f"{row[0]}_{row[1]}" for row in rp_test_data],
+ )
+ def test_workflow_with_assistant_with_report_portal_tools(
+ assistant,
+ workflow_with_assistant,
+ workflow_utils,
+ report_portal_integration,
+ similarity_check,
+ toolkit,
+ tool_name,
+ prompt,
+ expected_response,
+ ):
+ assistant = assistant(
+ toolkit,
+ tool_name,
+ settings=report_portal_integration,
+ )
+
+ workflow_with_assistant = workflow_with_assistant(assistant, prompt)
+ response = workflow_utils.execute_workflow(
+ workflow_with_assistant.id, assistant.name
+ )
+ similarity_check.check_similarity(response, expected_response)