codemie-test-harness 0.1.136__py3-none-any.whl → 0.1.138__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of codemie-test-harness might be problematic.

Files changed (29)
  1. codemie_test_harness/tests/__init__.py +1 -0
  2. codemie_test_harness/tests/assistant/test_assistants.py +2 -0
  3. codemie_test_harness/tests/conftest.py +17 -0
  4. codemie_test_harness/tests/service/test_assistant_service.py +349 -379
  5. codemie_test_harness/tests/service/test_datasource_service.py +276 -292
  6. codemie_test_harness/tests/service/test_integration_service.py +133 -122
  7. codemie_test_harness/tests/service/test_llm_service.py +16 -17
  8. codemie_test_harness/tests/service/test_task_service.py +108 -120
  9. codemie_test_harness/tests/service/test_user_service.py +36 -19
  10. codemie_test_harness/tests/service/test_workflow_execution_service.py +142 -169
  11. codemie_test_harness/tests/service/test_workflow_service.py +145 -144
  12. codemie_test_harness/tests/test_data/cloud_tools_test_data.py +5 -1
  13. codemie_test_harness/tests/test_data/direct_tools/cloud_tools_test_data.py +5 -1
  14. codemie_test_harness/tests/test_data/direct_tools/codebase_tools_test_data.py +28 -159
  15. codemie_test_harness/tests/test_data/integrations_test_data.py +10 -2
  16. codemie_test_harness/tests/test_data/llm_test_data.py +0 -1
  17. codemie_test_harness/tests/test_data/vcs_tools_test_data.py +4 -1
  18. codemie_test_harness/tests/utils/assistant_utils.py +39 -4
  19. codemie_test_harness/tests/utils/aws_parameters_store.py +1 -1
  20. codemie_test_harness/tests/utils/llm_utils.py +9 -0
  21. codemie_test_harness/tests/utils/search_utils.py +11 -5
  22. codemie_test_harness/tests/utils/user_utils.py +9 -0
  23. codemie_test_harness/tests/utils/workflow_utils.py +34 -6
  24. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_cloud_tools.py +3 -3
  25. codemie_test_harness/tests/workflow/direct_tools_calling/test_workflow_with_research_tools.py +3 -3
  26. {codemie_test_harness-0.1.136.dist-info → codemie_test_harness-0.1.138.dist-info}/METADATA +2 -2
  27. {codemie_test_harness-0.1.136.dist-info → codemie_test_harness-0.1.138.dist-info}/RECORD +29 -27
  28. {codemie_test_harness-0.1.136.dist-info → codemie_test_harness-0.1.138.dist-info}/WHEEL +0 -0
  29. {codemie_test_harness-0.1.136.dist-info → codemie_test_harness-0.1.138.dist-info}/entry_points.txt +0 -0
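The bulk of this diff migrates the service tests from bare assert statements to PyHamcrest matchers. A minimal sketch of the before/after pattern (illustrative only; check_integrations is a hypothetical helper, not code from this package):

from hamcrest import all_of, assert_that, greater_than, has_length, instance_of

def check_integrations(integrations):
    # Old style (removed in 0.1.138): one assert per condition,
    # so the first failure hides the rest.
    # assert isinstance(integrations, list)
    # assert len(integrations) > 0

    # New style: one matcher expression with a readable failure message.
    assert_that(integrations, all_of(instance_of(list), has_length(greater_than(0))))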
codemie_test_harness/tests/service/test_integration_service.py
@@ -1,98 +1,114 @@
- """Integration tests for IntegrationService."""
-
  from time import sleep
-
  import pytest
+ from hamcrest import (
+     assert_that,
+     all_of,
+     instance_of,
+     is_not,
+     equal_to,
+     less_than_or_equal_to,
+     has_length,
+     empty,
+     any_of,
+     has_property,
+     greater_than,
+ )
 
- from codemie_sdk import CodeMieClient
  from codemie_sdk.models.integration import (
      Integration,
      CredentialTypes,
      CredentialValues,
      IntegrationType,
  )
- from codemie_test_harness.tests import PROJECT
+ from codemie_test_harness.tests import PROJECT, GITHUB_URL
  from codemie_test_harness.tests.utils.base_utils import get_random_name
 
 
- def test_list_project_integrations_minimal(client: CodeMieClient):
-     """Test listing project integrations with minimal response."""
-     # Get list of project integrations
-     integrations = client.integrations.list(setting_type=IntegrationType.PROJECT)
+ def test_list_project_integrations_minimal_response(search_utils):
+     integrations = search_utils.list_integrations(setting_type=IntegrationType.PROJECT)
+     assert_that(integrations, all_of(instance_of(list), has_length(greater_than(0))))
 
-     # Verify we got a list of integrations
-     assert isinstance(integrations, list)
-
-     # Verify each integration has the correct type and required fields
      for integration in integrations:
-         assert isinstance(integration, Integration)
-         assert integration.project_name is not None
-         assert integration.credential_type is not None
-         assert isinstance(integration.credential_values, list)
-         assert integration.setting_type == IntegrationType.PROJECT
-
+         assert_that(
+             integration,
+             all_of(
+                 instance_of(Integration),
+                 has_property("project_name", has_length(greater_than(0))),
+                 has_property("credential_type", is_not(any_of(None, empty()))),
+                 has_property("credential_values", has_length(greater_than(0))),
+                 has_property("setting_type", equal_to(IntegrationType.PROJECT)),
+             ),
+         )
 
- def test_list_user_integrations_minimal(client: CodeMieClient):
-     """Test listing user integrations with minimal response."""
-     # Get list of user integrations
-     integrations = client.integrations.list(setting_type=IntegrationType.USER)
 
-     # Verify we got a list of integrations
-     assert isinstance(integrations, list)
+ def test_list_user_integrations_minimal_response(search_utils):
+     integrations = search_utils.list_integrations()
+     assert_that(integrations, all_of(instance_of(list), has_length(greater_than(0))))
 
-     # Verify each integration has the correct type and required fields
      for integration in integrations:
-         assert isinstance(integration, Integration)
-         assert integration.project_name is not None
-         assert integration.credential_type is not None
-         assert isinstance(integration.credential_values, list)
-         assert integration.setting_type == IntegrationType.USER
+         assert_that(
+             integration,
+             all_of(
+                 instance_of(Integration),
+                 has_property("project_name", is_not(None)),
+                 has_property("credential_type", is_not(any_of(None, empty()))),
+                 has_property("credential_values", has_length(greater_than(0))),
+                 has_property("setting_type", equal_to(IntegrationType.USER)),
+             ),
+         )
 
 
- def test_list_integrations_with_filters(client: CodeMieClient):
-     """Test listing integrations with filters for both types."""
-     # Test project integrations with filters
+ def test_list_integrations_with_filters(search_utils):
      filters = {"type": CredentialTypes.GIT}
-     project_integrations = client.integrations.list(
+
+     project_integrations = search_utils.list_integrations(
          setting_type=IntegrationType.PROJECT, filters=filters
      )
-
      for integration in project_integrations:
-         assert integration.credential_type == CredentialTypes.GIT
-         assert integration.setting_type == IntegrationType.PROJECT
+         assert_that(
+             integration,
+             all_of(
+                 has_property("credential_type", equal_to(CredentialTypes.GIT)),
+                 has_property("setting_type", equal_to(IntegrationType.PROJECT)),
+             ),
+         )
 
-     # Test user integrations with filters
-     user_integrations = client.integrations.list(
+     user_integrations = search_utils.list_integrations(
          setting_type=IntegrationType.USER, filters=filters
      )
-
      for integration in user_integrations:
-         assert integration.credential_type == CredentialTypes.GIT
-         assert integration.setting_type == IntegrationType.USER
+         assert_that(
+             integration,
+             all_of(
+                 has_property("credential_type", equal_to(CredentialTypes.GIT)),
+                 has_property("setting_type", equal_to(IntegrationType.USER)),
+             ),
+         )
 
 
- def test_list_integrations_pagination(client: CodeMieClient):
-     """Test integration listing with pagination for both types."""
+ def test_list_integrations_with_pagination(search_utils):
      for setting_type in [IntegrationType.PROJECT, IntegrationType.USER]:
          # Get first page with 5 items
-         page_1 = client.integrations.list(setting_type=setting_type, page=0, per_page=5)
-         assert len(page_1) <= 5
+         page_1 = search_utils.list_integrations(
+             setting_type=setting_type, page=0, per_page=5
+         )
+         assert_that(len(page_1), less_than_or_equal_to(5))
 
          # Get second page with 5 items
-         page_2 = client.integrations.list(setting_type=setting_type, page=1, per_page=5)
-         assert len(page_2) <= 5
+         page_2 = search_utils.list_integrations(
+             setting_type=setting_type, page=1, per_page=5
+         )
+         assert_that(len(page_2), less_than_or_equal_to(5))
 
          # Verify pages contain different integrations
          if page_1 and page_2:
-             assert page_1[0].id != page_2[0].id
+             assert_that(page_1[0].id, is_not(equal_to(page_2[0].id)))
 
 
  @pytest.mark.parametrize(
      "setting_type", [IntegrationType.PROJECT, IntegrationType.USER]
  )
- def test_integration_lifecycle(client: CodeMieClient, setting_type: IntegrationType):
-     """Test full integration lifecycle for both user and project settings."""
-     # Step 1: Create test integration
+ def test_integration_full_lifecycle(integration_utils, setting_type: IntegrationType):
      test_project = PROJECT
      test_alias = get_random_name()
 
@@ -108,76 +124,73 @@ def test_integration_lifecycle(client: CodeMieClient, setting_type: IntegrationT
          setting_type=setting_type,
      )
 
-     # Create integration
-     created = client.integrations.create(create_request)
-     assert created is not None
-     sleep(5)
+     created = integration_utils.send_create_integration_request(create_request)
 
-     # Step 2: Verify integration exists in the list
-     found = client.integrations.get_by_alias(test_alias, setting_type=setting_type)
-     assert found is not None
-     assert found.alias == test_alias
-     assert found.setting_type == setting_type
-     assert found.credential_values[0].value == "https://github.com"
-     assert found.credential_values[1].value == "test-token-name"
-
-     try:
-         # Step 3: Update the integration
-         updated_alias = f"{test_alias} Updated"
-         update_request = Integration(
-             project_name=test_project,
-             credential_type=CredentialTypes.GIT,
-             credential_values=[
-                 CredentialValues(
-                     key="url", value="https://github.com/test/repo-updated"
-                 ),
-                 CredentialValues(key="token_name", value="test-token-name-updated"),
-             ],
-             alias=updated_alias,
-             setting_type=setting_type,
-         )
+     assert_that(created, is_not(None))
+     sleep(5)  # Verify integration creation
 
-         updated = client.integrations.update(found.id, update_request)
-         assert updated is not None
-         sleep(5)
+     found_integration = integration_utils.get_integration_by_alias(
+         integration_alias=test_alias, integration_type=setting_type
+     )
 
-         # Step 4: Get and verify updated integration
-         updated_integration = client.integrations.get_by_alias(
-             updated_alias, setting_type=setting_type
-         )
-         assert updated_integration is not None
-         assert updated_integration.id == found.id
-         assert updated_integration.alias == updated_alias
-         assert updated_integration.setting_type == setting_type
-         assert updated_integration.credential_values[0].value == "https://github.com"
-         assert (
-             updated_integration.credential_values[1].value == "test-token-name-updated"
-         )
+     assert_that(
+         found_integration,
+         all_of(
+             has_property("alias", equal_to(test_alias)),
+             has_property("setting_type", equal_to(setting_type)),
+         ),
+     )
+     assert_that(found_integration.credential_values[0].value, equal_to(GITHUB_URL))
+     assert_that(
+         found_integration.credential_values[1].value, equal_to("test-token-name")
+     )
+
+     # Updating the integration
+     updated_alias = f"{test_alias} Updated"
+     update_request = Integration(
+         id=found_integration.id,
+         project_name=test_project,
+         credential_type=CredentialTypes.GIT,
+         credential_values=[
+             CredentialValues(key="url", value="https://github.com/test/repo-updated"),
+             CredentialValues(key="token_name", value="test-token-name-updated"),
+         ],
+         alias=updated_alias,
+         setting_type=setting_type,
+     )
+
+     updated = integration_utils.update_integration(update_request)
+
+     assert_that(updated, is_not(None))
+     sleep(5)
 
-     finally:
-         # Step 5: Clean up - delete created integration
-         if found:
-             try:
-                 client.integrations.delete(found.id, setting_type=setting_type)
-                 sleep(5)
-                 # Verify deletion
-                 with pytest.raises(Exception) as exc_info:
-                     client.integrations.get(found.id, setting_type=setting_type)
-                 assert (
-                     "404" in str(exc_info.value)
-                     or "not found" in str(exc_info.value).lower()
-                 )
-             except Exception as e:
-                 pytest.fail(f"Failed to clean up integration: {str(e)}")
+     updated_integration = integration_utils.get_integration_by_alias(
+         integration_alias=updated_alias, integration_type=setting_type
+     )
+     assert_that(
+         updated_integration,
+         all_of(
+             has_property("id", equal_to(found_integration.id)),
+             has_property("alias", equal_to(updated_alias)),
+             has_property("setting_type", equal_to(setting_type)),
+         ),
+     )
+     assert_that(
+         updated_integration.credential_values[0].value,
+         equal_to(GITHUB_URL),
+     )
+     assert_that(
+         updated_integration.credential_values[1].value,
+         equal_to("test-token-name-updated"),
+     )
 
 
  @pytest.mark.parametrize(
      "setting_type", [IntegrationType.PROJECT, IntegrationType.USER]
  )
- def test_create_integration_invalid_data(
-     client: CodeMieClient, setting_type: IntegrationType
+ def test_create_integration_with_invalid_data(
+     integration_utils, setting_type: IntegrationType
  ):
-     """Test creating integration with invalid data for both types."""
      with pytest.raises(Exception):
          invalid_request = Integration(
              project_name="",  # Invalid - empty project name
@@ -185,26 +198,24 @@ def test_create_integration_invalid_data(
              credential_values=[],  # Invalid - empty credentials
              setting_type=setting_type,
          )
-         client.integrations.create(invalid_request)
+         integration_utils.send_create_integration_request(invalid_request)
 
 
  @pytest.mark.parametrize(
      "setting_type", [IntegrationType.PROJECT, IntegrationType.USER]
  )
- def test_update_integration_invalid_data(
-     client: CodeMieClient, setting_type: IntegrationType
+ def test_update_integration_with_invalid_data(
+     search_utils, integration_utils, setting_type: IntegrationType
  ):
-     """Test updating integration with invalid data for both types."""
-     # First, get a valid integration ID
-     integrations = client.integrations.list(setting_type=setting_type)
-     assert len(integrations) > 0
-     test_id = integrations[0].id
+     integrations = search_utils.list_integrations(setting_type=setting_type)
+     assert_that(integrations, has_length(greater_than(0)))
 
      with pytest.raises(Exception):
          invalid_request = Integration(
+             id=integrations[0].id,
              project_name="",  # Invalid - empty project name
             credential_type=CredentialTypes.GIT,
              credential_values=[],  # Invalid - empty credentials
              setting_type=setting_type,
          )
-         client.integrations.update(test_id, invalid_request)
+         integration_utils.update_integration(invalid_request)
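The `client: CodeMieClient` parameter is gone from these tests; they now receive helper fixtures such as search_utils and integration_utils (added via the conftest.py and utils changes listed above). A rough sketch of what such a fixture might look like — SearchUtils and its constructor are assumptions; only the delegation target client.integrations.list is attested by the removed lines:

import pytest

class SearchUtils:
    """Hypothetical wrapper that keeps SDK calls out of the test bodies."""

    def __init__(self, client):
        self._client = client

    def list_integrations(self, **kwargs):
        # Pass-through to the SDK call the old tests invoked directly.
        return self._client.integrations.list(**kwargs)

@pytest.fixture
def search_utils(client):
    # `client` is assumed to be the pre-existing CodeMieClient fixture
    # that the removed tests took as a parameter.
    return SearchUtils(client)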
codemie_test_harness/tests/service/test_llm_service.py
@@ -1,20 +1,19 @@
- """Integration tests for LLMService."""
-
- from codemie_sdk import CodeMieClient
+ import pytest
  from codemie_sdk.models.llm import LLMModel
+ from hamcrest import (
+     assert_that,
+     instance_of,
+     has_length,
+     greater_than,
+     all_of,
+ )
 
 
- def test_list_llm_models(client: CodeMieClient):
-     """Test successful retrieval of available LLM models."""
-     models = client.llms.list()
-     assert isinstance(models, list)
-     assert len(models) > 0
-     assert all(isinstance(model, LLMModel) for model in models)
-
-
- def test_list_embeddings_models(client: CodeMieClient):
-     """Test successful retrieval of available embeddings models."""
-     models = client.llms.list_embeddings()
-     assert isinstance(models, list)
-     assert len(models) > 0
-     assert all(isinstance(model, LLMModel) for model in models)
+ @pytest.mark.parametrize(
+     "model_list_function", ["list_llm_models", "list_embedding_llm_models"]
+ )
+ def test_list_available_models(llm_utils, model_list_function):
+     models = getattr(llm_utils, model_list_function)()
+     assert_that(models, all_of(instance_of(list), has_length(greater_than(0))))
+     for model in models:
+         assert_that(model, instance_of(LLMModel))
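The two LLM tests collapse into one parametrized test that resolves the listing method by name with getattr. A self-contained illustration of that dispatch pattern (FakeUtils is a stand-in, not the real llm_utils fixture):

import pytest

class FakeUtils:
    """Stand-in exposing the two method names used in the parametrization above."""

    def list_llm_models(self):
        return ["model-a"]

    def list_embedding_llm_models(self):
        return ["embedding-a"]

@pytest.mark.parametrize(
    "model_list_function", ["list_llm_models", "list_embedding_llm_models"]
)
def test_dispatch_by_name(model_list_function):
    # getattr turns the parametrized string into a bound method,
    # so one test body covers both listing endpoints.
    models = getattr(FakeUtils(), model_list_function)()
    assert models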
codemie_test_harness/tests/service/test_task_service.py
@@ -1,14 +1,19 @@
- """Integration tests for TaskService."""
-
  import time
  import uuid
  from datetime import datetime
 
  import pytest
+ from hamcrest import (
+     assert_that,
+     has_length,
+     greater_than,
+     equal_to,
+     is_in,
+     contains_string,
+     is_not,
+ )
 
- from codemie_sdk import CodeMieClient
  from codemie_sdk.models.assistant import (
-     AssistantCreateRequest,
      AssistantChatRequest,
      ToolKitDetails,
      ToolDetails,
@@ -16,136 +21,119 @@ from codemie_sdk.models.assistant import (
      ChatRole,
  )
  from codemie_sdk.models.task import BackgroundTaskEntity
- from codemie_test_harness.tests import PROJECT, LANGFUSE_TRACES_ENABLED
- from codemie_test_harness.tests.utils.base_utils import get_random_name
+ from codemie_test_harness.tests import PROJECT
 
 
- def test_background_task_flow(client: CodeMieClient, default_llm):
-     """Test the complete flow of a background task with assistant chat."""
-     # Step 1: Get available toolkits and tools
-     toolkits = client.assistants.get_tools()
-     assert len(toolkits) > 0, "At least one toolkit is required for testing"
+ def test_run_flow_of_background_task(assistant_utils, default_llm):
+     toolkits = assistant_utils.get_assistant_tools()
+     assert_that(
+         toolkits,
+         has_length(greater_than(0)),
+         "At least one toolkit is required for testing",
+     )
 
      first_toolkit = toolkits[0]
-     assert len(first_toolkit.tools) > 0, "No tools in the first toolkit"
+     assert_that(
+         first_toolkit.tools,
+         has_length(greater_than(0)),
+         "No tools in the first toolkit",
+     )
      first_tool = first_toolkit.tools[0]
 
-     # Step 2: Create assistant for testing
-     assistant_project = PROJECT
-     assistant_name = get_random_name()
-     request = AssistantCreateRequest(
-         name=assistant_name,
-         slug=assistant_name,
-         description="Integration test assistant for background tasks",
-         system_prompt="You are a helpful integration test assistant. Please provide detailed responses.",
+     created = assistant_utils.create_assistant(
          llm_model_type=default_llm.base_name,
-         project=assistant_project,
          toolkits=[
              ToolKitDetails(
                  toolkit=first_toolkit.toolkit, tools=[ToolDetails(name=first_tool.name)]
              )
          ],
+         system_prompt="You are a helpful integration test assistant. Please provide detailed responses.",
+         description="Integration test assistant for background tasks",
      )
+     filters = {"project": PROJECT, "shared": False}
+     assistants = assistant_utils.get_assistants(minimal_response=True, filters=filters)
+     found_assistant = next((a for a in assistants if a.name == created.name), None)
+
+     # Start a chat in background mode with a complex question
+     complex_question = """
+     Please provide a detailed analysis of software architecture patterns, including:
+     1. Monolithic Architecture
+     2. Microservices Architecture
+     3. Event-Driven Architecture
+     4. Layered Architecture
+     5. Space-Based Architecture
+
+     For each pattern, include:
+     - Definition
+     - Key characteristics
+     - Advantages and disadvantages
+     - Best use cases
+     - Implementation challenges
+     - Real-world examples
+     """
+
+     chat_request = AssistantChatRequest(
+         text=complex_question,
+         conversation_id=str(uuid.uuid4()),
+         history=[
+             ChatMessage(
+                 role=ChatRole.USER,
+                 message="Hi, I need help with software architecture",
+             ),
+             ChatMessage(
+                 role=ChatRole.ASSISTANT,
+                 message="Of course! I'd be happy to help with software architecture. What would you like to know?",
+             ),
+         ],
+         stream=False,
+         background_task=True,  # Enable background mode
+     )
+
+     response = assistant_utils.send_chat_request(
+         assistant=found_assistant, request=chat_request
+     )
+
+     assert_that(response.task_id, is_not(None))
+
+     # Poll task status until completion
+     max_attempts = 30  # Maximum number of polling attempts
+     polling_interval = 2  # Seconds between polling attempts
+     task_id = response.task_id
+     task_completed = False
+
+     for _ in range(max_attempts):
+         task = assistant_utils.get_tasks(task_id)
+         assert_that(isinstance(task, BackgroundTaskEntity))
+         assert_that(task.id, equal_to(task_id))
+         assert_that(isinstance(task.date, datetime))
+         assert_that(isinstance(task.update_date, datetime))
+         assert_that(task.status, is_in(["STARTED", "COMPLETED", "FAILED"]))
+         assert_that(task.user, is_not(None))
+         assert_that(task.task, is_not(None))
+
+         if task.status == "COMPLETED":
+             task_completed = True
+             assert_that(len(task.final_output), greater_than(0))
+
+             # The response should contain architecture patterns
+             assert_that(task.final_output, contains_string("Monolithic"))
+             assert_that(task.final_output, contains_string("Microservices"))
+             break
+         elif task.status == "FAILED":
+             pytest.fail(f"Task failed with output: {task.final_output}")
+
+         time.sleep(polling_interval)
+
+     assert_that(
+         task_completed,
+         equal_to(True),
+         "Task did not complete within the expected time",
+     )
+
 
-     # Create assistant
-     client.assistants.create(request)
-     filters = {"project": assistant_project, "shared": False}
-     assistants = client.assistants.list(minimal_response=True, filters=filters)
-     found_assistant = next((a for a in assistants if a.name == assistant_name), None)
-
-     try:
-         # Step 3: Start a chat in background mode with a complex question
-         complex_question = """
-         Please provide a detailed analysis of software architecture patterns, including:
-         1. Monolithic Architecture
-         2. Microservices Architecture
-         3. Event-Driven Architecture
-         4. Layered Architecture
-         5. Space-Based Architecture
-
-         For each pattern, include:
-         - Definition
-         - Key characteristics
-         - Advantages and disadvantages
-         - Best use cases
-         - Implementation challenges
-         - Real-world examples
-         """
-
-         chat_request = AssistantChatRequest(
-             text=complex_question,
-             conversation_id=str(uuid.uuid4()),
-             history=[
-                 ChatMessage(
-                     role=ChatRole.USER,
-                     message="Hi, I need help with software architecture",
-                 ),
-                 ChatMessage(
-                     role=ChatRole.ASSISTANT,
-                     message="Of course! I'd be happy to help with software architecture. What would you like to know?",
-                 ),
-             ],
-             stream=False,
-             background_task=True,  # Enable background mode
-             metadata={"langfuse_traces_enabled": LANGFUSE_TRACES_ENABLED},
-         )
-
-         # Send chat request
-         response = client.assistants.chat(
-             assistant_id=found_assistant.id, request=chat_request
-         )
-         # Verify response contains task ID
-         assert response is not None
-         assert response.task_id is not None
-
-         # Step 4: Poll task status until completion
-         max_attempts = 30  # Maximum number of polling attempts
-         polling_interval = 2  # Seconds between polling attempts
-         task_id = response.task_id
-         task_completed = False
-
-         for _ in range(max_attempts):
-             # Get task status
-             task = client.tasks.get(task_id)
-             assert isinstance(task, BackgroundTaskEntity)
-
-             # Verify task structure
-             assert task.id == task_id
-             assert isinstance(task.date, datetime)
-             assert isinstance(task.update_date, datetime)
-             assert task.status in ["STARTED", "COMPLETED", "FAILED"]
-             assert task.user is not None
-             assert task.task is not None
-
-             if task.status == "COMPLETED":
-                 task_completed = True
-                 # Verify task output
-                 assert task.final_output is not None
-                 assert len(task.final_output) > 0
-                 # The response should contain architecture patterns
-                 assert "Monolithic" in task.final_output
-                 assert "Microservices" in task.final_output
-                 break
-             elif task.status == "FAILED":
-                 pytest.fail(f"Task failed with output: {task.final_output}")
-
-             time.sleep(polling_interval)
-
-         assert task_completed, "Task did not complete within the expected time"
-
-     finally:
-         # Clean up - delete created assistant
-         if found_assistant:
-             try:
-                 client.assistants.delete(found_assistant.id)
-             except Exception as e:
-                 pytest.fail(f"Failed to clean up assistant: {str(e)}")
-
-
- @pytest.mark.skip(reason="Need to fix API to return 404")
- def test_get_task_not_found(client: CodeMieClient):
+ def test_get_task_not_found(assistant_utils):
      """Test getting a non-existent task."""
      with pytest.raises(Exception) as exc_info:
-         client.tasks.get("non-existent-task-id")
+         assistant_utils.get_tasks("non-existent-task-id")
      # assert "404" in str(exc_info.value) or "not found" in str(exc_info.value).lower(), Need to fix API to produce error
-     assert "503" in str(exc_info.value)
+     assert_that(str(exc_info.value), contains_string("500"))
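The background-task test above polls task status in a fixed loop. The same poll-until-terminal pattern, factored into a reusable helper as a sketch — wait_for_completion and its get_status callable are illustrative, not part of the harness; the STARTED/COMPLETED/FAILED states come from the diff:

import time

def wait_for_completion(get_status, max_attempts=30, polling_interval=2):
    """Poll a status callable until COMPLETED/FAILED or attempts run out."""
    for _ in range(max_attempts):
        status = get_status()
        if status == "COMPLETED":
            return True
        if status == "FAILED":
            raise RuntimeError("background task failed")
        time.sleep(polling_interval)  # wait before the next poll
    return False  # still STARTED after max_attempts polls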