codemie-sdk-python 0.1.92__py3-none-any.whl → 0.1.258__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of codemie-sdk-python might be problematic; review the advisory details in the registry before upgrading.

Files changed (33) hide show
  1. codemie_sdk/__init__.py +114 -2
  2. codemie_sdk/auth/credentials.py +5 -4
  3. codemie_sdk/client/client.py +66 -5
  4. codemie_sdk/models/assistant.py +111 -8
  5. codemie_sdk/models/conversation.py +169 -0
  6. codemie_sdk/models/datasource.py +80 -1
  7. codemie_sdk/models/file_operation.py +25 -0
  8. codemie_sdk/models/integration.py +3 -1
  9. codemie_sdk/models/vendor_assistant.py +187 -0
  10. codemie_sdk/models/vendor_guardrail.py +152 -0
  11. codemie_sdk/models/vendor_knowledgebase.py +151 -0
  12. codemie_sdk/models/vendor_workflow.py +145 -0
  13. codemie_sdk/models/workflow.py +1 -1
  14. codemie_sdk/models/workflow_execution_payload.py +21 -0
  15. codemie_sdk/models/workflow_state.py +6 -3
  16. codemie_sdk/models/workflow_thoughts.py +26 -0
  17. codemie_sdk/services/assistant.py +220 -1
  18. codemie_sdk/services/conversation.py +90 -0
  19. codemie_sdk/services/datasource.py +67 -0
  20. codemie_sdk/services/files.py +82 -0
  21. codemie_sdk/services/vendor_assistant.py +364 -0
  22. codemie_sdk/services/vendor_guardrail.py +375 -0
  23. codemie_sdk/services/vendor_knowledgebase.py +270 -0
  24. codemie_sdk/services/vendor_workflow.py +330 -0
  25. codemie_sdk/services/webhook.py +41 -0
  26. codemie_sdk/services/workflow.py +26 -2
  27. codemie_sdk/services/workflow_execution.py +54 -6
  28. codemie_sdk/utils/http.py +43 -16
  29. codemie_sdk_python-0.1.258.dist-info/METADATA +1404 -0
  30. codemie_sdk_python-0.1.258.dist-info/RECORD +45 -0
  31. codemie_sdk_python-0.1.92.dist-info/METADATA +0 -892
  32. codemie_sdk_python-0.1.92.dist-info/RECORD +0 -30
  33. {codemie_sdk_python-0.1.92.dist-info → codemie_sdk_python-0.1.258.dist-info}/WHEEL +0 -0
@@ -0,0 +1,145 @@
1
+ """Models for vendor workflow settings."""
2
+
3
+ from datetime import datetime
4
+ from enum import Enum
5
+ from typing import Optional, List
6
+
7
+ from pydantic import BaseModel, ConfigDict, Field
8
+
9
+ from .vendor_assistant import PaginationInfo, TokenPagination
10
+
11
+
12
class VendorWorkflowSetting(BaseModel):
    """A single vendor workflow setting returned by the vendor API."""

    # Unknown vendor-side fields are dropped silently during validation.
    model_config = ConfigDict(extra="ignore")

    setting_id: str = Field(description="Unique identifier for the setting")
    setting_name: str = Field(description="Name of the setting")
    project: str = Field(description="Project associated with the setting")
    entities: List[str] = Field(
        default_factory=list,
        description="List of entities associated with the setting",
    )
    invalid: Optional[bool] = Field(
        default=None, description="Whether the setting is invalid"
    )
    error: Optional[str] = Field(
        default=None, description="Error message if the setting is invalid"
    )
27
+
28
+
29
class VendorWorkflowSettingsResponse(BaseModel):
    """Paginated response wrapper for vendor workflow settings."""

    model_config = ConfigDict(extra="ignore")

    data: List[VendorWorkflowSetting] = Field(
        description="List of vendor workflow settings"
    )
    pagination: PaginationInfo = Field(description="Pagination information")
38
+
39
+
40
class VendorWorkflowStatus(str, Enum):
    """Preparation status reported for a vendor workflow."""

    PREPARED = "PREPARED"
    NOT_PREPARED = "NOT_PREPARED"
45
+
46
+
47
class VendorWorkflow(BaseModel):
    """A vendor workflow as returned by the vendor API."""

    model_config = ConfigDict(extra="ignore")

    id: str = Field(description="Unique identifier for the workflow")
    name: str = Field(description="Name of the workflow")
    status: VendorWorkflowStatus = Field(description="Status of the workflow")
    description: Optional[str] = Field(
        default=None, description="Description of the workflow"
    )
    version: str = Field(description="Version of the workflow")
    # Field names mirror the vendor's camelCase payload; the aliases match the
    # field names and are kept for explicitness with the wire format.
    createdAt: datetime = Field(description="Creation timestamp", alias="createdAt")
    updatedAt: datetime = Field(
        description="Last update timestamp", alias="updatedAt"
    )
63
+
64
+
65
class VendorWorkflowsResponse(BaseModel):
    """Paginated response wrapper for vendor workflows."""

    model_config = ConfigDict(extra="ignore")

    data: List[VendorWorkflow] = Field(description="List of vendor workflows")
    pagination: TokenPagination = Field(
        description="Token-based pagination information"
    )
74
+
75
+
76
class VendorWorkflowAlias(BaseModel):
    """A vendor workflow alias as returned by the vendor API."""

    model_config = ConfigDict(extra="ignore")

    id: str = Field(description="Unique identifier for the alias")
    name: str = Field(description="Name of the alias")
    status: VendorWorkflowStatus = Field(description="Status of the alias")
    description: Optional[str] = Field(
        default=None, description="Description of the alias"
    )
    version: str = Field(description="Version of the alias")
    # camelCase mirrors the vendor payload; aliases equal the field names.
    createdAt: datetime = Field(description="Creation timestamp", alias="createdAt")
    updatedAt: datetime = Field(
        description="Last update timestamp", alias="updatedAt"
    )
    aiRunId: Optional[str] = Field(
        default=None,
        description="AI run ID if the alias is installed",
        alias="aiRunId",
    )
95
+
96
+
97
class VendorWorkflowAliasesResponse(BaseModel):
    """Paginated response wrapper for vendor workflow aliases."""

    model_config = ConfigDict(extra="ignore")

    data: List[VendorWorkflowAlias] = Field(
        description="List of vendor workflow aliases"
    )
    pagination: TokenPagination = Field(
        description="Token-based pagination information"
    )
108
+
109
+
110
class VendorWorkflowInstallRequest(BaseModel):
    """Request item describing one workflow to install."""

    model_config = ConfigDict(extra="ignore")

    id: str = Field(description="Workflow ID to install")
    flowAliasId: str = Field(description="Flow alias ID to use for the workflow")
    setting_id: str = Field(description="Vendor setting ID")
118
+
119
+
120
class VendorWorkflowInstallSummary(BaseModel):
    """Summary entry for one installed workflow."""

    model_config = ConfigDict(extra="ignore")

    flowId: str = Field(description="Installed workflow ID")
    flowAliasId: str = Field(description="Flow alias ID used for installation")
    aiRunId: str = Field(description="AI run ID for the installation")
128
+
129
+
130
class VendorWorkflowInstallResponse(BaseModel):
    """Response wrapper for a workflow installation call."""

    model_config = ConfigDict(extra="ignore")

    summary: List[VendorWorkflowInstallSummary] = Field(
        description="List of installation summaries"
    )
138
+
139
+
140
class VendorWorkflowUninstallResponse(BaseModel):
    """Response wrapper for a workflow uninstallation call."""

    model_config = ConfigDict(extra="ignore")

    success: bool = Field(
        description="Whether the uninstallation was successful"
    )
@@ -84,6 +84,6 @@ class WorkflowExecution(BaseModel):
84
84
  status: ExecutionStatus = Field(alias="overall_status")
85
85
  created_date: datetime = Field(alias="date")
86
86
  prompt: str
87
- updated_date: Optional[datetime] = None
87
+ updated_date: Optional[datetime] = Field(alias="update_date")
88
88
  created_by: User
89
89
  tokens_usage: Optional[TokensUsage] = None
@@ -0,0 +1,21 @@
1
+ """Workflow execution payload models."""
2
+
3
+ from typing import Optional, Union
4
+ from pydantic import BaseModel, ConfigDict, Field
5
+
6
+
7
class WorkflowExecutionCreateRequest(BaseModel):
    """Request payload for creating a workflow execution."""

    model_config = ConfigDict(populate_by_name=True)

    user_input: Optional[Union[str, dict, list, int, float, bool]] = Field(
        default=None, description="User input for the workflow execution"
    )
    file_name: Optional[str] = Field(
        default=None,
        description="File name associated with the workflow execution",
    )
    propagate_headers: bool = Field(
        default=False,
        description="Enable propagation of X-* HTTP headers to MCP servers during tool execution",
    )
@@ -2,7 +2,7 @@
2
2
 
3
3
  from datetime import datetime
4
4
  from enum import Enum
5
- from typing import Optional
5
+ from typing import List, Optional
6
6
 
7
7
  from pydantic import BaseModel, ConfigDict
8
8
 
@@ -24,9 +24,11 @@ class WorkflowExecutionStateThought(BaseModel):
24
24
  model_config = ConfigDict(populate_by_name=True)
25
25
 
26
26
  id: str
27
- text: str
28
- created_at: datetime
27
+ execution_state_id: str
29
28
  parent_id: Optional[str] = None
29
+ author_name: str
30
+ author_type: str
31
+ date: datetime
30
32
 
31
33
 
32
34
  class WorkflowExecutionState(BaseModel):
@@ -41,6 +43,7 @@ class WorkflowExecutionState(BaseModel):
41
43
  status: WorkflowExecutionStatusEnum = WorkflowExecutionStatusEnum.NOT_STARTED
42
44
  started_at: Optional[datetime] = None
43
45
  completed_at: Optional[datetime] = None
46
+ thoughts: Optional[List[WorkflowExecutionStateThought]] = None
44
47
 
45
48
 
46
49
  class WorkflowExecutionStateOutput(BaseModel):
@@ -0,0 +1,26 @@
1
+ """Workflow execution thoughts models."""
2
+
3
+ from datetime import datetime
4
+ from typing import List, Optional
5
+
6
+ from pydantic import BaseModel, ConfigDict
7
+
8
+
9
class WorkflowExecutionThought(BaseModel):
    """Model for a workflow execution thought.

    ``children`` holds nested thoughts of the same type, and ``parent_id``
    optionally points at the enclosing thought.
    """

    model_config = ConfigDict(populate_by_name=True)

    id: str
    execution_state_id: str
    parent_id: Optional[str] = None
    author_name: str
    author_type: str
    input_text: str
    content: str
    date: datetime
    # pydantic deep-copies field defaults per instance, so the shared-list
    # pitfall of plain Python defaults does not apply here.
    children: List["WorkflowExecutionThought"] = []


# Resolve the self-referencing ``children`` annotation.
WorkflowExecutionThought.model_rebuild()
@@ -1,5 +1,6 @@
1
1
  """Assistant service implementation."""
2
2
 
3
+ import inspect
3
4
  import json
4
5
  from pathlib import Path
5
6
  from typing import List, Union, Optional, Dict, Any, Literal
@@ -169,22 +170,163 @@ class AssistantService:
169
170
  """
170
171
  return self._api.get(f"/v1/assistants/prebuilt/{slug}", Assistant)
171
172
 
173
def list_versions(
    self, assistant_id: str, page: int = 0, per_page: Optional[int] = None
):
    """List assistant versions.

    Args:
        assistant_id: Assistant identifier.
        page: Page number for pagination.
        per_page: Items per page; backend defaults apply when omitted.

    Returns:
        List of AssistantVersion objects.
    """
    from ..models.assistant import AssistantVersion

    query: Dict[str, Any] = {"page": page}
    if per_page is not None:
        query["per_page"] = per_page

    raw = self._api.get(
        f"/v1/assistants/{assistant_id}/versions",
        dict,
        params=query,
        wrap_response=False,
    )
    if isinstance(raw, list):
        entries = raw
    elif isinstance(raw, dict):
        # Accept either a {"data": [...]} or a {"versions": [...]} payload.
        entries = raw.get("data") or raw.get("versions") or []
    else:
        entries = []
    return [AssistantVersion.model_validate(entry) for entry in entries]
206
+
207
def get_version(self, assistant_id: str, version_number: int):
    """Get a specific assistant version by number.

    Args:
        assistant_id: Assistant identifier.
        version_number: Version number to retrieve.

    Returns:
        AssistantVersion object.
    """
    from ..models.assistant import AssistantVersion

    result = self._api.get(
        f"/v1/assistants/{assistant_id}/versions/{version_number}", AssistantVersion
    )
    # Defensive: normalize a plain-dict payload into the model.
    if isinstance(result, dict):
        return AssistantVersion.model_validate(result)
    return result
225
+
226
def compare_versions(self, assistant_id: str, v1: int, v2: int) -> Dict[str, Any]:
    """Compare two assistant versions and return the diff summary.

    Args:
        assistant_id: Assistant identifier.
        v1: First version number.
        v2: Second version number.

    Returns:
        Generic dictionary with the comparison result (diff, summary, etc.).
    """
    endpoint = f"/v1/assistants/{assistant_id}/versions/{v1}/compare/{v2}"
    return self._api.get(endpoint, dict)
241
+
242
def rollback_to_version(
    self, assistant_id: str, version_number: int, change_notes: Optional[str] = None
) -> dict:
    """Rollback assistant to a specific version. Creates a new version mirroring target.

    Args:
        assistant_id: Assistant identifier
        version_number: Target version to rollback to
        change_notes: Optional description of why rollback is performed

    Returns:
        Backend response (dict)
    """
    payload: Dict[str, Any] = {}
    if change_notes:
        payload["change_notes"] = change_notes
    try:
        # Preferred path: dedicated rollback endpoint on the backend.
        return self._api.post(
            f"/v1/assistants/{assistant_id}/versions/{version_number}/rollback",
            dict,
            json_data=payload,
        )
    except requests.HTTPError as err:
        # Fallback for backends without the rollback endpoint: rebuild the
        # assistant state from the target version via a regular update call.
        try:
            assistant = self.get(assistant_id)
            version = self.get_version(assistant_id, version_number)

            # For each field, prefer the versioned value when the version
            # model exposes it; otherwise keep the assistant's current value.
            update_req = AssistantUpdateRequest(
                name=assistant.name,
                description=assistant.description or "",
                system_prompt=version.system_prompt,
                project=assistant.project,
                llm_model_type=version.llm_model_type or assistant.llm_model_type,
                temperature=version.temperature
                if hasattr(version, "temperature")
                else assistant.temperature,
                top_p=version.top_p
                if hasattr(version, "top_p")
                else assistant.top_p,
                context=version.context
                if hasattr(version, "context")
                else assistant.context,
                toolkits=version.toolkits
                if hasattr(version, "toolkits")
                else assistant.toolkits,
                user_prompts=assistant.user_prompts,
                shared=assistant.shared,
                is_react=assistant.is_react,
                is_global=assistant.is_global,
                slug=assistant.slug,
                mcp_servers=version.mcp_servers
                if hasattr(version, "mcp_servers")
                else assistant.mcp_servers,
                assistant_ids=version.assistant_ids
                if hasattr(version, "assistant_ids")
                else assistant.assistant_ids,
            )
            resp = self.update(assistant_id, update_req)
            # Mark the response so callers can tell the fallback path ran
            # (NOTE(review): assumes `update` returns a mutable dict — confirm).
            resp["_rollback_fallback"] = True
            resp["_target_version"] = version_number
            if change_notes:
                resp["change_notes"] = change_notes
            return resp
        except Exception:
            # If the fallback also fails, surface the original HTTP error.
            raise err
307
+
172
308
  def chat(
173
309
  self,
174
310
  assistant_id: str,
175
311
  request: AssistantChatRequest,
312
+ headers: Optional[Dict[str, str]] = None,
176
313
  ) -> Union[requests.Response, BaseModelResponse]:
177
314
  """Send a chat request to an assistant.
178
315
 
179
316
  Args:
180
317
  assistant_id: ID of the assistant to chat with
181
318
  request: Chat request details
319
+ headers: Optional additional HTTP headers (e.g., X-* for MCP propagation)
182
320
 
183
321
  Returns:
184
322
  Chat response or streaming response
185
323
  """
186
324
  pydantic_schema = None
187
- if issubclass(request.output_schema, BaseModel):
325
+ if (
326
+ request.output_schema is not None
327
+ and inspect.isclass(request.output_schema)
328
+ and issubclass(request.output_schema, BaseModel)
329
+ ):
188
330
  pydantic_schema = deepcopy(request.output_schema)
189
331
  request.output_schema = request.output_schema.model_json_schema()
190
332
 
@@ -193,6 +335,7 @@ class AssistantService:
193
335
  BaseModelResponse,
194
336
  json_data=request.model_dump(exclude_none=True, by_alias=True),
195
337
  stream=request.stream,
338
+ extra_headers=headers,
196
339
  )
197
340
  if not request.stream and pydantic_schema:
198
341
  # we do conversion to the BaseModel here because self._parse_response don't see actual request model,
@@ -201,6 +344,82 @@ class AssistantService:
201
344
 
202
345
  return response
203
346
 
347
def chat_with_version(
    self,
    assistant_id: str,
    version_number: int,
    request: AssistantChatRequest,
) -> Union[requests.Response, BaseModelResponse]:
    """Send a chat request to a specific assistant version.

    Uses the stable chat endpoint with an explicit `version` parameter to
    ensure compatibility with environments that don't expose
    /versions/{version}/model.

    Args:
        assistant_id: ID of the assistant to chat with
        version_number: version to pin chat to
        request: Chat request details

    Returns:
        Chat response or streaming response
    """
    pydantic_schema = None
    # Guard against output_schema being None or already a dict schema —
    # bare issubclass() raises TypeError on non-class arguments. This
    # mirrors the guard used by chat() and chat_by_slug().
    if (
        request.output_schema is not None
        and inspect.isclass(request.output_schema)
        and issubclass(request.output_schema, BaseModel)
    ):
        pydantic_schema = deepcopy(request.output_schema)
        request.output_schema = request.output_schema.model_json_schema()

    payload = request.model_dump(exclude_none=True, by_alias=True)
    payload["version"] = version_number

    response = self._api.post(
        f"/v1/assistants/{assistant_id}/model",
        BaseModelResponse,
        json_data=payload,
        stream=request.stream,
    )
    if not request.stream and pydantic_schema:
        # Re-hydrate the generated payload into the caller's model class.
        response.generated = pydantic_schema.model_validate(response.generated)

    return response
385
+
386
def chat_by_slug(
    self,
    assistant_slug: str,
    request: AssistantChatRequest,
    headers: Optional[Dict[str, str]] = None,
) -> Union[requests.Response, BaseModelResponse]:
    """Send a chat request to an assistant addressed by its slug.

    Args:
        assistant_slug: Slug of the assistant to chat with
        request: Chat request details
        headers: Optional additional HTTP headers (e.g., X-* for MCP propagation)

    Returns:
        Chat response or streaming response
    """
    schema_model = None
    output_schema = request.output_schema
    # Only pydantic model classes are converted; None / dict schemas pass through.
    if (
        output_schema is not None
        and inspect.isclass(output_schema)
        and issubclass(output_schema, BaseModel)
    ):
        schema_model = deepcopy(output_schema)
        request.output_schema = output_schema.model_json_schema()

    response = self._api.post(
        f"/v1/assistants/slug/{assistant_slug}/model",
        BaseModelResponse,
        json_data=request.model_dump(exclude_none=True, by_alias=True),
        stream=request.stream,
        extra_headers=headers,
    )
    if not request.stream and schema_model:
        response.generated = schema_model.model_validate(response.generated)

    return response
422
+
204
423
  def upload_file_to_chat(self, file_path: Path):
205
424
  """Upload a file to assistant chat and return the response containing file_url."""
206
425
 
@@ -0,0 +1,90 @@
1
+ """Conversation service implementation."""
2
+
3
+ from typing import List
4
+
5
+ from ..models.conversation import (
6
+ Conversation,
7
+ ConversationDetails,
8
+ ConversationCreateRequest,
9
+ )
10
+ from ..utils import ApiRequestHandler
11
+
12
+
13
class ConversationService:
    """Service for managing user conversations."""

    def __init__(self, api_domain: str, token: str, verify_ssl: bool = True):
        """Initialize the conversation service.

        Args:
            api_domain: Base URL for the API
            token: Authentication token
            verify_ssl: Whether to verify SSL certificates
        """
        self._api = ApiRequestHandler(api_domain, token, verify_ssl)

    def list(self) -> List[Conversation]:
        """Return every conversation belonging to the current user."""
        return self._api.get("/v1/conversations", List[Conversation])

    def list_by_assistant_id(self, assistant_id: str) -> List[Conversation]:
        """Return the current user's conversations that include the given assistant.

        Args:
            assistant_id: Assistant ID

        Returns:
            Conversations whose ``assistant_ids`` contain ``assistant_id``.
        """
        # The endpoint has no assistant filter, so filtering is client-side.
        all_conversations = self._api.get("/v1/conversations", List[Conversation])
        return [c for c in all_conversations if assistant_id in c.assistant_ids]

    def get_conversation(self, conversation_id: str) -> ConversationDetails:
        """Fetch details for one conversation by its ID.

        Args:
            conversation_id: Conversation ID

        Returns:
            Conversation details
        """
        endpoint = f"/v1/conversations/{conversation_id}"
        return self._api.get(endpoint, ConversationDetails)

    def create(self, request: ConversationCreateRequest) -> dict:
        """Create a new conversation.

        Args:
            request: Conversation creation request

        Returns:
            Created conversation details
        """
        body = request.model_dump(exclude_none=True)
        return self._api.post("/v1/conversations", dict, json_data=body)

    def delete(self, conversation_id: str) -> dict:
        """Delete a conversation by its ID.

        Args:
            conversation_id: Conversation ID to delete

        Returns:
            Deletion confirmation
        """
        return self._api.delete(f"/v1/conversations/{conversation_id}", dict)
@@ -15,7 +15,11 @@ from ..models.datasource import (
15
15
  UpdateCodeDataSourceRequest,
16
16
  BaseUpdateDataSourceRequest,
17
17
  FileDataSourceRequest,
18
+ CodeAnalysisDataSourceRequest,
19
+ CodeExplorationDataSourceRequest,
20
+ ElasticsearchStatsResponse,
18
21
  )
22
+ from ..models.assistant import AssistantListResponse
19
23
  from ..utils import ApiRequestHandler
20
24
 
21
25
 
@@ -206,3 +210,66 @@ class DatasourceService:
206
210
  Deletion confirmation
207
211
  """
208
212
  return self._api.delete(f"/v1/index/{datasource_id}", dict)
213
+
214
def get_assistants_using_datasource(
    self, datasource_id: str
) -> List[AssistantListResponse]:
    """Return the assistants that use this datasource.

    Args:
        datasource_id: ID of the datasource

    Returns:
        List of AssistantListResponse objects containing assistants using this datasource

    Raises:
        ApiError: If the datasource is not found or other API errors occur.
    """
    endpoint = f"/v1/index/{datasource_id}/assistants"
    return self._api.get(endpoint, List[AssistantListResponse])
231
+
232
def create_provider_datasource(
    self,
    toolkit_id: str,
    provider_name: str,
    request: Union[CodeAnalysisDataSourceRequest, CodeExplorationDataSourceRequest],
) -> dict:
    """Create a provider-based datasource.

    Args:
        toolkit_id: ID of the toolkit to use
        provider_name: Name of the provider
        request: Provider datasource creation request (CodeAnalysisDataSourceRequest or CodeExplorationDataSourceRequest)

    Returns:
        dict: Response from the server containing operation status
    """
    from urllib.parse import urlencode

    # URL-encode the query values so identifiers containing reserved
    # characters (spaces, '&', '/', ...) cannot corrupt the request URL.
    query = urlencode({"toolkit_id": toolkit_id, "provider_name": provider_name})
    endpoint = f"/v1/index/provider?{query}"

    return self._api.post(
        endpoint,
        dict,
        json_data=request.model_dump(by_alias=True, exclude_none=True),
    )
257
+
258
def get_elasticsearch_stats(self, datasource_id: str) -> ElasticsearchStatsResponse:
    """Fetch Elasticsearch statistics for a datasource index.

    Args:
        datasource_id: ID of the datasource

    Returns:
        ElasticsearchStatsResponse with Elasticsearch statistics including:
            - index_name: Name of the index in Elasticsearch
            - size_in_bytes: Size of the index in bytes

    Raises:
        ApiError: If the datasource is not found, platform datasources are not supported,
            or Elasticsearch statistics are not available.
    """
    endpoint = f"/v1/index/{datasource_id}/elasticsearch"
    return self._api.get(endpoint, ElasticsearchStatsResponse)