codemie-sdk-python 0.1.226__py3-none-any.whl → 0.1.273__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -170,16 +170,153 @@ class AssistantService:
         """
         return self._api.get(f"/v1/assistants/prebuilt/{slug}", Assistant)
 
+    def list_versions(
+        self, assistant_id: str, page: int = 0, per_page: Optional[int] = None
+    ):
+        """List assistant versions.
+
+        Args:
+            assistant_id: Assistant identifier
+            page: Page number for pagination
+            per_page: Items per page (optional). If not provided, backend defaults are used.
+
+        Returns:
+            List of AssistantVersion objects
+        """
+
+        params: Dict[str, Any] = {"page": page}
+        if per_page is not None:
+            params["per_page"] = per_page
+        from ..models.assistant import AssistantVersion
+
+        raw = self._api.get(
+            f"/v1/assistants/{assistant_id}/versions",
+            dict,
+            params=params,
+            wrap_response=False,
+        )
+        items = []
+        if isinstance(raw, list):
+            items = raw
+        elif isinstance(raw, dict):
+            items = raw.get("data") or raw.get("versions") or []
+        else:
+            items = []
+        return [AssistantVersion.model_validate(it) for it in items]
+
+    def get_version(self, assistant_id: str, version_number: int):
+        """Get a specific assistant version by number.
+
+        Args:
+            assistant_id: Assistant identifier
+            version_number: Version number to retrieve
+
+        Returns:
+            AssistantVersion object
+        """
+        from ..models.assistant import AssistantVersion
+
+        raw = self._api.get(
+            f"/v1/assistants/{assistant_id}/versions/{version_number}", AssistantVersion
+        )
+        if isinstance(raw, dict):
+            return AssistantVersion.model_validate(raw)
+        return raw
+
+    def compare_versions(self, assistant_id: str, v1: int, v2: int) -> Dict[str, Any]:
+        """Compare two assistant versions and return diff summary.
+
+        Args:
+            assistant_id: Assistant identifier
+            v1: First version number
+            v2: Second version number
+
+        Returns:
+            Generic dictionary with comparison result (diff, summary, etc.)
+        """
+        return self._api.get(
+            f"/v1/assistants/{assistant_id}/versions/{v1}/compare/{v2}",
+            dict,
+        )
+
+    def rollback_to_version(
+        self, assistant_id: str, version_number: int, change_notes: Optional[str] = None
+    ) -> dict:
+        """Rollback assistant to a specific version. Creates a new version mirroring target.
+
+        Args:
+            assistant_id: Assistant identifier
+            version_number: Target version to rollback to
+            change_notes: Optional description of why rollback is performed
+
+        Returns:
+            Backend response (dict)
+        """
+        payload: Dict[str, Any] = {}
+        if change_notes:
+            payload["change_notes"] = change_notes
+        try:
+            return self._api.post(
+                f"/v1/assistants/{assistant_id}/versions/{version_number}/rollback",
+                dict,
+                json_data=payload,
+            )
+        except requests.HTTPError as err:
+            try:
+                assistant = self.get(assistant_id)
+                version = self.get_version(assistant_id, version_number)
+
+                update_req = AssistantUpdateRequest(
+                    name=assistant.name,
+                    description=assistant.description or "",
+                    system_prompt=version.system_prompt,
+                    project=assistant.project,
+                    llm_model_type=version.llm_model_type or assistant.llm_model_type,
+                    temperature=version.temperature
+                    if hasattr(version, "temperature")
+                    else assistant.temperature,
+                    top_p=version.top_p
+                    if hasattr(version, "top_p")
+                    else assistant.top_p,
+                    context=version.context
+                    if hasattr(version, "context")
+                    else assistant.context,
+                    toolkits=version.toolkits
+                    if hasattr(version, "toolkits")
+                    else assistant.toolkits,
+                    user_prompts=assistant.user_prompts,
+                    shared=assistant.shared,
+                    is_react=assistant.is_react,
+                    is_global=assistant.is_global,
+                    slug=assistant.slug,
+                    mcp_servers=version.mcp_servers
+                    if hasattr(version, "mcp_servers")
+                    else assistant.mcp_servers,
+                    assistant_ids=version.assistant_ids
+                    if hasattr(version, "assistant_ids")
+                    else assistant.assistant_ids,
+                )
+                resp = self.update(assistant_id, update_req)
+                resp["_rollback_fallback"] = True
+                resp["_target_version"] = version_number
+                if change_notes:
+                    resp["change_notes"] = change_notes
+                return resp
+            except Exception:
+                raise err
+
     def chat(
         self,
         assistant_id: str,
         request: AssistantChatRequest,
+        headers: Optional[Dict[str, str]] = None,
     ) -> Union[requests.Response, BaseModelResponse]:
         """Send a chat request to an assistant.
 
         Args:
             assistant_id: ID of the assistant to chat with
             request: Chat request details
+            headers: Optional additional HTTP headers (e.g., X-* for MCP propagation)
 
         Returns:
             Chat response or streaming response
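
Usage sketch for the new versioning methods (illustrative, not part of the diff). It assumes an already configured AssistantService instance bound to the name `assistants`; the assistant ID and version numbers are placeholders.

    # List versions, compare two of them, then roll back (IDs and numbers are placeholders).
    versions = assistants.list_versions("asst-123", page=0, per_page=10)
    print(len(versions), "versions")

    diff = assistants.compare_versions("asst-123", v1=1, v2=2)
    print(diff)  # generic dict with the comparison result

    # Creates a new version mirroring version 1; falls back to a manual update on HTTP errors.
    assistants.rollback_to_version("asst-123", 1, change_notes="revert prompt change")
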
@@ -198,6 +335,7 @@ class AssistantService:
             BaseModelResponse,
             json_data=request.model_dump(exclude_none=True, by_alias=True),
             stream=request.stream,
+            extra_headers=headers,
         )
         if not request.stream and pydantic_schema:
             # we do conversion to the BaseModel here because self._parse_response don't see actual request model,
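
A minimal sketch of calling chat with the new optional headers argument (illustrative, not part of the diff). `assistants` is assumed to be a configured AssistantService; the AssistantChatRequest field names used to build the request are assumptions, since the model definition is not shown in this diff.

    # Forward extra X-* headers along with the chat request (header name is an example).
    request = AssistantChatRequest(text="Hello", stream=False)  # "text" field is an assumption
    response = assistants.chat(
        "asst-123",
        request,
        headers={"X-Correlation-Id": "demo-123"},
    )
    print(response.generated)
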
@@ -206,6 +344,82 @@ class AssistantService:
 
         return response
 
+    def chat_with_version(
+        self,
+        assistant_id: str,
+        version_number: int,
+        request: AssistantChatRequest,
+    ) -> Union[requests.Response, BaseModelResponse]:
+        """Send a chat request to a specific assistant version.
+
+        Uses the stable chat endpoint with an explicit `version` parameter to
+        ensure compatibility with environments that don't expose
+        /versions/{version}/model.
+
+        Args:
+            assistant_id: ID of the assistant to chat with
+            version_number: version to pin chat to
+            request: Chat request details
+
+        Returns:
+            Chat response or streaming response
+        """
+        pydantic_schema = None
+        if (
+            request.output_schema is not None
+            and inspect.isclass(request.output_schema)
+            and issubclass(request.output_schema, BaseModel)
+        ):
+            pydantic_schema = deepcopy(request.output_schema)
+            request.output_schema = request.output_schema.model_json_schema()
+
+        payload = request.model_dump(exclude_none=True, by_alias=True)
+        payload["version"] = version_number
+
+        response = self._api.post(
+            f"/v1/assistants/{assistant_id}/model",
+            BaseModelResponse,
+            json_data=payload,
+            stream=request.stream,
+        )
+        if not request.stream and pydantic_schema:
+            response.generated = pydantic_schema.model_validate(response.generated)
+
+        return response
+
+    def chat_by_slug(
+        self,
+        assistant_slug: str,
+        request: AssistantChatRequest,
+        headers: Optional[Dict[str, str]] = None,
+    ) -> Union[requests.Response, BaseModelResponse]:
+        """Send a chat request to an assistant by slug.
+
+        Args:
+            assistant_slug: Slug of the assistant to chat with
+            request: Chat request details
+            headers: Optional additional HTTP headers (e.g., X-* for MCP propagation)
+
+        Returns:
+            Chat response or streaming response
+        """
+        pydantic_schema = None
+        if (
+            request.output_schema is not None
+            and inspect.isclass(request.output_schema)
+            and issubclass(request.output_schema, BaseModel)
+        ):
+            pydantic_schema = deepcopy(request.output_schema)
+            request.output_schema = request.output_schema.model_json_schema()
+
+        response = self._api.post(
+            f"/v1/assistants/slug/{assistant_slug}/model",
+            BaseModelResponse,
+            json_data=request.model_dump(exclude_none=True, by_alias=True),
+            stream=request.stream,
+            extra_headers=headers,
+        )
+        if not request.stream and pydantic_schema:
+            response.generated = pydantic_schema.model_validate(response.generated)
+
+        return response
+
     def upload_file_to_chat(self, file_path: Path):
         """Upload a file to assistant chat and return the response containing file_url."""
 
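
Sketch of pinning a chat to a specific version and of the slug-based variant (illustrative, not part of the diff). `assistants`, the slug, and the version number are placeholders; `request` is built as in the previous sketch.

    # Pin the chat to version 2 of the assistant.
    pinned = assistants.chat_with_version("asst-123", 2, request)
    print(pinned.generated)

    # Or address the assistant by slug and pass through extra headers.
    by_slug = assistants.chat_by_slug(
        "release-bot", request, headers={"X-Correlation-Id": "demo-123"}
    )
    print(by_slug.generated)
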
@@ -15,7 +15,11 @@ from ..models.datasource import (
     UpdateCodeDataSourceRequest,
     BaseUpdateDataSourceRequest,
     FileDataSourceRequest,
+    CodeAnalysisDataSourceRequest,
+    CodeExplorationDataSourceRequest,
+    ElasticsearchStatsResponse,
 )
+from ..models.assistant import AssistantListResponse
 from ..utils import ApiRequestHandler
 
 
@@ -206,3 +210,66 @@ class DatasourceService:
             Deletion confirmation
         """
         return self._api.delete(f"/v1/index/{datasource_id}", dict)
+
+    def get_assistants_using_datasource(
+        self, datasource_id: str
+    ) -> List[AssistantListResponse]:
+        """Get list of assistants that are using this datasource.
+
+        Args:
+            datasource_id: ID of the datasource
+
+        Returns:
+            List of AssistantListResponse objects containing assistants using this datasource
+
+        Raises:
+            ApiError: If the datasource is not found or other API errors occur.
+        """
+        return self._api.get(
+            f"/v1/index/{datasource_id}/assistants", List[AssistantListResponse]
+        )
+
+    def create_provider_datasource(
+        self,
+        toolkit_id: str,
+        provider_name: str,
+        request: Union[CodeAnalysisDataSourceRequest, CodeExplorationDataSourceRequest],
+    ) -> dict:
+        """Create a provider-based datasource.
+
+        Args:
+            toolkit_id: ID of the toolkit to use
+            provider_name: Name of the provider
+            request: Provider datasource creation request (CodeAnalysisDataSourceRequest or CodeExplorationDataSourceRequest)
+
+        Returns:
+            dict: Response from the server containing operation status
+        """
+        endpoint = (
+            f"/v1/index/provider?toolkit_id={toolkit_id}&provider_name={provider_name}"
+        )
+
+        return self._api.post(
+            endpoint,
+            dict,
+            json_data=request.model_dump(by_alias=True, exclude_none=True),
+        )
+
+    def get_elasticsearch_stats(self, datasource_id: str) -> ElasticsearchStatsResponse:
+        """Get Elasticsearch statistics for a specific datasource index.
+
+        Args:
+            datasource_id: ID of the datasource
+
+        Returns:
+            ElasticsearchStatsResponse with Elasticsearch statistics including:
+            - index_name: Name of the index in Elasticsearch
+            - size_in_bytes: Size of the index in bytes
+
+        Raises:
+            ApiError: If the datasource is not found, platform datasources are not supported,
+                or Elasticsearch statistics are not available.
+        """
+        return self._api.get(
+            f"/v1/index/{datasource_id}/elasticsearch", ElasticsearchStatsResponse
+        )
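
Illustrative calls for the new DatasourceService endpoints (not part of the diff). `datasources` stands in for a configured DatasourceService instance and the datasource ID is a placeholder.

    # Index statistics; field names follow the docstring above.
    stats = datasources.get_elasticsearch_stats("ds-42")
    print(stats.index_name, stats.size_in_bytes)

    # Assistants currently wired to this datasource.
    for assistant in datasources.get_assistants_using_datasource("ds-42"):
        print(assistant)  # AssistantListResponse entries
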
@@ -59,3 +59,24 @@ class FileOperationService:
         )
 
         return response
+
+    def get_file(self, file_id: str) -> bytes:
+        """Get a file by its ID.
+
+        Args:
+            file_id: The file identifier (base64 encoded ID from file_url)
+
+        Returns:
+            bytes: The file content as binary data
+
+        Raises:
+            ApiError: If the file doesn't exist or there's an API error
+        """
+        import requests
+
+        response = self._api.get(
+            f"/v1/files/{file_id}",
+            response_model=requests.Response,
+            wrap_response=False,
+        )
+        return response.content
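
A short sketch of retrieving previously uploaded content with the new get_file helper (illustrative, not part of the diff). `files` stands in for a configured FileOperationService; the file ID is a placeholder taken from a file_url returned by upload_file_to_chat.

    # Download the raw bytes and persist them locally (ID and filename are placeholders).
    content = files.get_file("ZmlsZS0xMjM=")
    with open("downloaded.bin", "wb") as fh:
        fh.write(content)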