litellm-enterprise 0.1.28__py3-none-any.whl → 0.1.29__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
@@ -36,7 +36,7 @@ class EnterpriseRouteChecks:
         if not premium_user:
             raise HTTPException(
                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-                detail=f"🚨🚨🚨 DISABLING LLM API ENDPOINTS is an Enterprise feature\n🚨 {CommonProxyErrors.not_premium_user.value}",
+                detail=f"🚨🚨🚨 DISABLING ADMIN ENDPOINTS is an Enterprise feature\n🚨 {CommonProxyErrors.not_premium_user.value}",
             )
 
         return get_secret_bool("DISABLE_ADMIN_ENDPOINTS") is True
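Note on this hunk: the old error string said "DISABLING LLM API ENDPOINTS" even though the check guards the DISABLE_ADMIN_ENDPOINTS flag; the new text matches the flag. A minimal standalone sketch of the same gate, assuming an environment-variable-backed get_secret_bool (the function names below are illustrative; LiteLLM's real helper may also consult secret managers):

    import os
    from fastapi import HTTPException, status

    def get_secret_bool(name: str) -> bool:
        # Hypothetical stand-in for LiteLLM's secret helper.
        return os.getenv(name, "").strip().lower() in ("1", "true", "yes")

    def is_admin_endpoints_disabled(premium_user: bool) -> bool:
        if not premium_user:
            # Enterprise-only flag: non-premium deployments get a hard error.
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="DISABLING ADMIN ENDPOINTS is an Enterprise feature",
            )
        return get_secret_bool("DISABLE_ADMIN_ENDPOINTS") is True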
@@ -244,6 +244,78 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             return managed_object.created_by == user_id
         return True  # don't raise error if managed object is not found
 
+    async def list_user_batches(
+        self,
+        user_api_key_dict: UserAPIKeyAuth,
+        limit: Optional[int] = None,
+        after: Optional[str] = None,
+        provider: Optional[str] = None,
+        target_model_names: Optional[str] = None,
+        llm_router: Optional[Router] = None,
+    ) -> Dict[str, Any]:
+        # Provider filtering is not supported for managed batches,
+        # because the encoded object ids stored in the managed-objects table do not contain provider information.
+        # Supporting it would require encoding the provider into the object ids.
+        if provider:
+            raise Exception(
+                "Filtering by 'provider' is not supported when using managed batches."
+            )
+
+        # Model-name filtering is not supported for managed batches either,
+        # because the encoded object ids do not contain the model name.
+        # A hash of the model name + its litellm_params is encoded as the model id, which is not sufficient to reliably map target model names to model ids.
+        if target_model_names:
+            raise Exception(
+                "Filtering by 'target_model_names' is not supported when using managed batches."
+            )
+
+        where_clause: Dict[str, Any] = {"file_purpose": "batch"}
+
+        # Filter by the user who created the batch
+        if user_api_key_dict.user_id:
+            where_clause["created_by"] = user_api_key_dict.user_id
+
+        if after:
+            where_clause["id"] = {"gt": after}
+
+        # Fetch more than needed to allow for post-fetch filtering
+        fetch_limit = limit or 20
+        if target_model_names:
+            # Fetch extra to account for filtering
+            fetch_limit = max(fetch_limit * 3, 100)
+
+        batches = await self.prisma_client.db.litellm_managedobjecttable.find_many(
+            where=where_clause,
+            take=fetch_limit,
+            order={"created_at": "desc"},
+        )
+
+        batch_objects: List[LiteLLMBatch] = []
+        for batch in batches:
+            try:
+                # Stop once we have enough after filtering
+                if len(batch_objects) >= (limit or 20):
+                    break
+
+                batch_data = json.loads(batch.file_object) if isinstance(batch.file_object, str) else batch.file_object
+                batch_obj = LiteLLMBatch(**batch_data)
+                batch_obj.id = batch.unified_object_id
+                batch_objects.append(batch_obj)
+
+            except Exception as e:
+                verbose_logger.warning(
+                    f"Failed to parse batch object {batch.unified_object_id}: {e}"
+                )
+                continue
+
+        return {
+            "object": "list",
+            "data": batch_objects,
+            "first_id": batch_objects[0].id if batch_objects else None,
+            "last_id": batch_objects[-1].id if batch_objects else None,
+            "has_more": len(batch_objects) == (limit or 20),
+        }
+
     async def get_user_created_file_ids(
         self, user_api_key_dict: UserAPIKeyAuth, model_object_ids: List[str]
     ) -> List[OpenAIFileObject]:
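Two observations about list_user_batches as added here: the `fetch_limit = max(fetch_limit * 3, 100)` branch is unreachable, since a non-empty target_model_names already raised above it, and `has_more` is a heuristic that reports True whenever a full page comes back, even when no further rows exist. Note also that the `after` cursor filters ids ascending ("gt") while results are ordered by created_at descending, so paging assumes ids and creation times sort consistently. A sketch of how a caller might page through results under those assumptions (`managed_files` and `user_auth` are hypothetical stand-ins for a _PROXY_LiteLLMManagedFiles instance and a UserAPIKeyAuth object obtained elsewhere):

    async def print_all_batch_ids(managed_files, user_auth) -> None:
        after = None
        while True:
            page = await managed_files.list_user_batches(
                user_api_key_dict=user_auth, limit=20, after=after
            )
            for batch in page["data"]:
                print(batch.id)  # the unified_object_id, per the hunk above
            if not page["has_more"] or page["last_id"] is None:
                break
            after = page["last_id"]  # next page: ids strictly greater than this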
@@ -297,6 +369,8 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         if (
             call_type == CallTypes.afile_content.value
             or call_type == CallTypes.afile_delete.value
+            or call_type == CallTypes.afile_retrieve.value
+            or call_type == CallTypes.afile_content.value
         ):
             await self.check_managed_file_id_access(data, user_api_key_dict)
 
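The second added line duplicates the afile_content check that already opens this condition, so it is redundant (though harmless). A duplicate-free equivalent, sketched as a set lookup (the constant name is illustrative):

    FILE_ID_ACCESS_CALL_TYPES = {
        CallTypes.afile_content.value,
        CallTypes.afile_delete.value,
        CallTypes.afile_retrieve.value,
    }
    if call_type in FILE_ID_ACCESS_CALL_TYPES:
        await self.check_managed_file_id_access(data, user_api_key_dict)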
@@ -361,12 +435,16 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             data["model_file_id_mapping"] = model_file_id_mapping
         elif (
             call_type == CallTypes.aretrieve_batch.value
+            or call_type == CallTypes.acancel_batch.value
             or call_type == CallTypes.acancel_fine_tuning_job.value
             or call_type == CallTypes.aretrieve_fine_tuning_job.value
         ):
             accessor_key: Optional[str] = None
             retrieve_object_id: Optional[str] = None
-            if call_type == CallTypes.aretrieve_batch.value:
+            if (
+                call_type == CallTypes.aretrieve_batch.value
+                or call_type == CallTypes.acancel_batch.value
+            ):
                 accessor_key = "batch_id"
             elif (
                 call_type == CallTypes.acancel_fine_tuning_job.value
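This hunk routes batch cancellation through the same access check as batch retrieval: both call types resolve accessor_key to "batch_id", the request field that carries the managed object id. The same dispatch could be sketched as a lookup table (an assumed-equivalent refactoring, not the shipped code; the fine-tuning entries are omitted because their accessor key does not appear in this hunk):

    ACCESSOR_KEY_BY_CALL_TYPE = {
        CallTypes.aretrieve_batch.value: "batch_id",
        CallTypes.acancel_batch.value: "batch_id",
        # fine-tuning call types map to their own accessor key, elided here
    }
    accessor_key = ACCESSOR_KEY_BY_CALL_TYPE.get(call_type)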
@@ -382,6 +460,8 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
                 if retrieve_object_id
                 else False
             )
+            print(f"🔥potential_llm_object_id: {potential_llm_object_id}")
+            print(f"🔥retrieve_object_id: {retrieve_object_id}")
             if potential_llm_object_id and retrieve_object_id:
                 ## VALIDATE USER HAS ACCESS TO THE OBJECT ##
                 if not await self.can_user_call_unified_object_id(
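These two print calls read as leftover debug output that shipped in the release. A quieter equivalent would use the logger this module already relies on elsewhere in the diff (a sketch, assuming verbose_logger is in scope at this point in the file):

    verbose_logger.debug(
        "potential_llm_object_id=%s retrieve_object_id=%s",
        potential_llm_object_id,
        retrieve_object_id,
    )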
@@ -673,6 +753,7 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             bytes=file_objects[0].bytes,
             filename=file_objects[0].filename,
             status="uploaded",
+            expires_at=file_objects[0].expires_at,
         )
 
         return response
@@ -893,8 +974,10 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         delete_response = None
         specific_model_file_id_mapping = model_file_id_mapping.get(file_id)
         if specific_model_file_id_mapping:
+            # Remove conflicting keys from data to avoid duplicate keyword arguments
+            filtered_data = {k: v for k, v in data.items() if k not in ("model", "file_id")}
             for model_id, model_file_id in specific_model_file_id_mapping.items():
-                delete_response = await llm_router.afile_delete(model=model_id, file_id=model_file_id, **data)  # type: ignore
+                delete_response = await llm_router.afile_delete(model=model_id, file_id=model_file_id, **filtered_data)  # type: ignore
 
         stored_file_object = await self.delete_unified_file_id(
             file_id, litellm_parent_otel_span
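The filter fixes a duplicate-keyword-argument bug: when `data` still contains "model" or "file_id", expanding it with ** alongside the explicit keywords raises TypeError. A minimal standalone demonstration (the demo function and values are hypothetical):

    def afile_delete_demo(model: str, file_id: str, **kwargs) -> str:
        return f"deleted {file_id} on {model}"

    data = {"model": "azure-gpt", "file_id": "file-abc", "timeout": 30}

    # afile_delete_demo(model="m1", file_id="f1", **data)
    # -> TypeError: afile_delete_demo() got multiple values for keyword argument 'model'

    filtered = {k: v for k, v in data.items() if k not in ("model", "file_id")}
    afile_delete_demo(model="m1", file_id="f1", **filtered)  # OK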
@@ -282,6 +282,8 @@ async def get_vector_store_info(
         updated_at=vector_store.get("updated_at") or None,
         litellm_credential_name=vector_store.get("litellm_credential_name"),
         litellm_params=vector_store.get("litellm_params") or None,
+        team_id=vector_store.get("team_id"),
+        user_id=vector_store.get("user_id"),
     )
     return {"vector_store": vector_store_pydantic_obj}
 
@@ -1,7 +1,8 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.4
 Name: litellm-enterprise
-Version: 0.1.28
+Version: 0.1.29
 Summary: Package for LiteLLM Enterprise features
+License-File: LICENSE.md
 Author: BerriAI
 Requires-Python: >=3.8, !=2.7.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*
 Classifier: Programming Language :: Python :: 3
@@ -9,6 +10,8 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Project-URL: Documentation, https://docs.litellm.ai
 Project-URL: Homepage, https://litellm.ai
 Project-URL: Repository, https://github.com/BerriAI/litellm
@@ -110,24 +110,24 @@ litellm_enterprise/proxy/__init__.py,sha256=mnCBJ6Y98ai1RfVpYpCxavTPSTmdZ0e-kxX1
 litellm_enterprise/proxy/audit_logging_endpoints.py,sha256=BnHczmi4bnW1GpMNsq4CvnbwL3rgQ-pnrtFd5WBbbHY,5304
 litellm_enterprise/proxy/auth/__init__.py,sha256=wTXtbDcLrD_qecxJfEJtraeCvGfldDgLz8qdVggLoSI,301
 litellm_enterprise/proxy/auth/custom_sso_handler.py,sha256=ITML9dRKL-LuJhU3WKKVPDp0ECfYxvxTvuX8GpSM0gE,3439
-litellm_enterprise/proxy/auth/route_checks.py,sha256=FbXwbrOkFr1dODH6XxoIpLG1nKowC7kyNaRR0WR6ujU,2490
+litellm_enterprise/proxy/auth/route_checks.py,sha256=_0b-B6Lg5Ni2oQLCYrRdQdTi5dyX4yxoIsKy8P6dD34,2488
 litellm_enterprise/proxy/auth/user_api_key_auth.py,sha256=7t5Q-JoKFyoymylaOT8KWAAOFVz0JOTl7PPOmTkpj5c,1144
 litellm_enterprise/proxy/common_utils/__init__.py,sha256=zmdmvktxSsUOpGp1TxwJeV8zyLPt4938NXtato4YqLI,56
 litellm_enterprise/proxy/common_utils/check_batch_cost.py,sha256=V0CCHtN-JV-_d-ydXV-cVs3zCImt1699JnICGF3oPOk,7360
 litellm_enterprise/proxy/common_utils/check_responses_cost.py,sha256=EKZ78mzpYjxmOtSXeoxfXRZX88MJtaoMyx6NQEPC8AU,4301
 litellm_enterprise/proxy/enterprise_routes.py,sha256=ToJVSSNaYUotzgIg-kWsfsqh2E0GnQirOPkpE4YkHNg,907
-litellm_enterprise/proxy/hooks/managed_files.py,sha256=Bbcd4mipfELbNz9VULv_DJBoKutfryEBZvCG1fC4Uf8,43898
+litellm_enterprise/proxy/hooks/managed_files.py,sha256=NFgzBXMsU-0QrT91CCZ79y1d-FTTZVB8wUasoJUC8NE,47664
 litellm_enterprise/proxy/management_endpoints/__init__.py,sha256=zfaqryxzmFu6se-w4yR2nlHKxDOOtHAWEehA2xFbFNg,270
 litellm_enterprise/proxy/management_endpoints/internal_user_endpoints.py,sha256=GEoOVujrtKXDHfko2KQaLn-ms64zkutFE9PP5IhBBLM,2175
 litellm_enterprise/proxy/management_endpoints/key_management_endpoints.py,sha256=-IXRzVrNQ3_krL-gxngelYQftwyPlB_HmgI3RN-HdvM,1147
 litellm_enterprise/proxy/proxy_server.py,sha256=fzOeTyiyevLWi2767-2W1Co7reR-0wnoUIhOgVlJFQc,1183
 litellm_enterprise/proxy/readme.md,sha256=ZcigMJYSHWs4SWnYriWjrSVDJKsu44c2HsbYbma0EHU,397
 litellm_enterprise/proxy/utils.py,sha256=y4ADfhlEG_mH0x5rfIg7D9FjS586lVgQ9DL0tTdgrMQ,962
-litellm_enterprise/proxy/vector_stores/endpoints.py,sha256=6Guh6zIH00dh2XXStn6GblTGpGyE4hZJ9WThVZggDQg,12944
+litellm_enterprise/proxy/vector_stores/endpoints.py,sha256=3F5A9kdttA72PGzVP3aR2d1kkDkdGvc5O196k2iOmRc,13058
 litellm_enterprise/types/enterprise_callbacks/send_emails.py,sha256=AouBXqb1EB1-Mg3fM_3UjUDihIA45zIjRgA6M4vQ7Zw,2150
 litellm_enterprise/types/proxy/audit_logging_endpoints.py,sha256=oSJVAuRD9r6ZjRCqNBFM-J5HSgOltsXts400b2aynRE,894
 litellm_enterprise/types/proxy/proxy_server.py,sha256=kdhtxsU2uok6-XO_ebugCv7PzYYmGgv4vh-XemHJnpM,146
-litellm_enterprise-0.1.28.dist-info/LICENSE.md,sha256=nq3D9ZqOvRDT6hLkypQFTc3XsE15kbkg5rkkLJVSqKY,2251
-litellm_enterprise-0.1.28.dist-info/METADATA,sha256=d9TUgJdtXBhVu8JQsTyTk0W9lyVuQPLI2ajs_I8Pmqk,1314
-litellm_enterprise-0.1.28.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-litellm_enterprise-0.1.28.dist-info/RECORD,,
+litellm_enterprise-0.1.29.dist-info/METADATA,sha256=cXR-b-_hiodo6to929eOoe7zPfqy-fpoKbAaU0hT578,1441
+litellm_enterprise-0.1.29.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+litellm_enterprise-0.1.29.dist-info/licenses/LICENSE.md,sha256=nq3D9ZqOvRDT6hLkypQFTc3XsE15kbkg5rkkLJVSqKY,2251
+litellm_enterprise-0.1.29.dist-info/RECORD,,
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 1.9.0
+Generator: poetry-core 2.3.1
 Root-Is-Purelib: true
 Tag: py3-none-any