litellm-enterprise 0.1.28__py3-none-any.whl → 0.1.30__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- litellm_enterprise/enterprise_callbacks/send_emails/base_email.py +95 -7
- litellm_enterprise/proxy/auth/route_checks.py +1 -1
- litellm_enterprise/proxy/common_utils/check_batch_cost.py +1 -1
- litellm_enterprise/proxy/hooks/managed_files.py +170 -10
- litellm_enterprise/proxy/vector_stores/endpoints.py +2 -0
- {litellm_enterprise-0.1.28.dist-info → litellm_enterprise-0.1.30.dist-info}/METADATA +5 -2
- {litellm_enterprise-0.1.28.dist-info → litellm_enterprise-0.1.30.dist-info}/RECORD +9 -9
- {litellm_enterprise-0.1.28.dist-info → litellm_enterprise-0.1.30.dist-info}/WHEEL +1 -1
- {litellm_enterprise-0.1.28.dist-info → litellm_enterprise-0.1.30.dist-info/licenses}/LICENSE.md +0 -0

litellm_enterprise/enterprise_callbacks/send_emails/base_email.py

@@ -30,8 +30,15 @@ from litellm.integrations.email_templates.user_invitation_email import (
 from litellm.integrations.email_templates.templates import (
     MAX_BUDGET_ALERT_EMAIL_TEMPLATE,
     SOFT_BUDGET_ALERT_EMAIL_TEMPLATE,
+    TEAM_SOFT_BUDGET_ALERT_EMAIL_TEMPLATE,
+)
+from litellm.proxy._types import (
+    CallInfo,
+    InvitationNew,
+    Litellm_EntityType,
+    UserAPIKeyAuth,
+    WebhookEvent,
 )
-from litellm.proxy._types import CallInfo, InvitationNew, UserAPIKeyAuth, WebhookEvent
 from litellm.secret_managers.main import get_secret_bool
 from litellm.types.integrations.slack_alerting import LITELLM_LOGO_URL
 from litellm.constants import (

@@ -217,6 +224,68 @@ class BaseEmailLogger(CustomLogger):
         )
         pass

+    async def send_team_soft_budget_alert_email(self, event: WebhookEvent):
+        """
+        Send email to team members when team soft budget is crossed
+        Supports multiple recipients via alert_emails field from team metadata
+        """
+        # Collect all recipient emails
+        recipient_emails: List[str] = []
+
+        # Add additional alert emails from team metadata.soft_budget_alert_emails
+        if hasattr(event, "alert_emails") and event.alert_emails:
+            for email in event.alert_emails:
+                if email and email not in recipient_emails:  # Avoid duplicates
+                    recipient_emails.append(email)
+
+        # If no recipients found, skip sending
+        if not recipient_emails:
+            verbose_proxy_logger.warning(
+                f"No recipient emails found for team soft budget alert. event={event.model_dump(exclude_none=True)}"
+            )
+            return
+
+        verbose_proxy_logger.debug(
+            f"send_team_soft_budget_alert_email_event: {json.dumps(event.model_dump(exclude_none=True), indent=4, default=str)}"
+        )
+
+        # Get email params using the first recipient email (for template formatting)
+        email_params = await self._get_email_params(
+            email_event=EmailEvent.soft_budget_crossed,
+            user_id=event.user_id,
+            user_email=recipient_emails[0],
+            event_message=event.event_message,
+        )
+
+        # Format budget values
+        soft_budget_str = f"${event.soft_budget}" if event.soft_budget is not None else "N/A"
+        spend_str = f"${event.spend}" if event.spend is not None else "$0.00"
+        max_budget_info = ""
+        if event.max_budget is not None:
+            max_budget_info = f"<b>Maximum Budget:</b> ${event.max_budget} <br />"
+
+        # Use team alias or generic greeting
+        team_alias = event.team_alias or "Team"
+
+        email_html_content = TEAM_SOFT_BUDGET_ALERT_EMAIL_TEMPLATE.format(
+            email_logo_url=email_params.logo_url,
+            team_alias=team_alias,
+            soft_budget=soft_budget_str,
+            spend=spend_str,
+            max_budget_info=max_budget_info,
+            base_url=email_params.base_url,
+            email_support_contact=email_params.support_contact,
+        )
+
+        # Send email to all recipients
+        await self.send_email(
+            from_email=self.DEFAULT_LITELLM_EMAIL,
+            to_email=recipient_emails,
+            subject=email_params.subject,
+            html_body=email_html_content,
+        )
+        pass
+
     async def send_max_budget_alert_email(self, event: WebhookEvent):
         """
         Send email to user when max budget alert threshold is reached
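
For readers skimming the hunk above: the new send_team_soft_budget_alert_email mostly prepares two things before handing off to the template and mailer, a de-duplicated recipient list and a set of formatted budget strings. A minimal, self-contained sketch of just that pure-Python part (the _FakeTeamBudgetEvent dataclass and both helper functions are illustrative stand-ins, not litellm APIs):

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class _FakeTeamBudgetEvent:
    """Illustrative stand-in for the few WebhookEvent fields the new method reads."""

    alert_emails: Optional[List[str]] = None
    soft_budget: Optional[float] = None
    spend: Optional[float] = 0.0
    max_budget: Optional[float] = None
    team_alias: Optional[str] = None


def collect_recipients(event: _FakeTeamBudgetEvent) -> List[str]:
    # Mirrors the dedup loop: keep order, drop empty values and duplicates.
    recipients: List[str] = []
    if event.alert_emails:
        for email in event.alert_emails:
            if email and email not in recipients:
                recipients.append(email)
    return recipients


def format_budget_fields(event: _FakeTeamBudgetEvent) -> dict:
    # Mirrors the string formatting handed to the email template.
    return {
        "team_alias": event.team_alias or "Team",
        "soft_budget": f"${event.soft_budget}" if event.soft_budget is not None else "N/A",
        "spend": f"${event.spend}" if event.spend is not None else "$0.00",
        "max_budget_info": (
            f"<b>Maximum Budget:</b> ${event.max_budget} <br />"
            if event.max_budget is not None
            else ""
        ),
    }


event = _FakeTeamBudgetEvent(
    alert_emails=["admin@example.com", "admin@example.com", "", "finance@example.com"],
    soft_budget=50.0,
    spend=51.25,
    team_alias="ml-platform",
)
print(collect_recipients(event))    # ['admin@example.com', 'finance@example.com']
print(format_budget_fields(event))  # spend -> '$51.25', soft_budget -> '$50.0', max_budget_info -> ''
```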

@@ -285,15 +354,29 @@ class BaseEmailLogger(CustomLogger):
         # - Don't re-alert, if alert already sent
         _cache: DualCache = self.internal_usage_cache

-        # percent of max_budget left to spend
-        if user_info.max_budget is None and user_info.soft_budget is None:
-            return
-
         # For soft_budget alerts, check if we've already sent an alert
         if type == "soft_budget":
+            # For team soft budget alerts, we only need team soft_budget to be set
+            # For other entity types, we need either max_budget or soft_budget
+            if user_info.event_group == Litellm_EntityType.TEAM:
+                if user_info.soft_budget is None:
+                    return
+            else:
+                # For non-team alerts, require either max_budget or soft_budget
+                if user_info.max_budget is None and user_info.soft_budget is None:
+                    return
             if user_info.soft_budget is not None and user_info.spend >= user_info.soft_budget:
                 # Generate cache key based on event type and identifier
-
+                # Use appropriate ID based on event_group to ensure unique cache keys per entity type
+                if user_info.event_group == Litellm_EntityType.TEAM:
+                    _id = user_info.team_id or "default_id"
+                elif user_info.event_group == Litellm_EntityType.ORGANIZATION:
+                    _id = user_info.organization_id or "default_id"
+                elif user_info.event_group == Litellm_EntityType.USER:
+                    _id = user_info.user_id or "default_id"
+                else:
+                    # For KEY and other types, use token or user_id
+                    _id = user_info.token or user_info.user_id or "default_id"
                 _cache_key = f"email_budget_alerts:soft_budget_crossed:{_id}"

                 # Check if we've already sent this alert

@@ -318,10 +401,15 @@ class BaseEmailLogger(CustomLogger):
                     projected_exceeded_date=user_info.projected_exceeded_date,
                     projected_spend=user_info.projected_spend,
                     event_group=user_info.event_group,
+                    alert_emails=user_info.alert_emails,
                 )

                 try:
-
+                    # Use team-specific function for team alerts, otherwise use standard function
+                    if user_info.event_group == Litellm_EntityType.TEAM:
+                        await self.send_team_soft_budget_alert_email(webhook_event)
+                    else:
+                        await self.send_soft_budget_alert_email(webhook_event)

                     # Cache the alert to prevent duplicate sends
                     await _cache.async_set_cache(
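
The cache-key selection above is what keeps a team alert from suppressing an unrelated user or key alert. A standalone sketch of the same branching, with plain strings standing in for the Litellm_EntityType enum and the CallInfo fields (soft_budget_cache_key is a hypothetical helper, not part of litellm):

```python
from typing import Optional


def soft_budget_cache_key(
    event_group: str,
    team_id: Optional[str] = None,
    organization_id: Optional[str] = None,
    user_id: Optional[str] = None,
    token: Optional[str] = None,
) -> str:
    # Pick the identifier per entity type so each team / org / user / key gets
    # its own "alert already sent" cache entry, as in the hunk above.
    if event_group == "team":
        _id = team_id or "default_id"
    elif event_group == "organization":
        _id = organization_id or "default_id"
    elif event_group == "user":
        _id = user_id or "default_id"
    else:  # key and anything else: fall back to token, then user_id
        _id = token or user_id or "default_id"
    return f"email_budget_alerts:soft_budget_crossed:{_id}"


print(soft_budget_cache_key("team", team_id="team-123"))
# email_budget_alerts:soft_budget_crossed:team-123
print(soft_budget_cache_key("key", token=None, user_id="user-9"))
# email_budget_alerts:soft_budget_crossed:user-9
```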

litellm_enterprise/proxy/auth/route_checks.py

@@ -36,7 +36,7 @@ class EnterpriseRouteChecks:
         if not premium_user:
             raise HTTPException(
                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-                detail=f"🚨🚨🚨 DISABLING
+                detail=f"🚨🚨🚨 DISABLING ADMIN ENDPOINTS is an Enterprise feature\n🚨 {CommonProxyErrors.not_premium_user.value}",
             )

         return get_secret_bool("DISABLE_ADMIN_ENDPOINTS") is True
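
The gate itself is unchanged: admin endpoints are only disabled when the premium check passes and the DISABLE_ADMIN_ENDPOINTS secret evaluates to true. A rough, self-contained approximation of that boolean check using an environment variable (disable_admin_endpoints_enabled is a stand-in, not litellm's get_secret_bool):

```python
import os


def disable_admin_endpoints_enabled() -> bool:
    # Rough stand-in for get_secret_bool("DISABLE_ADMIN_ENDPOINTS"): treat the
    # usual truthy strings as True, everything else (including unset) as False.
    raw = os.getenv("DISABLE_ADMIN_ENDPOINTS", "")
    return raw.strip().lower() in {"1", "true", "yes", "on"}


os.environ["DISABLE_ADMIN_ENDPOINTS"] = "true"
print(disable_admin_endpoints_enabled())  # True
```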

litellm_enterprise/proxy/hooks/managed_files.py

@@ -166,7 +166,11 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
                 "updated_by": user_api_key_dict.user_id,
                 "status": file_object.status,
             },
-            "update": {
+            "update": {
+                "file_object": file_object.model_dump_json(),
+                "status": file_object.status,
+                "updated_by": user_api_key_dict.user_id,
+            },  # FIX: Update status and file_object on every operation to keep state in sync
         },
     )

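
The point of the fix above is that the upsert's "update" branch now carries the same file_object/status/updated_by fields as "create", so a re-upload refreshes the stored state instead of leaving it stale. A tiny sketch of that intent with plain dicts (upsert_payload is illustrative and omits the other fields the real "create" block sets):

```python
import json
from typing import Any, Dict


def upsert_payload(file_object_json: str, status: str, user_id: str) -> Dict[str, Any]:
    # Mirror the same fields into "update" that "create" carries so repeated
    # upserts refresh status/file_object instead of leaving them stale.
    fields = {
        "file_object": file_object_json,
        "status": status,
        "updated_by": user_id,
    }
    return {"create": dict(fields), "update": dict(fields)}


payload = upsert_payload(
    json.dumps({"id": "file-abc", "status": "processed"}), "processed", "user-1"
)
print(payload["update"]["status"])  # processed
```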

@@ -244,6 +248,78 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         return managed_object.created_by == user_id
         return True  # don't raise error if managed object is not found

+    async def list_user_batches(
+        self,
+        user_api_key_dict: UserAPIKeyAuth,
+        limit: Optional[int] = None,
+        after: Optional[str] = None,
+        provider: Optional[str] = None,
+        target_model_names: Optional[str] = None,
+        llm_router: Optional[Router] = None,
+    ) -> Dict[str, Any]:
+        # Provider filtering is not supported for managed batches
+        # This is because the encoded object ids stored in the managed objects table do not contain the provider information
+        # To support provider filtering, we would need to store the provider information in the encoded object ids
+        if provider:
+            raise Exception(
+                "Filtering by 'provider' is not supported when using managed batches."
+            )
+
+        # Model name filtering is not supported for managed batches
+        # This is because the encoded object ids stored in the managed objects table do not contain the model name
+        # A hash of the model name + litellm_params for the model name is encoded as the model id. This is not sufficient to reliably map the target model names to the model ids.
+        if target_model_names:
+            raise Exception(
+                "Filtering by 'target_model_names' is not supported when using managed batches."
+            )
+
+        where_clause: Dict[str, Any] = {"file_purpose": "batch"}
+
+        # Filter by user who created the batch
+        if user_api_key_dict.user_id:
+            where_clause["created_by"] = user_api_key_dict.user_id
+
+        if after:
+            where_clause["id"] = {"gt": after}
+
+        # Fetch more than needed to allow for post-fetch filtering
+        fetch_limit = limit or 20
+        if target_model_names:
+            # Fetch extra to account for filtering
+            fetch_limit = max(fetch_limit * 3, 100)
+
+        batches = await self.prisma_client.db.litellm_managedobjecttable.find_many(
+            where=where_clause,
+            take=fetch_limit,
+            order={"created_at": "desc"},
+        )
+
+        batch_objects: List[LiteLLMBatch] = []
+        for batch in batches:
+            try:
+                # Stop once we have enough after filtering
+                if len(batch_objects) >= (limit or 20):
+                    break
+
+                batch_data = json.loads(batch.file_object) if isinstance(batch.file_object, str) else batch.file_object
+                batch_obj = LiteLLMBatch(**batch_data)
+                batch_obj.id = batch.unified_object_id
+                batch_objects.append(batch_obj)
+
+            except Exception as e:
+                verbose_logger.warning(
+                    f"Failed to parse batch object {batch.unified_object_id}: {e}"
+                )
+                continue
+
+        return {
+            "object": "list",
+            "data": batch_objects,
+            "first_id": batch_objects[0].id if batch_objects else None,
+            "last_id": batch_objects[-1].id if batch_objects else None,
+            "has_more": len(batch_objects) == (limit or 20),
+        }
+
     async def get_user_created_file_ids(
         self, user_api_key_dict: UserAPIKeyAuth, model_object_ids: List[str]
     ) -> List[OpenAIFileObject]:
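
Most of list_user_batches is the where-clause and list-response bookkeeping. A self-contained sketch of those two pieces, leaving out the prisma query and LiteLLMBatch parsing (build_batch_where_clause and paginate are hypothetical helpers that mirror the shapes used above):

```python
from typing import Any, Dict, List, Optional


def build_batch_where_clause(user_id: Optional[str], after: Optional[str]) -> Dict[str, Any]:
    # Same filters the method passes to the DB query: batch-purpose files,
    # scoped to their creator, with cursor-style "id > after" pagination.
    where: Dict[str, Any] = {"file_purpose": "batch"}
    if user_id:
        where["created_by"] = user_id
    if after:
        where["id"] = {"gt": after}
    return where


def paginate(ids: List[str], limit: Optional[int] = None) -> Dict[str, Any]:
    # Same list-shaped response the method returns, minus the LiteLLMBatch parsing.
    page = ids[: (limit or 20)]
    return {
        "object": "list",
        "data": page,
        "first_id": page[0] if page else None,
        "last_id": page[-1] if page else None,
        "has_more": len(page) == (limit or 20),
    }


print(build_batch_where_clause("user-1", after="batch_0042"))
# {'file_purpose': 'batch', 'created_by': 'user-1', 'id': {'gt': 'batch_0042'}}
print(paginate([f"batch_{i}" for i in range(5)], limit=2))
# {'object': 'list', 'data': ['batch_0', 'batch_1'], 'first_id': 'batch_0', 'last_id': 'batch_1', 'has_more': True}
```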

@@ -282,6 +358,31 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         )
         return False

+    async def check_file_ids_access(
+        self, file_ids: List[str], user_api_key_dict: UserAPIKeyAuth
+    ) -> None:
+        """
+        Check if the user has access to a list of file IDs.
+        Only checks managed (unified) file IDs.
+
+        Args:
+            file_ids: List of file IDs to check access for
+            user_api_key_dict: User API key authentication details
+
+        Raises:
+            HTTPException: If user doesn't have access to any of the files
+        """
+        for file_id in file_ids:
+            is_unified_file_id = _is_base64_encoded_unified_file_id(file_id)
+            if is_unified_file_id:
+                if not await self.can_user_call_unified_file_id(
+                    file_id, user_api_key_dict
+                ):
+                    raise HTTPException(
+                        status_code=403,
+                        detail=f"User {user_api_key_dict.user_id} does not have access to the file {file_id}",
+                    )
+
     async def async_pre_call_hook(  # noqa: PLR0915
         self,
         user_api_key_dict: UserAPIKeyAuth,
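
check_file_ids_access is a simple guard: iterate the ids, skip anything that is not a managed (unified) id, and fail closed when the ownership check says no. A synchronous sketch of the same shape, with toy predicates standing in for _is_base64_encoded_unified_file_id and can_user_call_unified_file_id, and a plain exception instead of FastAPI's HTTPException:

```python
from typing import Callable, List


class AccessError(Exception):
    """Stand-in for the HTTPException(status_code=403) the hook raises."""


def check_file_ids_access(
    file_ids: List[str],
    user_id: str,
    is_managed_id: Callable[[str], bool],
    user_can_access: Callable[[str, str], bool],
) -> None:
    # Only managed (unified) file ids are checked; provider-native ids pass through.
    for file_id in file_ids:
        if is_managed_id(file_id) and not user_can_access(file_id, user_id):
            raise AccessError(f"User {user_id} does not have access to the file {file_id}")


# Toy predicates: ids prefixed "unified-" count as managed, and only "unified-ok" is allowed.
check_file_ids_access(
    ["file-native-1", "unified-ok"],
    user_id="user-1",
    is_managed_id=lambda fid: fid.startswith("unified-"),
    user_can_access=lambda fid, uid: fid == "unified-ok",
)
print("access check passed")
```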

@@ -297,6 +398,8 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         if (
             call_type == CallTypes.afile_content.value
             or call_type == CallTypes.afile_delete.value
+            or call_type == CallTypes.afile_retrieve.value
+            or call_type == CallTypes.afile_content.value
         ):
             await self.check_managed_file_id_access(data, user_api_key_dict)

@@ -313,6 +416,9 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         if messages:
             file_ids = self.get_file_ids_from_messages(messages)
             if file_ids:
+                # Check user has access to all managed files
+                await self.check_file_ids_access(file_ids, user_api_key_dict)
+
                 # Check if any files are stored in storage backends and need base64 conversion
                 # This is needed for Vertex AI/Gemini which requires base64 content
                 is_vertex_ai = model and ("vertex_ai" in model or "gemini" in model.lower())

@@ -328,15 +434,27 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             )
             data["model_file_id_mapping"] = model_file_id_mapping
         elif call_type == CallTypes.aresponses.value or call_type == CallTypes.responses.value:
-            # Handle managed files in responses API input
+            # Handle managed files in responses API input and tools
+            file_ids = []
+
+            # Extract file IDs from input parameter
             input_data = data.get("input")
             if input_data:
-                file_ids
-
-
-
-
-
+                file_ids.extend(self.get_file_ids_from_responses_input(input_data))
+
+            # Extract file IDs from tools parameter (e.g., code_interpreter container)
+            tools = data.get("tools")
+            if tools:
+                file_ids.extend(self.get_file_ids_from_responses_tools(tools))
+
+            if file_ids:
+                # Check user has access to all managed files
+                await self.check_file_ids_access(file_ids, user_api_key_dict)
+
+                model_file_id_mapping = await self.get_model_file_id_mapping(
+                    file_ids, user_api_key_dict.parent_otel_span
+                )
+                data["model_file_id_mapping"] = model_file_id_mapping
         elif call_type == CallTypes.afile_content.value:
             retrieve_file_id = cast(Optional[str], data.get("file_id"))
             potential_file_id = (
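
The responses branch now gathers file ids from two places, the input items and the code_interpreter tool containers, before running the access check and model mapping. A standalone sketch of that collection step; the input shape shown here is an assumption about the Responses API payload (it is not spelled out in this diff), while the tools half mirrors the new get_file_ids_from_responses_tools helper:

```python
from typing import Any, Dict, List


def file_ids_from_responses_request(data: Dict[str, Any]) -> List[str]:
    # Collect ids referenced by `input` content parts, then by code_interpreter
    # tool containers, in the same order the hook gathers them.
    file_ids: List[str] = []

    for item in data.get("input") or []:
        if not isinstance(item, dict):
            continue
        for part in item.get("content") or []:
            if isinstance(part, dict) and isinstance(part.get("file_id"), str):
                file_ids.append(part["file_id"])

    for tool in data.get("tools") or []:
        if isinstance(tool, dict) and tool.get("type") == "code_interpreter":
            container = tool.get("container")
            if isinstance(container, dict) and isinstance(container.get("file_ids"), list):
                file_ids.extend(fid for fid in container["file_ids"] if isinstance(fid, str))

    return file_ids


request = {
    "input": [{"role": "user", "content": [{"type": "input_file", "file_id": "file-123"}]}],
    "tools": [{"type": "code_interpreter", "container": {"type": "auto", "file_ids": ["file-456"]}}],
}
print(file_ids_from_responses_request(request))  # ['file-123', 'file-456']
```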

@@ -361,12 +479,16 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             data["model_file_id_mapping"] = model_file_id_mapping
         elif (
             call_type == CallTypes.aretrieve_batch.value
+            or call_type == CallTypes.acancel_batch.value
             or call_type == CallTypes.acancel_fine_tuning_job.value
             or call_type == CallTypes.aretrieve_fine_tuning_job.value
         ):
             accessor_key: Optional[str] = None
             retrieve_object_id: Optional[str] = None
-            if
+            if (
+                call_type == CallTypes.aretrieve_batch.value
+                or call_type == CallTypes.acancel_batch.value
+            ):
                 accessor_key = "batch_id"
             elif (
                 call_type == CallTypes.acancel_fine_tuning_job.value

@@ -534,6 +656,41 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):

         return file_ids

+    def get_file_ids_from_responses_tools(
+        self, tools: List[Dict[str, Any]]
+    ) -> List[str]:
+        """
+        Gets file ids from responses API tools parameter.
+
+        The tools can contain code_interpreter with container.file_ids:
+        [
+            {
+                "type": "code_interpreter",
+                "container": {"type": "auto", "file_ids": ["file-123", "file-456"]}
+            }
+        ]
+        """
+        file_ids: List[str] = []
+
+        if not isinstance(tools, list):
+            return file_ids
+
+        for tool in tools:
+            if not isinstance(tool, dict):
+                continue
+
+            # Check for code_interpreter with container file_ids
+            if tool.get("type") == "code_interpreter":
+                container = tool.get("container")
+                if isinstance(container, dict):
+                    container_file_ids = container.get("file_ids")
+                    if isinstance(container_file_ids, list):
+                        for file_id in container_file_ids:
+                            if isinstance(file_id, str):
+                                file_ids.append(file_id)
+
+        return file_ids
+
     async def get_model_file_id_mapping(
         self, file_ids: List[str], litellm_parent_otel_span: Span
     ) -> dict:
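
The helper's docstring already gives the payload shape it expects. A runnable copy of the same traversal plus a usage example, including malformed entries that the isinstance checks are there to skip (this is a standalone reproduction for illustration, not an import from litellm):

```python
from typing import Any, Dict, List


def get_file_ids_from_responses_tools(tools: List[Dict[str, Any]]) -> List[str]:
    # Standalone copy of the helper's traversal: walk code_interpreter tools and
    # pull string file ids out of container.file_ids, skipping malformed entries.
    file_ids: List[str] = []
    if not isinstance(tools, list):
        return file_ids
    for tool in tools:
        if not isinstance(tool, dict) or tool.get("type") != "code_interpreter":
            continue
        container = tool.get("container")
        if not isinstance(container, dict):
            continue
        container_file_ids = container.get("file_ids")
        if isinstance(container_file_ids, list):
            file_ids.extend(fid for fid in container_file_ids if isinstance(fid, str))
    return file_ids


tools = [
    {"type": "code_interpreter", "container": {"type": "auto", "file_ids": ["file-123", "file-456"]}},
    {"type": "web_search"},                              # ignored: not code_interpreter
    {"type": "code_interpreter", "container": "auto"},   # ignored: container is not a dict
]
print(get_file_ids_from_responses_tools(tools))  # ['file-123', 'file-456']
```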

@@ -673,6 +830,7 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             bytes=file_objects[0].bytes,
             filename=file_objects[0].filename,
             status="uploaded",
+            expires_at=file_objects[0].expires_at,
         )

         return response

@@ -893,8 +1051,10 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         delete_response = None
         specific_model_file_id_mapping = model_file_id_mapping.get(file_id)
         if specific_model_file_id_mapping:
+            # Remove conflicting keys from data to avoid duplicate keyword arguments
+            filtered_data = {k: v for k, v in data.items() if k not in ("model", "file_id")}
             for model_id, model_file_id in specific_model_file_id_mapping.items():
-                delete_response = await llm_router.afile_delete(model=model_id, file_id=model_file_id, **
+                delete_response = await llm_router.afile_delete(model=model_id, file_id=model_file_id, **filtered_data)  # type: ignore

         stored_file_object = await self.delete_unified_file_id(
             file_id, litellm_parent_otel_span
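
The filtered_data comprehension exists because model and file_id are already passed as explicit keyword arguments; forwarding the raw request dict with **data would raise "got multiple values for keyword argument". A small sketch of the failure mode and the fix, with fake_afile_delete standing in for llm_router.afile_delete:

```python
import asyncio
from typing import Any, Dict


async def fake_afile_delete(model: str, file_id: str, **kwargs: Any) -> Dict[str, Any]:
    # Stand-in for llm_router.afile_delete: just echo what it was called with.
    return {"model": model, "file_id": file_id, **kwargs}


def filter_conflicting_keys(data: Dict[str, Any]) -> Dict[str, Any]:
    # Same comprehension as the fix: drop keys that are already passed explicitly,
    # so **data cannot raise "got multiple values for keyword argument 'model'".
    return {k: v for k, v in data.items() if k not in ("model", "file_id")}


data = {"model": "gpt-4o-batch", "file_id": "unified-abc", "extra_headers": {"x-trace": "1"}}
result = asyncio.run(
    fake_afile_delete(model="router-model-1", file_id="provider-file-9", **filter_conflicting_keys(data))
)
print(result)
# {'model': 'router-model-1', 'file_id': 'provider-file-9', 'extra_headers': {'x-trace': '1'}}
```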

litellm_enterprise/proxy/vector_stores/endpoints.py

@@ -282,6 +282,8 @@ async def get_vector_store_info(
         updated_at=vector_store.get("updated_at") or None,
         litellm_credential_name=vector_store.get("litellm_credential_name"),
         litellm_params=vector_store.get("litellm_params") or None,
+        team_id=vector_store.get("team_id"),
+        user_id=vector_store.get("user_id"),
     )
     return {"vector_store": vector_store_pydantic_obj}


{litellm_enterprise-0.1.28.dist-info → litellm_enterprise-0.1.30.dist-info}/METADATA

@@ -1,7 +1,8 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: litellm-enterprise
-Version: 0.1.28
+Version: 0.1.30
 Summary: Package for LiteLLM Enterprise features
+License-File: LICENSE.md
 Author: BerriAI
 Requires-Python: >=3.8, !=2.7.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*
 Classifier: Programming Language :: Python :: 3

@@ -9,6 +10,8 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Project-URL: Documentation, https://docs.litellm.ai
 Project-URL: Homepage, https://litellm.ai
 Project-URL: Repository, https://github.com/BerriAI/litellm

{litellm_enterprise-0.1.28.dist-info → litellm_enterprise-0.1.30.dist-info}/RECORD

@@ -99,7 +99,7 @@ litellm_enterprise/enterprise_callbacks/secrets_plugins/typeform_api_token.py,sh
 litellm_enterprise/enterprise_callbacks/secrets_plugins/vault.py,sha256=fqtHTQTC6QaNMIZpuvntBnCSAgAhY2Ka-XOz4ZLafGk,653
 litellm_enterprise/enterprise_callbacks/secrets_plugins/yandex.py,sha256=BVtFVzCTtpAkRJVudeZIEBBz1W8wueDzpu6TBvxngxo,1183
 litellm_enterprise/enterprise_callbacks/secrets_plugins/zendesk_secret_key.py,sha256=3E21lWz12WUAmdnKDZH8znfTp6hRJbE3yImtfEP52qE,613
-litellm_enterprise/enterprise_callbacks/send_emails/base_email.py,sha256=
+litellm_enterprise/enterprise_callbacks/send_emails/base_email.py,sha256=3nntD4U1StM75rNevkgwDxLMxtsVFk6G_IgLDRCUCuc,30115
 litellm_enterprise/enterprise_callbacks/send_emails/endpoints.py,sha256=hOEpM_q8MJAXlKMOtC9KbgvDVr_YFtF3reu9bjXkpsI,7017
 litellm_enterprise/enterprise_callbacks/send_emails/resend_email.py,sha256=KxNfvONZxSWbNg0HmWwfC0rvHzpN7MBJXAPKGLcy_tU,1541
 litellm_enterprise/enterprise_callbacks/send_emails/sendgrid_email.py,sha256=4bvSOfV-WzCGIJX2V32Ug91I8GBQAmypDDp40qsZbQU,2318

@@ -110,24 +110,24 @@ litellm_enterprise/proxy/__init__.py,sha256=mnCBJ6Y98ai1RfVpYpCxavTPSTmdZ0e-kxX1
 litellm_enterprise/proxy/audit_logging_endpoints.py,sha256=BnHczmi4bnW1GpMNsq4CvnbwL3rgQ-pnrtFd5WBbbHY,5304
 litellm_enterprise/proxy/auth/__init__.py,sha256=wTXtbDcLrD_qecxJfEJtraeCvGfldDgLz8qdVggLoSI,301
 litellm_enterprise/proxy/auth/custom_sso_handler.py,sha256=ITML9dRKL-LuJhU3WKKVPDp0ECfYxvxTvuX8GpSM0gE,3439
-litellm_enterprise/proxy/auth/route_checks.py,sha256=
+litellm_enterprise/proxy/auth/route_checks.py,sha256=_0b-B6Lg5Ni2oQLCYrRdQdTi5dyX4yxoIsKy8P6dD34,2488
 litellm_enterprise/proxy/auth/user_api_key_auth.py,sha256=7t5Q-JoKFyoymylaOT8KWAAOFVz0JOTl7PPOmTkpj5c,1144
 litellm_enterprise/proxy/common_utils/__init__.py,sha256=zmdmvktxSsUOpGp1TxwJeV8zyLPt4938NXtato4YqLI,56
-litellm_enterprise/proxy/common_utils/check_batch_cost.py,sha256=
+litellm_enterprise/proxy/common_utils/check_batch_cost.py,sha256=2lrg8pDFqTbNJBAFCvEv1Egifg2l-iyd2lKgripOLpA,7399
 litellm_enterprise/proxy/common_utils/check_responses_cost.py,sha256=EKZ78mzpYjxmOtSXeoxfXRZX88MJtaoMyx6NQEPC8AU,4301
 litellm_enterprise/proxy/enterprise_routes.py,sha256=ToJVSSNaYUotzgIg-kWsfsqh2E0GnQirOPkpE4YkHNg,907
-litellm_enterprise/proxy/hooks/managed_files.py,sha256=
+litellm_enterprise/proxy/hooks/managed_files.py,sha256=_Xq4GmMp_b5ikcf7bo0mXLIlFhJncAqIATyJ_xIV684,50636
 litellm_enterprise/proxy/management_endpoints/__init__.py,sha256=zfaqryxzmFu6se-w4yR2nlHKxDOOtHAWEehA2xFbFNg,270
 litellm_enterprise/proxy/management_endpoints/internal_user_endpoints.py,sha256=GEoOVujrtKXDHfko2KQaLn-ms64zkutFE9PP5IhBBLM,2175
 litellm_enterprise/proxy/management_endpoints/key_management_endpoints.py,sha256=-IXRzVrNQ3_krL-gxngelYQftwyPlB_HmgI3RN-HdvM,1147
 litellm_enterprise/proxy/proxy_server.py,sha256=fzOeTyiyevLWi2767-2W1Co7reR-0wnoUIhOgVlJFQc,1183
 litellm_enterprise/proxy/readme.md,sha256=ZcigMJYSHWs4SWnYriWjrSVDJKsu44c2HsbYbma0EHU,397
 litellm_enterprise/proxy/utils.py,sha256=y4ADfhlEG_mH0x5rfIg7D9FjS586lVgQ9DL0tTdgrMQ,962
-litellm_enterprise/proxy/vector_stores/endpoints.py,sha256=
+litellm_enterprise/proxy/vector_stores/endpoints.py,sha256=3F5A9kdttA72PGzVP3aR2d1kkDkdGvc5O196k2iOmRc,13058
 litellm_enterprise/types/enterprise_callbacks/send_emails.py,sha256=AouBXqb1EB1-Mg3fM_3UjUDihIA45zIjRgA6M4vQ7Zw,2150
 litellm_enterprise/types/proxy/audit_logging_endpoints.py,sha256=oSJVAuRD9r6ZjRCqNBFM-J5HSgOltsXts400b2aynRE,894
 litellm_enterprise/types/proxy/proxy_server.py,sha256=kdhtxsU2uok6-XO_ebugCv7PzYYmGgv4vh-XemHJnpM,146
-litellm_enterprise-0.1.
-litellm_enterprise-0.1.
-litellm_enterprise-0.1.
-litellm_enterprise-0.1.
+litellm_enterprise-0.1.30.dist-info/METADATA,sha256=nq8Y1I3mSEf8m4VnGeWnkul6dKo5iRg97QYV14noVU0,1441
+litellm_enterprise-0.1.30.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+litellm_enterprise-0.1.30.dist-info/licenses/LICENSE.md,sha256=nq3D9ZqOvRDT6hLkypQFTc3XsE15kbkg5rkkLJVSqKY,2251
+litellm_enterprise-0.1.30.dist-info/RECORD,,

{litellm_enterprise-0.1.28.dist-info → litellm_enterprise-0.1.30.dist-info/licenses}/LICENSE.md RENAMED (file without changes)