litellm-enterprise 0.1.26__py3-none-any.whl → 0.1.28__py3-none-any.whl

This diff compares the contents of publicly released package versions as published to their respective registries. It is provided for informational purposes only.
--- a/litellm_enterprise/enterprise_callbacks/send_emails/base_email.py
+++ b/litellm_enterprise/enterprise_callbacks/send_emails/base_email.py
@@ -28,11 +28,16 @@ from litellm.integrations.email_templates.user_invitation_email import (
     USER_INVITATION_EMAIL_TEMPLATE,
 )
 from litellm.integrations.email_templates.templates import (
+    MAX_BUDGET_ALERT_EMAIL_TEMPLATE,
     SOFT_BUDGET_ALERT_EMAIL_TEMPLATE,
 )
 from litellm.proxy._types import CallInfo, InvitationNew, UserAPIKeyAuth, WebhookEvent
 from litellm.secret_managers.main import get_secret_bool
 from litellm.types.integrations.slack_alerting import LITELLM_LOGO_URL
+from litellm.constants import (
+    EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE,
+    EMAIL_BUDGET_ALERT_TTL,
+)


 class BaseEmailLogger(CustomLogger):
@@ -43,7 +48,6 @@ class BaseEmailLogger(CustomLogger):
         EmailEvent.virtual_key_created: "LiteLLM: {event_message}",
         EmailEvent.virtual_key_rotated: "LiteLLM: {event_message}",
     }
-    DEFAULT_BUDGET_ALERT_TTL = 24 * 60 * 60  # 24 hours in seconds

     def __init__(
         self,
@@ -213,11 +217,53 @@ class BaseEmailLogger(CustomLogger):
         )
         pass

+    async def send_max_budget_alert_email(self, event: WebhookEvent):
+        """
+        Send email to user when max budget alert threshold is reached
+        """
+        email_params = await self._get_email_params(
+            email_event=EmailEvent.max_budget_alert,
+            user_id=event.user_id,
+            user_email=event.user_email,
+            event_message=event.event_message,
+        )
+
+        verbose_proxy_logger.debug(
+            f"send_max_budget_alert_email_event: {json.dumps(event.model_dump(exclude_none=True), indent=4, default=str)}"
+        )
+
+        # Format budget values
+        spend_str = f"${event.spend}" if event.spend is not None else "$0.00"
+        max_budget_str = f"${event.max_budget}" if event.max_budget is not None else "N/A"
+
+        # Calculate percentage and alert threshold
+        percentage = int(EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE * 100)
+        alert_threshold_str = f"${event.max_budget * EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE:.2f}" if event.max_budget is not None else "N/A"
+
+        email_html_content = MAX_BUDGET_ALERT_EMAIL_TEMPLATE.format(
+            email_logo_url=email_params.logo_url,
+            recipient_email=email_params.recipient_email,
+            percentage=percentage,
+            spend=spend_str,
+            max_budget=max_budget_str,
+            alert_threshold=alert_threshold_str,
+            base_url=email_params.base_url,
+            email_support_contact=email_params.support_contact,
+        )
+        await self.send_email(
+            from_email=self.DEFAULT_LITELLM_EMAIL,
+            to_email=[email_params.recipient_email],
+            subject=email_params.subject,
+            html_body=email_html_content,
+        )
+        pass
+
     async def budget_alerts(
         self,
         type: Literal[
             "token_budget",
             "soft_budget",
+            "max_budget_alert",
             "user_budget",
             "team_budget",
             "organization_budget",
@@ -281,7 +327,7 @@ class BaseEmailLogger(CustomLogger):
                 await _cache.async_set_cache(
                     key=_cache_key,
                     value="SENT",
-                    ttl=self.DEFAULT_BUDGET_ALERT_TTL,
+                    ttl=EMAIL_BUDGET_ALERT_TTL,
                 )
             except Exception as e:
                 verbose_proxy_logger.error(
@@ -290,6 +336,60 @@ class BaseEmailLogger(CustomLogger):
                 )
                 return

+        # For max_budget_alert, check if we've already sent an alert
+        if type == "max_budget_alert":
+            if user_info.max_budget is not None and user_info.spend is not None:
+                alert_threshold = user_info.max_budget * EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE
+
+                # Only alert if we've crossed the threshold but haven't exceeded max_budget yet
+                if user_info.spend >= alert_threshold and user_info.spend < user_info.max_budget:
+                    # Generate cache key based on event type and identifier
+                    _id = user_info.token or user_info.user_id or "default_id"
+                    _cache_key = f"email_budget_alerts:max_budget_alert:{_id}"
+
+                    # Check if we've already sent this alert
+                    result = await _cache.async_get_cache(key=_cache_key)
+                    if result is None:
+                        # Calculate percentage
+                        percentage = int(EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE * 100)
+
+                        # Create WebhookEvent for max budget alert
+                        event_message = f"Max Budget Alert - {percentage}% of Maximum Budget Reached"
+                        webhook_event = WebhookEvent(
+                            event="max_budget_alert",
+                            event_message=event_message,
+                            spend=user_info.spend,
+                            max_budget=user_info.max_budget,
+                            soft_budget=user_info.soft_budget,
+                            token=user_info.token,
+                            customer_id=user_info.customer_id,
+                            user_id=user_info.user_id,
+                            team_id=user_info.team_id,
+                            team_alias=user_info.team_alias,
+                            organization_id=user_info.organization_id,
+                            user_email=user_info.user_email,
+                            key_alias=user_info.key_alias,
+                            projected_exceeded_date=user_info.projected_exceeded_date,
+                            projected_spend=user_info.projected_spend,
+                            event_group=user_info.event_group,
+                        )
+
+                        try:
+                            await self.send_max_budget_alert_email(webhook_event)
+
+                            # Cache the alert to prevent duplicate sends
+                            await _cache.async_set_cache(
+                                key=_cache_key,
+                                value="SENT",
+                                ttl=EMAIL_BUDGET_ALERT_TTL,
+                            )
+                        except Exception as e:
+                            verbose_proxy_logger.error(
+                                f"Error sending max budget alert email: {e}",
+                                exc_info=True,
+                            )
+                    return
+
     async def _get_email_params(
         self,
         email_event: EmailEvent,
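
The new `budget_alerts` branch only fires inside the window between the alert threshold and the max budget, and the `email_budget_alerts:max_budget_alert:{id}` cache key with `EMAIL_BUDGET_ALERT_TTL` suppresses repeat sends. A pure-logic sketch of that gate, under the same assumed 0.9 threshold:

```python
# Sketch of the gating condition above; 0.9 stands in for
# EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE, whose real value is in litellm.constants.
def should_send_max_budget_alert(spend: float, max_budget: float, already_sent: bool) -> bool:
    threshold = max_budget * 0.9
    return threshold <= spend < max_budget and not already_sent

assert should_send_max_budget_alert(95.0, 100.0, already_sent=False)       # inside the window
assert not should_send_max_budget_alert(100.0, 100.0, already_sent=False)  # at or past max budget
assert not should_send_max_budget_alert(95.0, 100.0, already_sent=True)    # suppressed by the TTL cache
```
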
--- /dev/null
+++ b/litellm_enterprise/proxy/__init__.py
@@ -0,0 +1 @@
+# Package marker for enterprise proxy components.
--- /dev/null
+++ b/litellm_enterprise/proxy/common_utils/__init__.py
@@ -0,0 +1 @@
+# Package marker for enterprise proxy common utilities.
--- /dev/null
+++ b/litellm_enterprise/proxy/common_utils/check_responses_cost.py
@@ -0,0 +1,110 @@
+"""
+Polls LiteLLM_ManagedObjectTable to check if the response is complete.
+Cost tracking is handled automatically by litellm.aget_responses().
+"""
+
+from typing import TYPE_CHECKING
+
+import litellm
+from litellm._logging import verbose_proxy_logger
+
+if TYPE_CHECKING:
+    from litellm.proxy.utils import PrismaClient, ProxyLogging
+    from litellm.router import Router
+
+
+class CheckResponsesCost:
+    def __init__(
+        self,
+        proxy_logging_obj: "ProxyLogging",
+        prisma_client: "PrismaClient",
+        llm_router: "Router",
+    ):
+        from litellm.proxy.utils import PrismaClient, ProxyLogging
+        from litellm.router import Router
+
+        self.proxy_logging_obj: ProxyLogging = proxy_logging_obj
+        self.prisma_client: PrismaClient = prisma_client
+        self.llm_router: Router = llm_router
+
+    async def check_responses_cost(self):
+        """
+        Check if background responses are complete and track their cost.
+        - Get all status="queued" or "in_progress" and file_purpose="response" jobs
+        - Query the provider to check if response is complete
+        - Cost is automatically tracked by litellm.aget_responses()
+        - Mark completed/failed/cancelled responses as complete in the database
+        """
+        jobs = await self.prisma_client.db.litellm_managedobjecttable.find_many(
+            where={
+                "status": {"in": ["queued", "in_progress"]},
+                "file_purpose": "response",
+            }
+        )
+
+        verbose_proxy_logger.debug(f"Found {len(jobs)} response jobs to check")
+        completed_jobs = []
+
+        for job in jobs:
+            unified_object_id = job.unified_object_id
+
+            try:
+                from litellm.proxy.hooks.responses_id_security import (
+                    ResponsesIDSecurity,
+                )
+
+                # Get the stored response object to extract model information
+                stored_response = job.file_object
+                model_name = stored_response.get("model", None)
+
+                # Decrypt the response ID
+                responses_id_security, _, _ = ResponsesIDSecurity()._decrypt_response_id(unified_object_id)
+
+                # Prepare metadata with model information for cost tracking
+                litellm_metadata = {
+                    "user_api_key_user_id": job.created_by or "default-user-id",
+                }
+
+                # Add model information if available
+                if model_name:
+                    litellm_metadata["model"] = model_name
+                    litellm_metadata["model_group"] = model_name  # Use same value for model_group
+
+                response = await litellm.aget_responses(
+                    response_id=responses_id_security,
+                    litellm_metadata=litellm_metadata,
+                )
+
+                verbose_proxy_logger.debug(
+                    f"Response {unified_object_id} status: {response.status}, model: {model_name}"
+                )
+
+            except Exception as e:
+                verbose_proxy_logger.info(
+                    f"Skipping job {unified_object_id} due to error: {e}"
+                )
+                continue
+
+            # Check if response is in a terminal state
+            if response.status == "completed":
+                verbose_proxy_logger.info(
+                    f"Response {unified_object_id} is complete. Cost automatically tracked by aget_responses."
+                )
+                completed_jobs.append(job)
+
+            elif response.status in ["failed", "cancelled"]:
+                verbose_proxy_logger.info(
+                    f"Response {unified_object_id} has status {response.status}, marking as complete"
+                )
+                completed_jobs.append(job)
+
+        # Mark completed jobs in the database
+        if len(completed_jobs) > 0:
+            await self.prisma_client.db.litellm_managedobjecttable.update_many(
+                where={"id": {"in": [job.id for job in completed_jobs]}},
+                data={"status": "completed"},
+            )
+            verbose_proxy_logger.info(
+                f"Marked {len(completed_jobs)} response jobs as completed"
+            )
+
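
`CheckResponsesCost` is constructed with the proxy's logging object, Prisma client, and router, and something must call `check_responses_cost()` on a schedule. A hedged sketch of such a driver; the 60-second interval and the wiring function are assumptions, not part of this diff:

```python
import asyncio

from litellm_enterprise.proxy.common_utils.check_responses_cost import (
    CheckResponsesCost,
)


async def poll_responses_cost(proxy_logging_obj, prisma_client, llm_router):
    # Hypothetical background task: drive the poller until cancelled.
    checker = CheckResponsesCost(
        proxy_logging_obj=proxy_logging_obj,
        prisma_client=prisma_client,
        llm_router=llm_router,
    )
    while True:
        await checker.check_responses_cost()  # marks terminal jobs as completed
        await asyncio.sleep(60)  # assumed polling interval
```
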
--- a/litellm_enterprise/proxy/hooks/managed_files.py
+++ b/litellm_enterprise/proxy/hooks/managed_files.py
@@ -8,6 +8,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Union, cast

 from fastapi import HTTPException

+import litellm
 from litellm import Router, verbose_logger
 from litellm._uuid import uuid
 from litellm.caching.caching import DualCache
@@ -23,7 +24,9 @@ from litellm.proxy._types import (
 from litellm.proxy.openai_files_endpoints.common_utils import (
     _is_base64_encoded_unified_file_id,
     get_batch_id_from_unified_batch_id,
+    get_content_type_from_file_object,
     get_model_id_from_unified_batch_id,
+    normalize_mime_type_for_provider,
 )
 from litellm.types.llms.openai import (
     AllMessageValues,
@@ -33,6 +36,7 @@ from litellm.types.llms.openai import (
     FileObject,
     OpenAIFileObject,
     OpenAIFilesPurpose,
+    ResponsesAPIResponse,
 )
 from litellm.types.utils import (
     CallTypesLiteral,
@@ -41,10 +45,6 @@ from litellm.types.utils import (
     LLMResponseTypes,
     SpecialEnums,
 )
-from litellm.proxy.openai_files_endpoints.common_utils import (
-    get_content_type_from_file_object,
-    normalize_mime_type_for_provider,
-)

 if TYPE_CHECKING:
     from litellm.types.llms.openai import HttpxBinaryResponseContent
@@ -133,10 +133,10 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
     async def store_unified_object_id(
         self,
         unified_object_id: str,
-        file_object: Union[LiteLLMBatch, LiteLLMFineTuningJob],
+        file_object: Union[LiteLLMBatch, LiteLLMFineTuningJob, "ResponsesAPIResponse"],
         litellm_parent_otel_span: Optional[Span],
         model_object_id: str,
-        file_purpose: Literal["batch", "fine-tune"],
+        file_purpose: Literal["batch", "fine-tune", "response"],
         user_api_key_dict: UserAPIKeyAuth,
     ) -> None:
         verbose_logger.info(
@@ -750,9 +750,27 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             model_id=model_id,
             model_name=model_name,
         )
-        await self.store_unified_file_id( # need to store otherwise any retrieve call will fail
+
+        # Fetch the actual file object for the output file
+        file_object = None
+        try:
+            # Use litellm to retrieve the file object from the provider
+            from litellm import afile_retrieve
+            file_object = await afile_retrieve(
+                custom_llm_provider=model_name.split("/")[0] if model_name and "/" in model_name else "openai",
+                file_id=original_output_file_id
+            )
+            verbose_logger.debug(
+                f"Successfully retrieved file object for output_file_id={original_output_file_id}"
+            )
+        except Exception as e:
+            verbose_logger.warning(
+                f"Failed to retrieve file object for output_file_id={original_output_file_id}: {str(e)}. Storing with None and will fetch on-demand."
+            )
+
+        await self.store_unified_file_id(
             file_id=response.output_file_id,
-            file_object=None,
+            file_object=file_object,
             litellm_parent_otel_span=user_api_key_dict.parent_otel_span,
             model_mappings={model_id: original_output_file_id},
             user_api_key_dict=user_api_key_dict,
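
The `custom_llm_provider` argument above is derived with a prefix heuristic: a slash-prefixed model name like `azure/gpt-4o` yields the provider, and anything else falls back to `openai`. The same logic as a standalone sketch:

```python
from typing import Optional


def provider_from_model_name(model_name: Optional[str]) -> str:
    # Mirrors the inline conditional used when retrieving the output file object.
    if model_name and "/" in model_name:
        return model_name.split("/")[0]
    return "openai"


assert provider_from_model_name("azure/gpt-4o") == "azure"
assert provider_from_model_name("gpt-4o") == "openai"
assert provider_from_model_name(None) == "openai"
```
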
@@ -819,15 +837,36 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         return response

     async def afile_retrieve(
-        self, file_id: str, litellm_parent_otel_span: Optional[Span]
+        self, file_id: str, litellm_parent_otel_span: Optional[Span], llm_router=None
     ) -> OpenAIFileObject:
         stored_file_object = await self.get_unified_file_id(
             file_id, litellm_parent_otel_span
         )
-        if stored_file_object:
-            return stored_file_object.file_object
-        else:
+
+        # Case 1: This is not a managed file
+        if not stored_file_object:
             raise Exception(f"LiteLLM Managed File object with id={file_id} not found")
+
+        # Case 2: Managed file and the file object exists in the database
+        if stored_file_object and stored_file_object.file_object:
+            return stored_file_object.file_object
+
+        # Case 3: Managed file exists in the database but not the file object (e.g. the batch task might not have run).
+        # So we fetch the file object from the provider. We deliberately do not store the result to avoid interfering with batch cost tracking code.
+        if not llm_router:
+            raise Exception(
+                f"LiteLLM Managed File object with id={file_id} has no file_object "
+                f"and llm_router is required to fetch from provider"
+            )
+
+        try:
+            model_id, model_file_id = next(iter(stored_file_object.model_mappings.items()))
+            credentials = llm_router.get_deployment_credentials_with_provider(model_id) or {}
+            response = await litellm.afile_retrieve(file_id=model_file_id, **credentials)
+            response.id = file_id  # Replace with unified ID
+            return response
+        except Exception as e:
+            raise Exception(f"Failed to retrieve file {file_id} from provider: {str(e)}") from e

     async def afile_list(
         self,
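
The reworked `afile_retrieve` distinguishes three cases: an unknown id, a managed file whose object is already in the database, and a managed file whose object must be fetched from the provider (and deliberately not persisted). A pure-logic sketch of that dispatch; `StoredFile` is an illustrative stand-in for the DB row, not a real LiteLLM type:

```python
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class StoredFile:  # illustrative stand-in for the stored managed-file row
    file_object: Optional[dict] = None
    model_mappings: dict = field(default_factory=dict)


def resolve_retrieval_case(stored: Optional[StoredFile], llm_router) -> str:
    if stored is None:
        return "case 1: not a managed file -> raise"
    if stored.file_object is not None:
        return "case 2: return file_object from the database"
    if llm_router is None:
        return "case 3 blocked: llm_router is required to fetch from the provider -> raise"
    return "case 3: fetch via model_mappings credentials, rewrite id, do not persist"


print(resolve_retrieval_case(None, None))
print(resolve_retrieval_case(StoredFile(file_object={"id": "file-abc"}), None))
print(resolve_retrieval_case(StoredFile(model_mappings={"model-1": "file-xyz"}), object()))
```
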
@@ -851,10 +890,11 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             [file_id], litellm_parent_otel_span
         )

+        delete_response = None
         specific_model_file_id_mapping = model_file_id_mapping.get(file_id)
         if specific_model_file_id_mapping:
             for model_id, model_file_id in specific_model_file_id_mapping.items():
-                await llm_router.afile_delete(model=model_id, file_id=model_file_id, **data)  # type: ignore
+                delete_response = await llm_router.afile_delete(model=model_id, file_id=model_file_id, **data)  # type: ignore

         stored_file_object = await self.delete_unified_file_id(
             file_id, litellm_parent_otel_span
@@ -862,6 +902,9 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):

         if stored_file_object:
             return stored_file_object
+        elif delete_response:
+            delete_response.id = file_id
+            return delete_response
         else:
             raise Exception(f"LiteLLM Managed File object with id={file_id} not found")

@@ -928,7 +971,9 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):

         # File is stored in a storage backend, download and convert to base64
         try:
-            from litellm.llms.base_llm.files.storage_backend_factory import get_storage_backend
+            from litellm.llms.base_llm.files.storage_backend_factory import (
+                get_storage_backend,
+            )

             storage_backend_name = db_file.storage_backend
             storage_url = db_file.storage_url
--- a/litellm_enterprise/types/enterprise_callbacks/send_emails.py
+++ b/litellm_enterprise/types/enterprise_callbacks/send_emails.py
@@ -37,6 +37,7 @@ class EmailEvent(str, enum.Enum):
     new_user_invitation = "New User Invitation"
     virtual_key_rotated = "Virtual Key Rotated"
     soft_budget_crossed = "Soft Budget Crossed"
+    max_budget_alert = "Max Budget Alert"

 class EmailEventSettings(BaseModel):
     event: EmailEvent
@@ -53,6 +54,7 @@ class DefaultEmailSettings(BaseModel):
             EmailEvent.new_user_invitation: True,  # On by default
             EmailEvent.virtual_key_rotated: True,  # On by default
             EmailEvent.soft_budget_crossed: True,  # On by default
+            EmailEvent.max_budget_alert: True,  # On by default
         }
     )
     def to_dict(self) -> Dict[str, bool]:
--- a/litellm_enterprise-0.1.26.dist-info/METADATA
+++ b/litellm_enterprise-0.1.28.dist-info/METADATA
@@ -1,8 +1,7 @@
-Metadata-Version: 2.4
+Metadata-Version: 2.1
 Name: litellm-enterprise
-Version: 0.1.26
+Version: 0.1.28
 Summary: Package for LiteLLM Enterprise features
-License-File: LICENSE.md
 Author: BerriAI
 Requires-Python: >=3.8, !=2.7.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*
 Classifier: Programming Language :: Python :: 3
@@ -10,8 +9,6 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
-Classifier: Programming Language :: Python :: 3.14
 Project-URL: Documentation, https://docs.litellm.ai
 Project-URL: Homepage, https://litellm.ai
 Project-URL: Repository, https://github.com/BerriAI/litellm
--- a/litellm_enterprise-0.1.26.dist-info/RECORD
+++ b/litellm_enterprise-0.1.28.dist-info/RECORD
@@ -99,21 +99,24 @@ litellm_enterprise/enterprise_callbacks/secrets_plugins/typeform_api_token.py,sh
 litellm_enterprise/enterprise_callbacks/secrets_plugins/vault.py,sha256=fqtHTQTC6QaNMIZpuvntBnCSAgAhY2Ka-XOz4ZLafGk,653
 litellm_enterprise/enterprise_callbacks/secrets_plugins/yandex.py,sha256=BVtFVzCTtpAkRJVudeZIEBBz1W8wueDzpu6TBvxngxo,1183
 litellm_enterprise/enterprise_callbacks/secrets_plugins/zendesk_secret_key.py,sha256=3E21lWz12WUAmdnKDZH8znfTp6hRJbE3yImtfEP52qE,613
-litellm_enterprise/enterprise_callbacks/send_emails/base_email.py,sha256=hUdNvzjE5MM05BWlkFnaM232B8jwABWskNntd4mPmSo,21174
+litellm_enterprise/enterprise_callbacks/send_emails/base_email.py,sha256=O_xNeGIQckN0wEhyC3jf-LyNKiI2YaQnqLMUxAljUiU,26121
 litellm_enterprise/enterprise_callbacks/send_emails/endpoints.py,sha256=hOEpM_q8MJAXlKMOtC9KbgvDVr_YFtF3reu9bjXkpsI,7017
 litellm_enterprise/enterprise_callbacks/send_emails/resend_email.py,sha256=KxNfvONZxSWbNg0HmWwfC0rvHzpN7MBJXAPKGLcy_tU,1541
 litellm_enterprise/enterprise_callbacks/send_emails/sendgrid_email.py,sha256=4bvSOfV-WzCGIJX2V32Ug91I8GBQAmypDDp40qsZbQU,2318
 litellm_enterprise/enterprise_callbacks/send_emails/smtp_email.py,sha256=CGXmT-7EwftreMQXqUL7OC-XSh0cOje4s16Ptt9wBxc,1245
 litellm_enterprise/integrations/custom_guardrail.py,sha256=ZLVpqUZq9bR0vEFqVrlTJk0bYCZuFsXlw9XsdyK9t2E,1555
 litellm_enterprise/litellm_core_utils/litellm_logging.py,sha256=BKkQLPqebFbN-KeCbipGIPgdxHEfQkczImdhhzxKoFg,868
+litellm_enterprise/proxy/__init__.py,sha256=mnCBJ6Y98ai1RfVpYpCxavTPSTmdZ0e-kxX1qtaGIeo,50
 litellm_enterprise/proxy/audit_logging_endpoints.py,sha256=BnHczmi4bnW1GpMNsq4CvnbwL3rgQ-pnrtFd5WBbbHY,5304
 litellm_enterprise/proxy/auth/__init__.py,sha256=wTXtbDcLrD_qecxJfEJtraeCvGfldDgLz8qdVggLoSI,301
 litellm_enterprise/proxy/auth/custom_sso_handler.py,sha256=ITML9dRKL-LuJhU3WKKVPDp0ECfYxvxTvuX8GpSM0gE,3439
 litellm_enterprise/proxy/auth/route_checks.py,sha256=FbXwbrOkFr1dODH6XxoIpLG1nKowC7kyNaRR0WR6ujU,2490
 litellm_enterprise/proxy/auth/user_api_key_auth.py,sha256=7t5Q-JoKFyoymylaOT8KWAAOFVz0JOTl7PPOmTkpj5c,1144
+litellm_enterprise/proxy/common_utils/__init__.py,sha256=zmdmvktxSsUOpGp1TxwJeV8zyLPt4938NXtato4YqLI,56
 litellm_enterprise/proxy/common_utils/check_batch_cost.py,sha256=V0CCHtN-JV-_d-ydXV-cVs3zCImt1699JnICGF3oPOk,7360
+litellm_enterprise/proxy/common_utils/check_responses_cost.py,sha256=EKZ78mzpYjxmOtSXeoxfXRZX88MJtaoMyx6NQEPC8AU,4301
 litellm_enterprise/proxy/enterprise_routes.py,sha256=ToJVSSNaYUotzgIg-kWsfsqh2E0GnQirOPkpE4YkHNg,907
-litellm_enterprise/proxy/hooks/managed_files.py,sha256=_LxdyyhHJa_uZDbkwpdUCbDSCbM6qK-4z8_BHoj189o,41480
+litellm_enterprise/proxy/hooks/managed_files.py,sha256=Bbcd4mipfELbNz9VULv_DJBoKutfryEBZvCG1fC4Uf8,43898
 litellm_enterprise/proxy/management_endpoints/__init__.py,sha256=zfaqryxzmFu6se-w4yR2nlHKxDOOtHAWEehA2xFbFNg,270
 litellm_enterprise/proxy/management_endpoints/internal_user_endpoints.py,sha256=GEoOVujrtKXDHfko2KQaLn-ms64zkutFE9PP5IhBBLM,2175
 litellm_enterprise/proxy/management_endpoints/key_management_endpoints.py,sha256=-IXRzVrNQ3_krL-gxngelYQftwyPlB_HmgI3RN-HdvM,1147
@@ -121,10 +124,10 @@ litellm_enterprise/proxy/proxy_server.py,sha256=fzOeTyiyevLWi2767-2W1Co7reR-0wno
 litellm_enterprise/proxy/readme.md,sha256=ZcigMJYSHWs4SWnYriWjrSVDJKsu44c2HsbYbma0EHU,397
 litellm_enterprise/proxy/utils.py,sha256=y4ADfhlEG_mH0x5rfIg7D9FjS586lVgQ9DL0tTdgrMQ,962
 litellm_enterprise/proxy/vector_stores/endpoints.py,sha256=6Guh6zIH00dh2XXStn6GblTGpGyE4hZJ9WThVZggDQg,12944
-litellm_enterprise/types/enterprise_callbacks/send_emails.py,sha256=to7W50SCGKh14FB3gsnlzY_snyxa8M8LAAi_zXbJsb8,2044
+litellm_enterprise/types/enterprise_callbacks/send_emails.py,sha256=AouBXqb1EB1-Mg3fM_3UjUDihIA45zIjRgA6M4vQ7Zw,2150
 litellm_enterprise/types/proxy/audit_logging_endpoints.py,sha256=oSJVAuRD9r6ZjRCqNBFM-J5HSgOltsXts400b2aynRE,894
 litellm_enterprise/types/proxy/proxy_server.py,sha256=kdhtxsU2uok6-XO_ebugCv7PzYYmGgv4vh-XemHJnpM,146
-litellm_enterprise-0.1.26.dist-info/METADATA,sha256=CUF0ZS8-TuSbpGp3-q4g3VV30wvf4HaSIIaz0A6Gz-Q,1441
-litellm_enterprise-0.1.26.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
-litellm_enterprise-0.1.26.dist-info/licenses/LICENSE.md,sha256=nq3D9ZqOvRDT6hLkypQFTc3XsE15kbkg5rkkLJVSqKY,2251
-litellm_enterprise-0.1.26.dist-info/RECORD,,
+litellm_enterprise-0.1.28.dist-info/LICENSE.md,sha256=nq3D9ZqOvRDT6hLkypQFTc3XsE15kbkg5rkkLJVSqKY,2251
+litellm_enterprise-0.1.28.dist-info/METADATA,sha256=d9TUgJdtXBhVu8JQsTyTk0W9lyVuQPLI2ajs_I8Pmqk,1314
+litellm_enterprise-0.1.28.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+litellm_enterprise-0.1.28.dist-info/RECORD,,
--- a/litellm_enterprise-0.1.26.dist-info/WHEEL
+++ b/litellm_enterprise-0.1.28.dist-info/WHEEL
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 2.2.1
+Generator: poetry-core 1.9.0
 Root-Is-Purelib: true
 Tag: py3-none-any