litellm-enterprise 0.1.27__py3-none-any.whl → 0.1.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- litellm_enterprise/enterprise_callbacks/send_emails/base_email.py +2 -0
- litellm_enterprise/proxy/__init__.py +1 -0
- litellm_enterprise/proxy/auth/route_checks.py +1 -1
- litellm_enterprise/proxy/common_utils/__init__.py +1 -0
- litellm_enterprise/proxy/common_utils/check_responses_cost.py +110 -0
- litellm_enterprise/proxy/hooks/managed_files.py +123 -13
- litellm_enterprise/proxy/vector_stores/endpoints.py +2 -0
- {litellm_enterprise-0.1.27.dist-info → litellm_enterprise-0.1.29.dist-info}/METADATA +1 -1
- {litellm_enterprise-0.1.27.dist-info → litellm_enterprise-0.1.29.dist-info}/RECORD +11 -8
- {litellm_enterprise-0.1.27.dist-info → litellm_enterprise-0.1.29.dist-info}/WHEEL +1 -1
- {litellm_enterprise-0.1.27.dist-info → litellm_enterprise-0.1.29.dist-info}/licenses/LICENSE.md +0 -0
litellm_enterprise/enterprise_callbacks/send_emails/base_email.py
@@ -340,11 +340,13 @@ class BaseEmailLogger(CustomLogger):
         if type == "max_budget_alert":
             if user_info.max_budget is not None and user_info.spend is not None:
                 alert_threshold = user_info.max_budget * EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE
+
                 # Only alert if we've crossed the threshold but haven't exceeded max_budget yet
                 if user_info.spend >= alert_threshold and user_info.spend < user_info.max_budget:
                     # Generate cache key based on event type and identifier
                     _id = user_info.token or user_info.user_id or "default_id"
                     _cache_key = f"email_budget_alerts:max_budget_alert:{_id}"
+
                     # Check if we've already sent this alert
                     result = await _cache.async_get_cache(key=_cache_key)
                     if result is None:
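For readers skimming the hunk above: the alert only fires inside the window between the percentage threshold and the hard budget cap. A minimal sketch of that check, assuming an illustrative 90% value for EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE (the real constant is defined elsewhere in litellm):

```python
# Assumed value for illustration; litellm defines the real constant.
EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE = 0.9


def should_send_max_budget_alert(spend: float, max_budget: float) -> bool:
    """Alert once spend crosses the threshold but before the budget is exhausted."""
    alert_threshold = max_budget * EMAIL_BUDGET_ALERT_MAX_SPEND_ALERT_PERCENTAGE
    return alert_threshold <= spend < max_budget


assert should_send_max_budget_alert(spend=95.0, max_budget=100.0)
assert not should_send_max_budget_alert(spend=50.0, max_budget=100.0)
assert not should_send_max_budget_alert(spend=100.0, max_budget=100.0)
```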
litellm_enterprise/proxy/__init__.py
@@ -0,0 +1 @@
+# Package marker for enterprise proxy components.
litellm_enterprise/proxy/auth/route_checks.py
@@ -36,7 +36,7 @@ class EnterpriseRouteChecks:
         if not premium_user:
             raise HTTPException(
                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-                detail=f"🚨🚨🚨 DISABLING
+                detail=f"🚨🚨🚨 DISABLING ADMIN ENDPOINTS is an Enterprise feature\n🚨 {CommonProxyErrors.not_premium_user.value}",
             )

         return get_secret_bool("DISABLE_ADMIN_ENDPOINTS") is True
litellm_enterprise/proxy/common_utils/__init__.py
@@ -0,0 +1 @@
+# Package marker for enterprise proxy common utilities.
litellm_enterprise/proxy/common_utils/check_responses_cost.py
@@ -0,0 +1,110 @@
+"""
+Polls LiteLLM_ManagedObjectTable to check if the response is complete.
+Cost tracking is handled automatically by litellm.aget_responses().
+"""
+
+from typing import TYPE_CHECKING
+
+import litellm
+from litellm._logging import verbose_proxy_logger
+
+if TYPE_CHECKING:
+    from litellm.proxy.utils import PrismaClient, ProxyLogging
+    from litellm.router import Router
+
+
+class CheckResponsesCost:
+    def __init__(
+        self,
+        proxy_logging_obj: "ProxyLogging",
+        prisma_client: "PrismaClient",
+        llm_router: "Router",
+    ):
+        from litellm.proxy.utils import PrismaClient, ProxyLogging
+        from litellm.router import Router
+
+        self.proxy_logging_obj: ProxyLogging = proxy_logging_obj
+        self.prisma_client: PrismaClient = prisma_client
+        self.llm_router: Router = llm_router
+
+    async def check_responses_cost(self):
+        """
+        Check if background responses are complete and track their cost.
+        - Get all status="queued" or "in_progress" and file_purpose="response" jobs
+        - Query the provider to check if response is complete
+        - Cost is automatically tracked by litellm.aget_responses()
+        - Mark completed/failed/cancelled responses as complete in the database
+        """
+        jobs = await self.prisma_client.db.litellm_managedobjecttable.find_many(
+            where={
+                "status": {"in": ["queued", "in_progress"]},
+                "file_purpose": "response",
+            }
+        )
+
+        verbose_proxy_logger.debug(f"Found {len(jobs)} response jobs to check")
+        completed_jobs = []
+
+        for job in jobs:
+            unified_object_id = job.unified_object_id
+
+            try:
+                from litellm.proxy.hooks.responses_id_security import (
+                    ResponsesIDSecurity,
+                )
+
+                # Get the stored response object to extract model information
+                stored_response = job.file_object
+                model_name = stored_response.get("model", None)
+
+                # Decrypt the response ID
+                responses_id_security, _, _ = ResponsesIDSecurity()._decrypt_response_id(unified_object_id)
+
+                # Prepare metadata with model information for cost tracking
+                litellm_metadata = {
+                    "user_api_key_user_id": job.created_by or "default-user-id",
+                }
+
+                # Add model information if available
+                if model_name:
+                    litellm_metadata["model"] = model_name
+                    litellm_metadata["model_group"] = model_name  # Use same value for model_group
+
+                response = await litellm.aget_responses(
+                    response_id=responses_id_security,
+                    litellm_metadata=litellm_metadata,
+                )
+
+                verbose_proxy_logger.debug(
+                    f"Response {unified_object_id} status: {response.status}, model: {model_name}"
+                )
+
+            except Exception as e:
+                verbose_proxy_logger.info(
+                    f"Skipping job {unified_object_id} due to error: {e}"
+                )
+                continue
+
+            # Check if response is in a terminal state
+            if response.status == "completed":
+                verbose_proxy_logger.info(
+                    f"Response {unified_object_id} is complete. Cost automatically tracked by aget_responses."
+                )
+                completed_jobs.append(job)
+
+            elif response.status in ["failed", "cancelled"]:
+                verbose_proxy_logger.info(
+                    f"Response {unified_object_id} has status {response.status}, marking as complete"
+                )
+                completed_jobs.append(job)
+
+        # Mark completed jobs in the database
+        if len(completed_jobs) > 0:
+            await self.prisma_client.db.litellm_managedobjecttable.update_many(
+                where={"id": {"in": [job.id for job in completed_jobs]}},
+                data={"status": "completed"},
+            )
+            verbose_proxy_logger.info(
+                f"Marked {len(completed_jobs)} response jobs as completed"
+            )
+
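A rough sketch of how this checker might be driven as a periodic background task on the proxy, assuming CheckResponsesCost has been imported from the new module; the loop shape, interval, and argument names are assumptions for illustration, not taken from the package:

```python
import asyncio


# Hypothetical wiring; the proxy's real scheduler and object names may differ.
async def poll_response_costs(proxy_logging_obj, prisma_client, llm_router, interval_s: float = 60.0):
    checker = CheckResponsesCost(
        proxy_logging_obj=proxy_logging_obj,
        prisma_client=prisma_client,
        llm_router=llm_router,
    )
    while True:
        try:
            await checker.check_responses_cost()
        except Exception as e:
            # Keep the poller alive even if one pass fails.
            print(f"check_responses_cost failed: {e}")
        await asyncio.sleep(interval_s)
```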
litellm_enterprise/proxy/hooks/managed_files.py
@@ -8,6 +8,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Union, cas

 from fastapi import HTTPException

+import litellm
 from litellm import Router, verbose_logger
 from litellm._uuid import uuid
 from litellm.caching.caching import DualCache
@@ -23,7 +24,9 @@ from litellm.proxy._types import (
 from litellm.proxy.openai_files_endpoints.common_utils import (
     _is_base64_encoded_unified_file_id,
     get_batch_id_from_unified_batch_id,
+    get_content_type_from_file_object,
     get_model_id_from_unified_batch_id,
+    normalize_mime_type_for_provider,
 )
 from litellm.types.llms.openai import (
     AllMessageValues,
@@ -33,6 +36,7 @@ from litellm.types.llms.openai import (
     FileObject,
     OpenAIFileObject,
     OpenAIFilesPurpose,
+    ResponsesAPIResponse,
 )
 from litellm.types.utils import (
     CallTypesLiteral,
@@ -41,10 +45,6 @@ from litellm.types.utils import (
     LLMResponseTypes,
     SpecialEnums,
 )
-from litellm.proxy.openai_files_endpoints.common_utils import (
-    get_content_type_from_file_object,
-    normalize_mime_type_for_provider,
-)

 if TYPE_CHECKING:
     from litellm.types.llms.openai import HttpxBinaryResponseContent
@@ -133,10 +133,10 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
     async def store_unified_object_id(
         self,
         unified_object_id: str,
-        file_object: Union[LiteLLMBatch, LiteLLMFineTuningJob],
+        file_object: Union[LiteLLMBatch, LiteLLMFineTuningJob, "ResponsesAPIResponse"],
         litellm_parent_otel_span: Optional[Span],
         model_object_id: str,
-        file_purpose: Literal["batch", "fine-tune"],
+        file_purpose: Literal["batch", "fine-tune", "response"],
         user_api_key_dict: UserAPIKeyAuth,
     ) -> None:
         verbose_logger.info(
@@ -244,6 +244,78 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             return managed_object.created_by == user_id
         return True  # don't raise error if managed object is not found

+    async def list_user_batches(
+        self,
+        user_api_key_dict: UserAPIKeyAuth,
+        limit: Optional[int] = None,
+        after: Optional[str] = None,
+        provider: Optional[str] = None,
+        target_model_names: Optional[str] = None,
+        llm_router: Optional[Router] = None,
+    ) -> Dict[str, Any]:
+        # Provider filtering is not supported for managed batches
+        # This is because the encoded object ids stored in the managed objects table do not contain the provider information
+        # To support provider filtering, we would need to store the provider information in the encoded object ids
+        if provider:
+            raise Exception(
+                "Filtering by 'provider' is not supported when using managed batches."
+            )
+
+        # Model name filtering is not supported for managed batches
+        # This is because the encoded object ids stored in the managed objects table do not contain the model name
+        # A hash of the model name + litellm_params for the model name is encoded as the model id. This is not sufficient to reliably map the target model names to the model ids.
+        if target_model_names:
+            raise Exception(
+                "Filtering by 'target_model_names' is not supported when using managed batches."
+            )
+
+        where_clause: Dict[str, Any] = {"file_purpose": "batch"}
+
+        # Filter by user who created the batch
+        if user_api_key_dict.user_id:
+            where_clause["created_by"] = user_api_key_dict.user_id
+
+        if after:
+            where_clause["id"] = {"gt": after}
+
+        # Fetch more than needed to allow for post-fetch filtering
+        fetch_limit = limit or 20
+        if target_model_names:
+            # Fetch extra to account for filtering
+            fetch_limit = max(fetch_limit * 3, 100)
+
+        batches = await self.prisma_client.db.litellm_managedobjecttable.find_many(
+            where=where_clause,
+            take=fetch_limit,
+            order={"created_at": "desc"},
+        )
+
+        batch_objects: List[LiteLLMBatch] = []
+        for batch in batches:
+            try:
+                # Stop once we have enough after filtering
+                if len(batch_objects) >= (limit or 20):
+                    break
+
+                batch_data = json.loads(batch.file_object) if isinstance(batch.file_object, str) else batch.file_object
+                batch_obj = LiteLLMBatch(**batch_data)
+                batch_obj.id = batch.unified_object_id
+                batch_objects.append(batch_obj)
+
+            except Exception as e:
+                verbose_logger.warning(
+                    f"Failed to parse batch object {batch.unified_object_id}: {e}"
+                )
+                continue
+
+        return {
+            "object": "list",
+            "data": batch_objects,
+            "first_id": batch_objects[0].id if batch_objects else None,
+            "last_id": batch_objects[-1].id if batch_objects else None,
+            "has_more": len(batch_objects) == (limit or 20),
+        }
+
     async def get_user_created_file_ids(
         self, user_api_key_dict: UserAPIKeyAuth, model_object_ids: List[str]
     ) -> List[OpenAIFileObject]:
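For orientation, the new method returns an OpenAI-style list envelope. A hedged sketch of a caller consuming it; managed_files_hook and user_api_key_dict stand in for the proxy's _PROXY_LiteLLMManagedFiles instance and UserAPIKeyAuth object and are assumptions for illustration:

```python
from typing import Any, Dict


async def print_user_batches(managed_files_hook: Any, user_api_key_dict: Any) -> None:
    page: Dict[str, Any] = await managed_files_hook.list_user_batches(
        user_api_key_dict=user_api_key_dict,
        limit=20,
    )
    for batch in page["data"]:  # LiteLLMBatch objects with ids remapped to unified object ids
        print(batch.id, batch.status)
    print("has_more:", page["has_more"])
```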
@@ -297,6 +369,8 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         if (
             call_type == CallTypes.afile_content.value
             or call_type == CallTypes.afile_delete.value
+            or call_type == CallTypes.afile_retrieve.value
+            or call_type == CallTypes.afile_content.value
         ):
             await self.check_managed_file_id_access(data, user_api_key_dict)

@@ -361,12 +435,16 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             data["model_file_id_mapping"] = model_file_id_mapping
         elif (
             call_type == CallTypes.aretrieve_batch.value
+            or call_type == CallTypes.acancel_batch.value
             or call_type == CallTypes.acancel_fine_tuning_job.value
             or call_type == CallTypes.aretrieve_fine_tuning_job.value
         ):
             accessor_key: Optional[str] = None
             retrieve_object_id: Optional[str] = None
-            if
+            if (
+                call_type == CallTypes.aretrieve_batch.value
+                or call_type == CallTypes.acancel_batch.value
+            ):
                 accessor_key = "batch_id"
             elif (
                 call_type == CallTypes.acancel_fine_tuning_job.value
@@ -382,6 +460,8 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
                 if retrieve_object_id
                 else False
             )
+            print(f"🔥potential_llm_object_id: {potential_llm_object_id}")
+            print(f"🔥retrieve_object_id: {retrieve_object_id}")
            if potential_llm_object_id and retrieve_object_id:
                ## VALIDATE USER HAS ACCESS TO THE OBJECT ##
                if not await self.can_user_call_unified_object_id(
@@ -673,6 +753,7 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             bytes=file_objects[0].bytes,
             filename=file_objects[0].filename,
             status="uploaded",
+            expires_at=file_objects[0].expires_at,
         )

         return response
@@ -837,15 +918,36 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
         return response

     async def afile_retrieve(
-        self, file_id: str, litellm_parent_otel_span: Optional[Span]
+        self, file_id: str, litellm_parent_otel_span: Optional[Span], llm_router=None
     ) -> OpenAIFileObject:
         stored_file_object = await self.get_unified_file_id(
             file_id, litellm_parent_otel_span
         )
-
-
-
+
+        # Case 1 : This is not a managed file
+        if not stored_file_object:
             raise Exception(f"LiteLLM Managed File object with id={file_id} not found")
+
+        # Case 2: Managed file and the file object exists in the database
+        if stored_file_object and stored_file_object.file_object:
+            return stored_file_object.file_object
+
+        # Case 3: Managed file exists in the database but not the file object (for. e.g the batch task might not have run)
+        # So we fetch the file object from the provider. We deliberately do not store the result to avoid interfering with batch cost tracking code.
+        if not llm_router:
+            raise Exception(
+                f"LiteLLM Managed File object with id={file_id} has no file_object "
+                f"and llm_router is required to fetch from provider"
+            )
+
+        try:
+            model_id, model_file_id = next(iter(stored_file_object.model_mappings.items()))
+            credentials = llm_router.get_deployment_credentials_with_provider(model_id) or {}
+            response = await litellm.afile_retrieve(file_id=model_file_id, **credentials)
+            response.id = file_id  # Replace with unified ID
+            return response
+        except Exception as e:
+            raise Exception(f"Failed to retrieve file {file_id} from provider: {str(e)}") from e

     async def afile_list(
         self,
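The retrieve path now accepts an optional router so that Case 3 can pull the file from the provider. A hedged example of the call; the surrounding function and variable names are placeholders, not part of the package:

```python
from typing import Any, Optional


async def retrieve_managed_file(managed_files_hook: Any, router: Optional[Any], unified_file_id: str) -> Any:
    # Passing llm_router enables the provider fallback when no file_object is stored yet.
    return await managed_files_hook.afile_retrieve(
        file_id=unified_file_id,
        litellm_parent_otel_span=None,
        llm_router=router,
    )
```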
@@ -869,10 +971,13 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):
             [file_id], litellm_parent_otel_span
         )

+        delete_response = None
         specific_model_file_id_mapping = model_file_id_mapping.get(file_id)
         if specific_model_file_id_mapping:
+            # Remove conflicting keys from data to avoid duplicate keyword arguments
+            filtered_data = {k: v for k, v in data.items() if k not in ("model", "file_id")}
             for model_id, model_file_id in specific_model_file_id_mapping.items():
-                await llm_router.afile_delete(model=model_id, file_id=model_file_id, **
+                delete_response = await llm_router.afile_delete(model=model_id, file_id=model_file_id, **filtered_data)  # type: ignore

         stored_file_object = await self.delete_unified_file_id(
             file_id, litellm_parent_otel_span
@@ -880,6 +985,9 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):

         if stored_file_object:
             return stored_file_object
+        elif delete_response:
+            delete_response.id = file_id
+            return delete_response
         else:
             raise Exception(f"LiteLLM Managed File object with id={file_id} not found")

@@ -946,7 +1054,9 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger, BaseFileEndpoints):

         # File is stored in a storage backend, download and convert to base64
         try:
-            from litellm.llms.base_llm.files.storage_backend_factory import
+            from litellm.llms.base_llm.files.storage_backend_factory import (
+                get_storage_backend,
+            )

             storage_backend_name = db_file.storage_backend
             storage_url = db_file.storage_url
litellm_enterprise/proxy/vector_stores/endpoints.py
@@ -282,6 +282,8 @@ async def get_vector_store_info(
         updated_at=vector_store.get("updated_at") or None,
         litellm_credential_name=vector_store.get("litellm_credential_name"),
         litellm_params=vector_store.get("litellm_params") or None,
+        team_id=vector_store.get("team_id"),
+        user_id=vector_store.get("user_id"),
     )
     return {"vector_store": vector_store_pydantic_obj}

{litellm_enterprise-0.1.27.dist-info → litellm_enterprise-0.1.29.dist-info}/RECORD
@@ -99,32 +99,35 @@ litellm_enterprise/enterprise_callbacks/secrets_plugins/typeform_api_token.py,sh
 litellm_enterprise/enterprise_callbacks/secrets_plugins/vault.py,sha256=fqtHTQTC6QaNMIZpuvntBnCSAgAhY2Ka-XOz4ZLafGk,653
 litellm_enterprise/enterprise_callbacks/secrets_plugins/yandex.py,sha256=BVtFVzCTtpAkRJVudeZIEBBz1W8wueDzpu6TBvxngxo,1183
 litellm_enterprise/enterprise_callbacks/secrets_plugins/zendesk_secret_key.py,sha256=3E21lWz12WUAmdnKDZH8znfTp6hRJbE3yImtfEP52qE,613
-litellm_enterprise/enterprise_callbacks/send_emails/base_email.py,sha256=
+litellm_enterprise/enterprise_callbacks/send_emails/base_email.py,sha256=O_xNeGIQckN0wEhyC3jf-LyNKiI2YaQnqLMUxAljUiU,26121
 litellm_enterprise/enterprise_callbacks/send_emails/endpoints.py,sha256=hOEpM_q8MJAXlKMOtC9KbgvDVr_YFtF3reu9bjXkpsI,7017
 litellm_enterprise/enterprise_callbacks/send_emails/resend_email.py,sha256=KxNfvONZxSWbNg0HmWwfC0rvHzpN7MBJXAPKGLcy_tU,1541
 litellm_enterprise/enterprise_callbacks/send_emails/sendgrid_email.py,sha256=4bvSOfV-WzCGIJX2V32Ug91I8GBQAmypDDp40qsZbQU,2318
 litellm_enterprise/enterprise_callbacks/send_emails/smtp_email.py,sha256=CGXmT-7EwftreMQXqUL7OC-XSh0cOje4s16Ptt9wBxc,1245
 litellm_enterprise/integrations/custom_guardrail.py,sha256=ZLVpqUZq9bR0vEFqVrlTJk0bYCZuFsXlw9XsdyK9t2E,1555
 litellm_enterprise/litellm_core_utils/litellm_logging.py,sha256=BKkQLPqebFbN-KeCbipGIPgdxHEfQkczImdhhzxKoFg,868
+litellm_enterprise/proxy/__init__.py,sha256=mnCBJ6Y98ai1RfVpYpCxavTPSTmdZ0e-kxX1qtaGIeo,50
 litellm_enterprise/proxy/audit_logging_endpoints.py,sha256=BnHczmi4bnW1GpMNsq4CvnbwL3rgQ-pnrtFd5WBbbHY,5304
 litellm_enterprise/proxy/auth/__init__.py,sha256=wTXtbDcLrD_qecxJfEJtraeCvGfldDgLz8qdVggLoSI,301
 litellm_enterprise/proxy/auth/custom_sso_handler.py,sha256=ITML9dRKL-LuJhU3WKKVPDp0ECfYxvxTvuX8GpSM0gE,3439
-litellm_enterprise/proxy/auth/route_checks.py,sha256=
+litellm_enterprise/proxy/auth/route_checks.py,sha256=_0b-B6Lg5Ni2oQLCYrRdQdTi5dyX4yxoIsKy8P6dD34,2488
 litellm_enterprise/proxy/auth/user_api_key_auth.py,sha256=7t5Q-JoKFyoymylaOT8KWAAOFVz0JOTl7PPOmTkpj5c,1144
+litellm_enterprise/proxy/common_utils/__init__.py,sha256=zmdmvktxSsUOpGp1TxwJeV8zyLPt4938NXtato4YqLI,56
 litellm_enterprise/proxy/common_utils/check_batch_cost.py,sha256=V0CCHtN-JV-_d-ydXV-cVs3zCImt1699JnICGF3oPOk,7360
+litellm_enterprise/proxy/common_utils/check_responses_cost.py,sha256=EKZ78mzpYjxmOtSXeoxfXRZX88MJtaoMyx6NQEPC8AU,4301
 litellm_enterprise/proxy/enterprise_routes.py,sha256=ToJVSSNaYUotzgIg-kWsfsqh2E0GnQirOPkpE4YkHNg,907
-litellm_enterprise/proxy/hooks/managed_files.py,sha256=
+litellm_enterprise/proxy/hooks/managed_files.py,sha256=NFgzBXMsU-0QrT91CCZ79y1d-FTTZVB8wUasoJUC8NE,47664
 litellm_enterprise/proxy/management_endpoints/__init__.py,sha256=zfaqryxzmFu6se-w4yR2nlHKxDOOtHAWEehA2xFbFNg,270
 litellm_enterprise/proxy/management_endpoints/internal_user_endpoints.py,sha256=GEoOVujrtKXDHfko2KQaLn-ms64zkutFE9PP5IhBBLM,2175
 litellm_enterprise/proxy/management_endpoints/key_management_endpoints.py,sha256=-IXRzVrNQ3_krL-gxngelYQftwyPlB_HmgI3RN-HdvM,1147
 litellm_enterprise/proxy/proxy_server.py,sha256=fzOeTyiyevLWi2767-2W1Co7reR-0wnoUIhOgVlJFQc,1183
 litellm_enterprise/proxy/readme.md,sha256=ZcigMJYSHWs4SWnYriWjrSVDJKsu44c2HsbYbma0EHU,397
 litellm_enterprise/proxy/utils.py,sha256=y4ADfhlEG_mH0x5rfIg7D9FjS586lVgQ9DL0tTdgrMQ,962
-litellm_enterprise/proxy/vector_stores/endpoints.py,sha256=
+litellm_enterprise/proxy/vector_stores/endpoints.py,sha256=3F5A9kdttA72PGzVP3aR2d1kkDkdGvc5O196k2iOmRc,13058
 litellm_enterprise/types/enterprise_callbacks/send_emails.py,sha256=AouBXqb1EB1-Mg3fM_3UjUDihIA45zIjRgA6M4vQ7Zw,2150
 litellm_enterprise/types/proxy/audit_logging_endpoints.py,sha256=oSJVAuRD9r6ZjRCqNBFM-J5HSgOltsXts400b2aynRE,894
 litellm_enterprise/types/proxy/proxy_server.py,sha256=kdhtxsU2uok6-XO_ebugCv7PzYYmGgv4vh-XemHJnpM,146
-litellm_enterprise-0.1.
-litellm_enterprise-0.1.
-litellm_enterprise-0.1.
-litellm_enterprise-0.1.
+litellm_enterprise-0.1.29.dist-info/METADATA,sha256=cXR-b-_hiodo6to929eOoe7zPfqy-fpoKbAaU0hT578,1441
+litellm_enterprise-0.1.29.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+litellm_enterprise-0.1.29.dist-info/licenses/LICENSE.md,sha256=nq3D9ZqOvRDT6hLkypQFTc3XsE15kbkg5rkkLJVSqKY,2251
+litellm_enterprise-0.1.29.dist-info/RECORD,,
{litellm_enterprise-0.1.27.dist-info → litellm_enterprise-0.1.29.dist-info}/licenses/LICENSE.md
RENAMED (file without changes)
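The RECORD entries above pair each file with a sha256 value in the standard wheel format (urlsafe base64 of the raw digest, padding stripped) and the file size in bytes. A small sketch for reproducing such an entry locally from an unpacked wheel; the example path is illustrative:

```python
import base64
import hashlib
import os


def record_entry(path: str) -> str:
    """Return 'path,sha256=<digest>,<size>' in the wheel RECORD format."""
    data = open(path, "rb").read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{os.path.getsize(path)}"


# e.g. record_entry("litellm_enterprise/proxy/__init__.py") should match the RECORD line above.
```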