blaxel 0.2.37__py3-none-any.whl → 0.2.38__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- blaxel/__init__.py +2 -2
- blaxel/core/tools/__init__.py +4 -0
- blaxel/core/volume/volume.py +4 -0
- blaxel/crewai/model.py +81 -44
- blaxel/crewai/tools.py +85 -2
- blaxel/googleadk/model.py +22 -3
- blaxel/googleadk/tools.py +25 -6
- blaxel/langgraph/custom/gemini.py +19 -12
- blaxel/langgraph/model.py +26 -18
- blaxel/langgraph/tools.py +6 -11
- blaxel/livekit/model.py +7 -2
- blaxel/livekit/tools.py +3 -1
- blaxel/llamaindex/model.py +145 -84
- blaxel/llamaindex/tools.py +6 -4
- blaxel/openai/model.py +7 -1
- blaxel/openai/tools.py +13 -3
- blaxel/pydantic/model.py +38 -24
- blaxel/pydantic/tools.py +37 -4
- {blaxel-0.2.37.dist-info → blaxel-0.2.38.dist-info}/METADATA +5 -46
- {blaxel-0.2.37.dist-info → blaxel-0.2.38.dist-info}/RECORD +22 -22
- {blaxel-0.2.37.dist-info → blaxel-0.2.38.dist-info}/WHEEL +0 -0
- {blaxel-0.2.37.dist-info → blaxel-0.2.38.dist-info}/licenses/LICENSE +0 -0
blaxel/__init__.py
CHANGED
@@ -4,8 +4,8 @@ from .core.common.autoload import autoload
 from .core.common.env import env
 from .core.common.settings import settings
 
-__version__ = "0.2.37"
-__commit__ = "
+__version__ = "0.2.38"
+__commit__ = "a95d23798044e6e88b9247801edf94612972b31c"
 __sentry_dsn__ = "https://9711de13cd02b285ca4378c01de8dc30@o4508714045276160.ingest.us.sentry.io/4510461121462272"
 __all__ = ["autoload", "settings", "env"]
 
blaxel/core/tools/__init__.py
CHANGED
@@ -254,6 +254,10 @@ class PersistentMcpClient:
                 await self.client_exit_stack.aclose()
             except Exception as e:
                 logger.debug(f"Error closing client exit stack: {e}")
+            # Create fresh exit stacks so that future initialize() calls
+            # don't reuse stacks tainted by old cancel scopes
+            self.session_exit_stack = AsyncExitStack()
+            self.client_exit_stack = AsyncExitStack()
             logger.debug("WebSocket connection closed due to inactivity.")
 
 
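Note: the fix above swaps in brand-new AsyncExitStack objects after closing the old ones. As a general illustration of that close-then-replace pattern (hypothetical Reconnector class, not code from this package):

from contextlib import AsyncExitStack


class Reconnector:
    """Minimal sketch of the close-then-replace pattern used in the hunk above."""

    def __init__(self) -> None:
        self.exit_stack = AsyncExitStack()

    async def close(self) -> None:
        try:
            await self.exit_stack.aclose()
        finally:
            # Swap in a fresh stack so a later reconnect starts clean instead of
            # reusing a stack that was closed inside another task's cancel scope.
            self.exit_stack = AsyncExitStack()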
blaxel/core/volume/volume.py
CHANGED
@@ -137,6 +137,8 @@ class VolumeCreateConfiguration:
 
 
 class VolumeInstance:
+    delete: "_AsyncDeleteDescriptor"
+
     def __init__(self, volume: Volume):
         self.volume = volume
 
@@ -275,6 +277,8 @@ class VolumeInstance:
 
 
 class SyncVolumeInstance:
+    delete: "_SyncDeleteDescriptor"
+
     """Synchronous volume instance for managing persistent storage."""
 
     def __init__(self, volume: Volume):
blaxel/crewai/model.py
CHANGED
@@ -1,6 +1,8 @@
 from logging import getLogger
 
-
+import httpx
+from crewai import LLM  # type: ignore[import-not-found]
+from crewai.llms.hooks.base import BaseInterceptor  # type: ignore[import-not-found]
 
 from blaxel.core import bl_model as bl_model_core
 from blaxel.core import settings
@@ -8,62 +10,97 @@ from blaxel.core import settings
 logger = getLogger(__name__)
 
 
-class
-
-
-
+class AuthInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
+    """Interceptor that injects dynamic auth headers into every HTTP request.
+
+    Used for crewai native providers (OpenAI, Anthropic, Gemini, etc.)
+    where the LLM.__new__ factory returns a provider-specific instance
+    and subclass overrides are not possible.
+    """
+
+    def on_outbound(self, message: httpx.Request) -> httpx.Request:
+        auth_headers = settings.auth.get_headers()
+        # Remove the SDK's default "Authorization: Bearer replaced" header
+        # when our auth uses a different header (e.g. X-Blaxel-Authorization with API keys)
+        if "Authorization" not in auth_headers:
+            message.headers.pop("Authorization", None)
+            message.headers.pop("authorization", None)
+        for key, value in auth_headers.items():
+            message.headers[key] = value
+        return message
+
+    def on_inbound(self, message: httpx.Response) -> httpx.Response:
+        return message
+
+    async def aon_outbound(self, message: httpx.Request) -> httpx.Request:
+        return self.on_outbound(message)
+
+    async def aon_inbound(self, message: httpx.Response) -> httpx.Response:
+        return message
+
+
+# Provider types that crewai routes to native SDK implementations.
+# These support the interceptor mechanism for auth.
+_NATIVE_PROVIDER_PREFIXES = {"openai", "anthropic", "gemini", "azure", "bedrock"}
+
+
+def _is_native_route(provider_prefix: str) -> bool:
+    """Check if a provider prefix will be routed to a native SDK by crewai."""
+    return provider_prefix.lower() in _NATIVE_PROVIDER_PREFIXES
 
 
 async def bl_model(name: str, **kwargs):
     url, type, model = await bl_model_core(name).get_parameters()
+
+    # Map blaxel model types to crewai provider prefixes and base URLs
     if type == "mistral":
-
-
-            api_key="replaced",
-            base_url=f"{url}/v1",
-            **kwargs,
-        )
+        provider_prefix = "mistral"
+        base_url = f"{url}/v1"
     elif type == "xai":
-
-
-            api_key="replaced",
-            base_url=f"{url}/v1",
-            **kwargs,
-        )
+        provider_prefix = "groq"
+        base_url = f"{url}/v1"
     elif type == "deepseek":
-
-
-            api_key="replaced",
-            base_url=f"{url}/v1",
-            **kwargs,
-        )
+        provider_prefix = "openai"
+        base_url = f"{url}/v1"
     elif type == "anthropic":
-
-
-            api_key="replaced",
-            base_url=url,
-            **kwargs,
-        )
+        provider_prefix = "anthropic"
+        base_url = url
     elif type == "gemini":
-
-
-            api_key="replaced",
-            base_url=f"{url}/v1beta/models/{model}",
-            **kwargs,
-        )
+        provider_prefix = "gemini"
+        base_url = f"{url}/v1beta/models/{model}"
     elif type == "cerebras":
-
-
-            api_key="replaced",
-            base_url=f"{url}/v1",
-            **kwargs,
-        )
+        provider_prefix = "cerebras"
+        base_url = f"{url}/v1"
     else:
         if type != "openai":
             logger.warning(f"Model {model} is not supported by CrewAI, defaulting to OpenAI")
-
-
+        provider_prefix = "openai"
+        base_url = f"{url}/v1"
+
+    model_string = f"{provider_prefix}/{model}"
+    auth_headers = settings.auth.get_headers()
+
+    if _is_native_route(provider_prefix):
+        # Native providers: use interceptor for dynamic auth headers.
+        # Always pass api_key="replaced" because crewai's native providers
+        # require a non-None api_key. The AuthInterceptor handles stripping
+        # the dummy Authorization header and injecting the real auth.
+        return LLM(
+            model=model_string,
             api_key="replaced",
-            base_url=
+            base_url=base_url,
+            interceptor=AuthInterceptor(),
+            **kwargs,
+        )
+    else:
+        # LiteLLM fallback: pass auth headers via extra_headers param.
+        # Omit api_key when auth uses X-Blaxel-Authorization to prevent
+        # litellm from adding "Authorization: Bearer replaced".
+        llm_api_key = "replaced" if "Authorization" in auth_headers else None
+        return LLM(
+            model=model_string,
+            api_key=llm_api_key,
+            base_url=base_url,
+            extra_headers=auth_headers,
             **kwargs,
         )
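A minimal usage sketch of the updated crewai factory, assuming only what this hunk shows (bl_model is async and returns a crewai LLM); the model name "my-model" is a placeholder for a model deployed on Blaxel:

import asyncio

from blaxel.crewai.model import bl_model


async def main() -> None:
    # Native providers (openai, anthropic, gemini, ...) come back with an
    # AuthInterceptor attached; other providers fall back to LiteLLM with the
    # Blaxel auth headers passed as extra_headers.
    llm = await bl_model("my-model")
    print(type(llm))


asyncio.run(main())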
blaxel/crewai/tools.py
CHANGED
@@ -1,10 +1,86 @@
+from typing import Any
+
 from crewai.tools import BaseTool
+from pydantic import BaseModel
 
 from blaxel.core.tools import bl_tools as bl_tools_core
-from blaxel.core.tools.common import create_model_from_json_schema
 from blaxel.core.tools.types import Tool
 
 
+def _clean_schema_for_openai(schema: dict) -> dict:
+    """Clean JSON schema to be compatible with OpenAI strict mode.
+
+    Recursively resolves anyOf patterns, ensures all schemas have type keys,
+    removes additionalProperties and $schema, and ensures object types have
+    properties and required fields.
+    """
+    if not isinstance(schema, dict):
+        return schema
+
+    cleaned = schema.copy()
+
+    # Remove unsupported keys
+    cleaned.pop("$schema", None)
+    cleaned.pop("additionalProperties", None)
+
+    # Resolve anyOf: pick the non-null type
+    if "anyOf" in cleaned:
+        any_of = cleaned.pop("anyOf")
+        non_null = [s for s in any_of if s.get("type") != "null"]
+        if non_null:
+            # Merge the first non-null variant into current schema
+            resolved = _clean_schema_for_openai(non_null[0])
+            cleaned.update(resolved)
+        else:
+            cleaned["type"] = "string"
+
+    # Ensure type exists
+    if "type" not in cleaned and "properties" in cleaned:
+        cleaned["type"] = "object"
+
+    # Handle object types
+    if cleaned.get("type") == "object":
+        if "properties" not in cleaned:
+            cleaned["properties"] = {}
+        if "required" not in cleaned:
+            cleaned["required"] = list(cleaned["properties"].keys())
+
+    # Recursively clean properties
+    if "properties" in cleaned:
+        cleaned["properties"] = {
+            k: _clean_schema_for_openai(v) for k, v in cleaned["properties"].items()
+        }
+
+    # Recursively clean array items
+    if "items" in cleaned:
+        cleaned["items"] = _clean_schema_for_openai(cleaned["items"])
+        # Ensure items has a type
+        if "type" not in cleaned["items"]:
+            cleaned["items"]["type"] = "string"
+
+    return cleaned
+
+
+def _make_clean_args_schema(tool: Tool) -> type[BaseModel]:
+    """Create a Pydantic model whose JSON schema returns the pre-cleaned schema.
+
+    CrewAI calls model_json_schema() on args_schema to build the OpenAI tool
+    parameters. By overriding model_json_schema we ensure the cleaned schema
+    is used directly, avoiding issues with Pydantic re-introducing anyOf or
+    dropping type keys on array items.
+    """
+    clean = _clean_schema_for_openai(tool.input_schema)
+
+    class CleanArgsSchema(BaseModel):
+        @classmethod
+        def model_json_schema(cls, *args: Any, **kwargs: Any) -> dict[str, Any]:
+            return clean
+
+    CleanArgsSchema.__name__ = f"{tool.name}Schema"
+    CleanArgsSchema.__qualname__ = f"{tool.name}Schema"
+    return CleanArgsSchema
+
+
 class CrewAITool(BaseTool):
     _tool: Tool
 
@@ -12,13 +88,20 @@ class CrewAITool(BaseTool):
         super().__init__(
             name=tool.name,
             description=tool.description,
-            args_schema=
+            args_schema=_make_clean_args_schema(tool),
         )
         self._tool = tool
 
     def _run(self, *args, **kwargs):
+        if not self._tool.sync_coroutine:
+            raise ValueError(f"Tool {self._tool.name} does not have a sync_coroutine defined")
         return self._tool.sync_coroutine(**kwargs)
 
+    async def _arun(self, *args, **kwargs):
+        if not self._tool.coroutine:
+            raise ValueError(f"Tool {self._tool.name} does not have a coroutine defined")
+        return await self._tool.coroutine(**kwargs)
+
 
 async def bl_tools(tools_names: list[str], **kwargs) -> list[BaseTool]:
     tools = bl_tools_core(tools_names, **kwargs)
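For illustration (input invented, output traced from the _clean_schema_for_openai shown above): an optional field expressed with anyOf collapses to a plain typed property, unsupported keys are dropped, and required is filled in from properties.

from blaxel.crewai.tools import _clean_schema_for_openai

example = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "path": {"anyOf": [{"type": "string"}, {"type": "null"}]},
    },
}

cleaned = _clean_schema_for_openai(example)
# cleaned == {
#     "type": "object",
#     "properties": {"path": {"type": "string"}},
#     "required": ["path"],
# }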
blaxel/googleadk/model.py
CHANGED
@@ -1,6 +1,9 @@
 from logging import getLogger
 
-from google.adk.models.lite_llm import
+from google.adk.models.lite_llm import (  # type: ignore[import-not-found]
+    LiteLlm,
+    LiteLLMClient,
+)
 
 from blaxel.core import bl_model as bl_model_core
 from blaxel.core import settings
@@ -23,7 +26,15 @@ class AuthenticatedLiteLLMClient(LiteLLMClient):
         Returns:
             The model response as a message.
         """
-
+        auth_headers = settings.auth.get_headers()
+        extra = dict(auth_headers)
+        # When auth uses X-Blaxel-Authorization (API keys), override the
+        # Authorization header that litellm sets from api_key or OPENAI_API_KEY
+        # env var. Without this, the server sees an invalid Authorization header
+        # and rejects the request.
+        if "Authorization" not in auth_headers:
+            extra["Authorization"] = ""
+        kwargs["extra_headers"] = extra
         return await super().acompletion(
             model=model,
             messages=messages,
@@ -44,7 +55,15 @@ class AuthenticatedLiteLLMClient(LiteLLMClient):
         Returns:
             The response from the model.
         """
-
+        auth_headers = settings.auth.get_headers()
+        extra = dict(auth_headers)
+        # When auth uses X-Blaxel-Authorization (API keys), override the
+        # Authorization header that litellm sets from api_key or OPENAI_API_KEY
+        # env var. Without this, the server sees an invalid Authorization header
+        # and rejects the request.
+        if "Authorization" not in auth_headers:
+            extra["Authorization"] = ""
+        kwargs["extra_headers"] = extra
         return super().completion(
             model=model,
             messages=messages,
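To make the header handling concrete (values are illustrative, not real credentials): with API-key auth the hook blanks out Authorization so litellm's default bearer value never reaches the gateway.

auth_headers = {"X-Blaxel-Authorization": "Bearer bl_xxx"}  # illustrative value

extra = dict(auth_headers)
if "Authorization" not in auth_headers:
    extra["Authorization"] = ""  # overrides the header litellm would otherwise set

# extra == {"X-Blaxel-Authorization": "Bearer bl_xxx", "Authorization": ""}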
blaxel/googleadk/tools.py
CHANGED
@@ -1,8 +1,8 @@
 import inspect
-from typing import Any
+from typing import Any
 
-from google.adk.tools import BaseTool, ToolContext
-from google.genai import types
+from google.adk.tools import BaseTool, ToolContext  # type: ignore[import-not-found]
+from google.genai import types  # type: ignore[import-not-found]
 
 from blaxel.core.tools import bl_tools as bl_tools_core
 from blaxel.core.tools.types import Tool
@@ -31,15 +31,33 @@ class GoogleADKTool(BaseTool):
         if "additionalProperties" in cleaned_schema:
             del cleaned_schema["additionalProperties"]
 
+        # Google genai Schema expects type as a single enum string (e.g. "STRING"),
+        # not a JSON Schema union list like ["null", "string"].
+        if "type" in cleaned_schema and isinstance(cleaned_schema["type"], list):
+            type_list = [t for t in cleaned_schema["type"] if t != "null"]
+            cleaned_schema["type"] = type_list[0].upper() if type_list else "STRING"
+            # Mark as nullable if "null" was in the original list
+            if "null" in schema["type"]:
+                cleaned_schema["nullable"] = True
+        elif "type" in cleaned_schema and isinstance(cleaned_schema["type"], str):
+            cleaned_schema["type"] = cleaned_schema["type"].upper()
+
+        # Ensure object types have properties
+        if cleaned_schema.get("type") == "OBJECT" and "properties" not in cleaned_schema:
+            cleaned_schema["properties"] = {}
+
         # Recursively clean properties if they exist
         if "properties" in cleaned_schema:
             cleaned_schema["properties"] = {
                 k: self._clean_schema(v) for k, v in cleaned_schema["properties"].items()
             }
 
+        # Recursively clean items for array types
+        if "items" in cleaned_schema and isinstance(cleaned_schema["items"], dict):
+            cleaned_schema["items"] = self._clean_schema(cleaned_schema["items"])
+
         return cleaned_schema
 
-    @override
     def _get_declaration(self) -> types.FunctionDeclaration | None:
         # Clean the schema recursively
         schema = self._clean_schema(self._tool.input_schema)
@@ -48,14 +66,15 @@ class GoogleADKTool(BaseTool):
             types.FunctionDeclaration(
                 name=self._tool.name,
                 description=self._tool.description,
-                parameters=schema,
+                parameters=types.Schema(**schema),
             )
         )
 
         return function_decl
 
-    @override
     async def run_async(self, *, args: dict[str, Any], tool_context: ToolContext) -> Any:
+        if not self._tool.coroutine:
+            raise ValueError(f"Tool {self._tool.name} does not have a coroutine defined")
         args_to_call = args.copy()
         signature = inspect.signature(self._tool.coroutine)
         if "tool_context" in signature.parameters:
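Illustration of the new type handling (field invented, result per the hunk above): a JSON Schema union type becomes the single upper-case enum string plus the nullable flag that google.genai's Schema expects.

# Before cleaning: standard JSON Schema for an optional string field.
prop = {"type": ["string", "null"], "description": "Optional path"}

# After GoogleADKTool._clean_schema processes it, the same field reads:
cleaned_prop = {"type": "STRING", "description": "Optional path", "nullable": True}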
blaxel/langgraph/custom/gemini.py
CHANGED
@@ -23,16 +23,18 @@ from typing import (
 
 import httpx
 import requests
-from langchain_core.callbacks.manager import (
+from langchain_core.callbacks.manager import (  # type: ignore[import-not-found]
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
-from langchain_core.language_models import
-
+from langchain_core.language_models import (  # type: ignore[import-not-found]
+    LanguageModelInput,
+)
+from langchain_core.language_models.chat_models import (  # type: ignore[import-not-found]
     BaseChatModel,
     LangSmithParams,
 )
-from langchain_core.messages import (
+from langchain_core.messages import (  # type: ignore[import-not-found]
     AIMessage,
     AIMessageChunk,
     BaseMessage,
@@ -41,25 +43,30 @@ from langchain_core.messages import (
     SystemMessage,
     ToolMessage,
 )
-from langchain_core.messages.ai import UsageMetadata
-from langchain_core.messages.tool import (
+from langchain_core.messages.ai import UsageMetadata  # type: ignore[import-not-found]
+from langchain_core.messages.tool import (  # type: ignore[import-not-found]
     invalid_tool_call,
     tool_call,
     tool_call_chunk,
 )
-from langchain_core.output_parsers.openai_tools import (
+from langchain_core.output_parsers.openai_tools import (  # type: ignore[import-not-found]
     JsonOutputKeyToolsParser,
     PydanticToolsParser,
     parse_tool_calls,
 )
-from langchain_core.outputs import (
+from langchain_core.outputs import (  # type: ignore[import-not-found]
     ChatGeneration,
     ChatGenerationChunk,
     ChatResult,
 )
-from langchain_core.runnables import
-
-
+from langchain_core.runnables import (  # type: ignore[import-not-found]
+    Runnable,
+    RunnablePassthrough,
+)
+from langchain_core.tools import BaseTool  # type: ignore[import-not-found]
+from langchain_core.utils.function_calling import (  # type: ignore[import-not-found]
+    convert_to_openai_tool,
+)
 from PIL import Image
 from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
 from tenacity import (
@@ -1467,4 +1474,4 @@ def image_bytes_to_b64_string(image_bytes: bytes, image_format: str = "jpeg") ->
     """Convert image bytes to base64 string."""
     import base64
 
-    return f"data:image/{image_format};base64,{base64.b64encode(image_bytes).decode('utf-8')}"
+    return f"data:image/{image_format};base64,{base64.b64encode(image_bytes).decode('utf-8')}"
blaxel/langgraph/model.py
CHANGED
@@ -7,11 +7,15 @@ from blaxel.core import bl_model as bl_model_core
 from blaxel.core import settings
 
 if TYPE_CHECKING:
-    from langchain_core.callbacks import Callbacks
-    from langchain_core.language_models import
-
-
-    from langchain_core.
+    from langchain_core.callbacks import Callbacks  # type: ignore[import-not-found]
+    from langchain_core.language_models import (  # type: ignore[import-not-found]
+        LanguageModelInput,
+    )
+    from langchain_core.messages import BaseMessage  # type: ignore[import-not-found]
+    from langchain_core.outputs import LLMResult  # type: ignore[import-not-found]
+    from langchain_core.runnables import (  # type: ignore[import-not-found]
+        RunnableConfig,
+    )
 
 logger = getLogger(__name__)
 
@@ -32,7 +36,7 @@ class TokenRefreshingWrapper:
         kwargs = config.get("kwargs", {})
 
         if model_type == "mistral":
-            from langchain_openai import ChatOpenAI
+            from langchain_openai import ChatOpenAI  # type: ignore[import-not-found]
 
             return ChatOpenAI(
                 api_key=settings.auth.token,
@@ -41,7 +45,7 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "cohere":
-            from langchain_cohere import ChatCohere
+            from langchain_cohere import ChatCohere  # type: ignore[import-not-found]
 
             return ChatCohere(
                 cohere_api_key=settings.auth.token,
@@ -50,7 +54,7 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "xai":
-            from langchain_xai import ChatXAI
+            from langchain_xai import ChatXAI  # type: ignore[import-not-found]
 
             return ChatXAI(
                 model=model,
@@ -59,7 +63,9 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "deepseek":
-            from langchain_deepseek import
+            from langchain_deepseek import (  # type: ignore[import-not-found]
+                ChatDeepSeek,
+            )
 
             return ChatDeepSeek(
                 api_key=settings.auth.token,
@@ -68,7 +74,9 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "anthropic":
-            from langchain_anthropic import
+            from langchain_anthropic import (  # type: ignore[import-not-found]
+                ChatAnthropic,
+            )
 
             return ChatAnthropic(
                 api_key=settings.auth.token,
@@ -78,7 +86,9 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "gemini":
-            from .custom.gemini import
+            from .custom.gemini import (
+                ChatGoogleGenerativeAI,  # type: ignore[import-not-found]
+            )
 
             return ChatGoogleGenerativeAI(
                 model=model,
@@ -88,7 +98,9 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "cerebras":
-            from langchain_cerebras import
+            from langchain_cerebras import (  # type: ignore[import-not-found]
+                ChatCerebras,
+            )
 
             return ChatCerebras(
                 api_key=settings.auth.token,
@@ -97,7 +109,7 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         else:
-            from langchain_openai import ChatOpenAI
+            from langchain_openai import ChatOpenAI  # type: ignore[import-not-found]
 
             if model_type != "openai":
                 logger.warning(f"Model {model} is not supported by Langchain, defaulting to OpenAI")
@@ -113,10 +125,6 @@ class TokenRefreshingWrapper:
         # Only refresh if using ClientCredentials (which has get_token method)
         current_token = settings.auth.token
 
-        if hasattr(settings.auth, "get_token"):
-            # This will trigger token refresh if needed
-            settings.auth.get_token()
-
         new_token = settings.auth.token
 
         # If token changed, recreate the model
@@ -251,4 +259,4 @@ async def bl_model(name: str, **kwargs):
     model_config = {"type": type, "model": model, "url": url, "kwargs": kwargs}
 
     # Create and return the wrapper
-    return TokenRefreshingChatModel(model_config)
+    return TokenRefreshingChatModel(model_config)
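A minimal usage sketch, assuming only the signature visible in this diff (bl_model(name, **kwargs) is async and returns the TokenRefreshingChatModel wrapper); the model name and the temperature kwarg are placeholders:

import asyncio

from blaxel.langgraph.model import bl_model


async def main() -> None:
    # Extra kwargs are stored in the wrapper's config and forwarded to the
    # underlying LangChain chat model whenever it is (re)created.
    chat = await bl_model("my-model", temperature=0)
    print(type(chat))


asyncio.run(main())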
blaxel/langgraph/tools.py
CHANGED
@@ -4,7 +4,7 @@ from blaxel.core.tools import bl_tools as bl_tools_core
 from blaxel.core.tools.types import Tool, ToolException
 
 if TYPE_CHECKING:
-    from langchain_core.tools import StructuredTool
+    from langchain_core.tools import StructuredTool  # type: ignore[import-not-found]
 
 
 def _clean_schema_for_openai(schema: Dict[str, Any]) -> Dict[str, Any]:
@@ -37,19 +37,14 @@ def _clean_schema_for_openai(schema: Dict[str, Any]) -> Dict[str, Any]:
 
 
 def get_langchain_tool(tool: Tool) -> "StructuredTool":
-    from langchain_core.tools import StructuredTool
-    from mcp.types import
-        CallToolResult,
-        EmbeddedResource,
-        ImageContent,
-        TextContent,
-    )
-
-    NonTextContent = ImageContent | EmbeddedResource
+    from langchain_core.tools import StructuredTool  # type: ignore[import-not-found]
+    from mcp.types import CallToolResult, EmbeddedResource, ImageContent, TextContent
 
     async def langchain_coroutine(
         **arguments: dict[str, Any],
-    ) -> tuple[str | list[str], list[
+    ) -> tuple[str | list[str], list[ImageContent | EmbeddedResource] | None]:
+        if not tool.coroutine:
+            raise ValueError(f"Tool {tool.name} does not have a coroutine defined")
         result: CallToolResult = await tool.coroutine(**arguments)
         text_contents: list[TextContent] = []
         non_text_contents = []
blaxel/livekit/model.py
CHANGED
@@ -1,8 +1,8 @@
 from logging import getLogger
 
 import httpx
-from livekit.plugins import openai
-from openai import AsyncOpenAI
+from livekit.plugins import openai  # type: ignore[import-not-found]
+from openai import AsyncOpenAI  # type: ignore[import-not-found]
 
 from blaxel.core import bl_model as bl_model_core
 from blaxel.core import settings
@@ -20,6 +20,11 @@ class DynamicHeadersHTTPClient(httpx.AsyncClient):
     async def send(self, request, *args, **kwargs):
         # Update headers with the latest auth headers before each request
         auth_headers = settings.auth.get_headers()
+        # Remove the SDK's default "Authorization: Bearer replaced" header
+        # when our auth uses a different header (e.g. X-Blaxel-Authorization with API keys)
+        if "Authorization" not in auth_headers:
+            request.headers.pop("Authorization", None)
+            request.headers.pop("authorization", None)
         for key, value in auth_headers.items():
             request.headers[key] = value
         return await super().send(request, *args, **kwargs)
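A hedged sketch of how a client like DynamicHeadersHTTPClient is typically handed to the OpenAI SDK; the exact wiring inside blaxel/livekit/model.py is not visible in this hunk, the base_url is a placeholder, and the no-argument constructor is assumed to be inherited from httpx.AsyncClient.

from openai import AsyncOpenAI

from blaxel.livekit.model import DynamicHeadersHTTPClient

# AsyncOpenAI accepts any httpx.AsyncClient via http_client, so the per-request
# auth injection done in send() above applies to every OpenAI call.
client = AsyncOpenAI(
    base_url="https://example.invalid/v1",  # placeholder
    api_key="replaced",                     # real auth is injected per request
    http_client=DynamicHeadersHTTPClient(),
)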
blaxel/livekit/tools.py
CHANGED
@@ -1,4 +1,4 @@
-from livekit.agents import function_tool, llm
+from livekit.agents import function_tool, llm  # type: ignore[import-not-found]
 
 from blaxel.core.tools import bl_tools as bl_tools_core
 from blaxel.core.tools.types import Tool
@@ -6,6 +6,8 @@ from blaxel.core.tools.types import Tool
 
 def livekit_coroutine(tool: Tool):
     async def livekit_coroutine_wrapper(raw_arguments: dict[str, object]):
+        if not tool.coroutine:
+            raise ValueError(f"Tool {tool.name} does not have a coroutine defined")
         result = await tool.coroutine(**raw_arguments)
         return result.model_dump_json()
 