blaxel 0.2.35__py3-none-any.whl → 0.2.36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. blaxel/__init__.py +2 -2
  2. blaxel/core/client/api/compute/create_sandbox.py +21 -1
  3. blaxel/core/client/api/jobs/create_job_execution.py +12 -12
  4. blaxel/core/client/api/volumes/update_volume.py +187 -0
  5. blaxel/core/client/models/__init__.py +10 -6
  6. blaxel/core/client/models/{create_job_execution_response.py → create_job_execution_output.py} +11 -13
  7. blaxel/core/client/models/{create_job_execution_response_tasks_item.py → create_job_execution_output_tasks_item.py} +5 -5
  8. blaxel/core/client/models/create_job_execution_request.py +31 -0
  9. blaxel/core/client/models/create_job_execution_request_env.py +50 -0
  10. blaxel/core/client/models/function_runtime.py +18 -0
  11. blaxel/core/client/models/{function_spec_transport.py → function_runtime_transport.py} +2 -2
  12. blaxel/core/client/models/function_spec.py +0 -18
  13. blaxel/core/client/models/job_execution_spec.py +35 -0
  14. blaxel/core/client/models/job_execution_spec_env_override.py +50 -0
  15. blaxel/core/client/models/port_protocol.py +1 -0
  16. blaxel/core/common/settings.py +5 -0
  17. blaxel/core/jobs/__init__.py +60 -88
  18. blaxel/core/sandbox/default/sandbox.py +69 -2
  19. blaxel/core/sandbox/sync/sandbox.py +69 -2
  20. blaxel/core/volume/volume.py +203 -4
  21. blaxel/langgraph/model.py +25 -14
  22. blaxel/langgraph/tools.py +16 -12
  23. blaxel/llamaindex/model.py +33 -24
  24. blaxel/llamaindex/tools.py +9 -4
  25. blaxel/pydantic/model.py +26 -12
  26. {blaxel-0.2.35.dist-info → blaxel-0.2.36.dist-info}/METADATA +1 -1
  27. {blaxel-0.2.35.dist-info → blaxel-0.2.36.dist-info}/RECORD +29 -28
  28. blaxel/core/client/api/invitations/__init__.py +0 -0
  29. blaxel/core/client/api/invitations/list_all_pending_invitations.py +0 -142
  30. {blaxel-0.2.35.dist-info → blaxel-0.2.36.dist-info}/WHEEL +0 -0
  31. {blaxel-0.2.35.dist-info → blaxel-0.2.36.dist-info}/licenses/LICENSE +0 -0
blaxel/core/sandbox/sync/sandbox.py CHANGED
@@ -8,10 +8,11 @@ from ...client.api.compute.get_sandbox import sync as get_sandbox
 from ...client.api.compute.list_sandboxes import sync as list_sandboxes
 from ...client.api.compute.update_sandbox import sync as update_sandbox
 from ...client.client import client
-from ...client.models import Metadata, Sandbox, SandboxRuntime, SandboxSpec
+from ...client.models import Metadata, Sandbox, SandboxLifecycle, SandboxRuntime, SandboxSpec
 from ...client.models.error import Error
 from ...client.models.sandbox_error import SandboxError
 from ...client.types import UNSET
+from ...common.settings import settings
 from ..default.sandbox import SandboxAPIError
 from ..types import (
     SandboxConfiguration,
@@ -144,7 +145,7 @@ class SyncSandboxInstance:
         volumes = config._normalize_volumes() or UNSET
         ttl = config.ttl
         expires = config.expires
-        region = config.region
+        region = config.region or settings.region
         lifecycle = config.lifecycle
         sandbox = Sandbox(
             metadata=Metadata(name=name, labels=config.labels),
@@ -250,6 +251,72 @@ class SyncSandboxInstance:
         )
         return cls(response)
 
+    @classmethod
+    def update_ttl(cls, sandbox_name: str, ttl: str) -> "SyncSandboxInstance":
+        """Update sandbox TTL without recreating it.
+
+        Args:
+            sandbox_name: The name of the sandbox to update
+            ttl: The new TTL value (e.g., "5m", "1h", "30s")
+
+        Returns:
+            A new SyncSandboxInstance with updated TTL
+        """
+        # Get the existing sandbox
+        sandbox_instance = cls.get(sandbox_name)
+        sandbox = sandbox_instance.sandbox
+
+        # Prepare the updated sandbox object
+        updated_sandbox = Sandbox.from_dict(sandbox.to_dict())
+        if updated_sandbox.spec is None or updated_sandbox.spec.runtime is None:
+            raise ValueError(f"Sandbox {sandbox_name} has invalid spec")
+
+        # Update TTL
+        updated_sandbox.spec.runtime.ttl = ttl
+
+        # Call the update API
+        response = update_sandbox(
+            sandbox_name=sandbox_name,
+            client=client,
+            body=updated_sandbox,
+        )
+
+        return cls(response)
+
+    @classmethod
+    def update_lifecycle(
+        cls, sandbox_name: str, lifecycle: SandboxLifecycle
+    ) -> "SyncSandboxInstance":
+        """Update sandbox lifecycle configuration without recreating it.
+
+        Args:
+            sandbox_name: The name of the sandbox to update
+            lifecycle: The new lifecycle configuration
+
+        Returns:
+            A new SyncSandboxInstance with updated lifecycle
+        """
+        # Get the existing sandbox
+        sandbox_instance = cls.get(sandbox_name)
+        sandbox = sandbox_instance.sandbox
+
+        # Prepare the updated sandbox object
+        updated_sandbox = Sandbox.from_dict(sandbox.to_dict())
+        if updated_sandbox.spec is None:
+            raise ValueError(f"Sandbox {sandbox_name} has invalid spec")
+
+        # Update lifecycle
+        updated_sandbox.spec.lifecycle = lifecycle
+
+        # Call the update API
+        response = update_sandbox(
+            sandbox_name=sandbox_name,
+            client=client,
+            body=updated_sandbox,
+        )
+
+        return cls(response)
+
     @classmethod
     def create_if_not_exists(
         cls, sandbox: Union[Sandbox, SandboxCreateConfiguration, Dict[str, Any]]
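The two new classmethods fetch the sandbox, clone its spec, mutate only the TTL or lifecycle, and push the result through `update_sandbox`. A minimal usage sketch; the sandbox name is a placeholder and Blaxel credentials are assumed to be configured:

```python
# Hedged sketch of the new 0.2.36 API; "my-sandbox" is a hypothetical name.
from blaxel.core.sandbox.sync.sandbox import SyncSandboxInstance

# Extends the sandbox's time-to-live without recreating it.
instance = SyncSandboxInstance.update_ttl("my-sandbox", "1h")

# update_lifecycle works the same way, but replaces spec.lifecycle with a
# SandboxLifecycle model instead of setting spec.runtime.ttl.
```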
blaxel/core/volume/volume.py CHANGED
@@ -9,10 +9,13 @@ from ..client.api.volumes.get_volume import asyncio as get_volume
 from ..client.api.volumes.get_volume import sync as get_volume_sync
 from ..client.api.volumes.list_volumes import asyncio as list_volumes
 from ..client.api.volumes.list_volumes import sync as list_volumes_sync
+from ..client.api.volumes.update_volume import asyncio as update_volume
+from ..client.api.volumes.update_volume import sync as update_volume_sync
 from ..client.client import client
 from ..client.models import Metadata, Volume, VolumeSpec
 from ..client.models.error import Error
 from ..client.types import UNSET
+from ..common.settings import settings
 
 
 class VolumeAPIError(Exception):
@@ -60,6 +63,46 @@ class _SyncDeleteDescriptor:
             return instance_delete
 
 
+class _AsyncUpdateDescriptor:
+    """Descriptor that provides both class-level and instance-level update functionality."""
+
+    def __init__(self, update_func: Callable):
+        self._update_func = update_func
+
+    def __get__(self, instance, owner):
+        if instance is None:
+            # Called on the class: VolumeInstance.update("name", updates)
+            return self._update_func
+        else:
+            # Called on an instance: instance.update(updates)
+            async def instance_update(
+                updates: Union["VolumeCreateConfiguration", Volume, Dict[str, any]],
+            ) -> "VolumeInstance":
+                return await self._update_func(instance.metadata.name or "", updates)
+
+            return instance_update
+
+
+class _SyncUpdateDescriptor:
+    """Descriptor that provides both class-level and instance-level update functionality (sync)."""
+
+    def __init__(self, update_func: Callable):
+        self._update_func = update_func
+
+    def __get__(self, instance, owner):
+        if instance is None:
+            # Called on the class: SyncVolumeInstance.update("name", updates)
+            return self._update_func
+        else:
+            # Called on an instance: instance.update(updates)
+            def instance_update(
+                updates: Union["VolumeCreateConfiguration", Volume, Dict[str, any]],
+            ) -> "SyncVolumeInstance":
+                return self._update_func(instance.metadata.name or "", updates)
+
+            return instance_update
+
+
 class VolumeCreateConfiguration:
     """Simplified configuration for creating volumes with default values."""
 
@@ -147,7 +190,7 @@ class VolumeInstance:
             ),
             spec=VolumeSpec(
                 size=config.size or default_size,
-                region=config.region or UNSET,
+                region=config.region or settings.region or UNSET,
                 template=config.template or UNSET,
             ),
         )
@@ -161,7 +204,7 @@ class VolumeInstance:
             ),
             spec=VolumeSpec(
                 size=volume_config.size or default_size,
-                region=volume_config.region or UNSET,
+                region=volume_config.region or settings.region or UNSET,
                 template=volume_config.template or UNSET,
             ),
         )
@@ -288,7 +331,7 @@ class SyncVolumeInstance:
             ),
             spec=VolumeSpec(
                 size=config.size or default_size,
-                region=config.region or UNSET,
+                region=config.region or settings.region or UNSET,
                 template=config.template or UNSET,
             ),
         )
@@ -302,7 +345,7 @@ class SyncVolumeInstance:
             ),
             spec=VolumeSpec(
                 size=volume_config.size or default_size,
-                region=volume_config.region or UNSET,
+                region=volume_config.region or settings.region or UNSET,
                 template=volume_config.template or UNSET,
             ),
         )
@@ -384,6 +427,162 @@ def _delete_volume_by_name_sync(volume_name: str) -> Volume:
     return response
 
 
+async def _update_volume_by_name(
+    volume_name: str, updates: Union[VolumeCreateConfiguration, Volume, Dict[str, any]]
+) -> "VolumeInstance":
+    """Update a volume by name (async)."""
+    # Get the current volume
+    volume_instance = await VolumeInstance.get(volume_name)
+    current_volume = volume_instance.volume
+
+    # Build the update body
+    if isinstance(updates, Volume):
+        new_metadata = updates.metadata
+        new_spec = updates.spec
+    elif isinstance(updates, VolumeCreateConfiguration):
+        new_metadata = Metadata(
+            name=current_volume.metadata.name if current_volume.metadata else volume_name,
+            display_name=updates.display_name,
+            labels=updates.labels,
+        )
+        new_spec = VolumeSpec(
+            size=updates.size,
+            region=updates.region,
+            template=updates.template,
+        )
+    elif isinstance(updates, dict):
+        config = VolumeCreateConfiguration.from_dict(updates)
+        new_metadata = Metadata(
+            name=current_volume.metadata.name if current_volume.metadata else volume_name,
+            display_name=config.display_name,
+            labels=config.labels,
+        )
+        new_spec = VolumeSpec(
+            size=config.size,
+            region=config.region,
+            template=config.template,
+        )
+    else:
+        raise ValueError(
+            f"Invalid updates type: {type(updates)}. Expected VolumeCreateConfiguration, Volume, or dict."
+        )
+
+    # Merge current values with updates
+    merged_metadata = Metadata(
+        name=current_volume.metadata.name if current_volume.metadata else volume_name,
+        display_name=new_metadata.display_name
+        if new_metadata and new_metadata.display_name
+        else (current_volume.metadata.display_name if current_volume.metadata else None),
+        labels=new_metadata.labels
+        if new_metadata and new_metadata.labels
+        else (current_volume.metadata.labels if current_volume.metadata else None),
+    )
+
+    merged_spec = VolumeSpec(
+        size=new_spec.size
+        if new_spec and new_spec.size
+        else (current_volume.spec.size if current_volume.spec else None),
+        region=new_spec.region
+        if new_spec and new_spec.region
+        else (current_volume.spec.region if current_volume.spec else None),
+        template=new_spec.template
+        if new_spec and new_spec.template
+        else (current_volume.spec.template if current_volume.spec else None),
+    )
+
+    body = Volume(
+        metadata=merged_metadata,
+        spec=merged_spec,
+    )
+
+    response = await update_volume(volume_name=volume_name, client=client, body=body)
+    if isinstance(response, Error):
+        status_code = int(response.code) if response.code is not UNSET else None
+        message = response.message if response.message is not UNSET else response.error
+        raise VolumeAPIError(message, status_code=status_code, code=response.error)
+    return VolumeInstance(response)
+
+
+def _update_volume_by_name_sync(
+    volume_name: str, updates: Union[VolumeCreateConfiguration, Volume, Dict[str, any]]
+) -> "SyncVolumeInstance":
+    """Update a volume by name (sync)."""
+    # Get the current volume
+    volume_instance = SyncVolumeInstance.get(volume_name)
+    current_volume = volume_instance.volume
+
+    # Build the update body
+    if isinstance(updates, Volume):
+        new_metadata = updates.metadata
+        new_spec = updates.spec
+    elif isinstance(updates, VolumeCreateConfiguration):
+        new_metadata = Metadata(
+            name=current_volume.metadata.name if current_volume.metadata else volume_name,
+            display_name=updates.display_name,
+            labels=updates.labels,
+        )
+        new_spec = VolumeSpec(
+            size=updates.size,
+            region=updates.region,
+            template=updates.template,
+        )
+    elif isinstance(updates, dict):
+        config = VolumeCreateConfiguration.from_dict(updates)
+        new_metadata = Metadata(
+            name=current_volume.metadata.name if current_volume.metadata else volume_name,
+            display_name=config.display_name,
+            labels=config.labels,
+        )
+        new_spec = VolumeSpec(
+            size=config.size,
+            region=config.region,
+            template=config.template,
+        )
+    else:
+        raise ValueError(
+            f"Invalid updates type: {type(updates)}. Expected VolumeCreateConfiguration, Volume, or dict."
+        )
+
+    # Merge current values with updates
+    merged_metadata = Metadata(
+        name=current_volume.metadata.name if current_volume.metadata else volume_name,
+        display_name=new_metadata.display_name
+        if new_metadata and new_metadata.display_name
+        else (current_volume.metadata.display_name if current_volume.metadata else None),
+        labels=new_metadata.labels
+        if new_metadata and new_metadata.labels
+        else (current_volume.metadata.labels if current_volume.metadata else None),
+    )
+
+    merged_spec = VolumeSpec(
+        size=new_spec.size
+        if new_spec and new_spec.size
+        else (current_volume.spec.size if current_volume.spec else None),
+        region=new_spec.region
+        if new_spec and new_spec.region
+        else (current_volume.spec.region if current_volume.spec else None),
+        template=new_spec.template
+        if new_spec and new_spec.template
+        else (current_volume.spec.template if current_volume.spec else None),
+    )
+
+    body = Volume(
+        metadata=merged_metadata,
+        spec=merged_spec,
+    )
+
+    response = update_volume_sync(volume_name=volume_name, client=client, body=body)
+    if isinstance(response, Error):
+        status_code = int(response.code) if response.code is not UNSET else None
+        message = response.message if response.message is not UNSET else response.error
+        raise VolumeAPIError(message, status_code=status_code, code=response.error)
+    return SyncVolumeInstance(response)
+
+
 # Assign the delete descriptors to support both class-level and instance-level calls
 VolumeInstance.delete = _AsyncDeleteDescriptor(_delete_volume_by_name)
 SyncVolumeInstance.delete = _SyncDeleteDescriptor(_delete_volume_by_name_sync)
+
+# Assign the update descriptors to support both class-level and instance-level calls
+VolumeInstance.update = _AsyncUpdateDescriptor(_update_volume_by_name)
+SyncVolumeInstance.update = _SyncUpdateDescriptor(_update_volume_by_name_sync)
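With the update descriptors assigned, `update` works both as a class-level call taking a volume name and as a bound instance method, and partial updates are merged field-by-field with the current metadata and spec. A hedged usage sketch; the volume name, size, and label values are placeholders, and the dict keys are assumed to match what `VolumeCreateConfiguration.from_dict` accepts:

```python
from blaxel.core.volume.volume import SyncVolumeInstance

# Class-level call: the descriptor's __get__ sees instance=None and returns
# _update_volume_by_name_sync directly.
vol = SyncVolumeInstance.update("my-volume", {"size": 2048})

# Instance-level call: the descriptor binds the volume's own name, so only
# the updates are passed. Fields left unset keep their current values.
vol = vol.update({"labels": {"env": "dev"}})
```

Note the related change in the create paths above: when no region is specified, `settings.region` is now tried before falling back to `UNSET`.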
blaxel/langgraph/model.py CHANGED
@@ -1,22 +1,17 @@
+from __future__ import annotations
+
 from logging import getLogger
-from typing import Any, AsyncIterator, Iterator, List
-
-from langchain_anthropic import ChatAnthropic
-from langchain_cerebras import ChatCerebras
-from langchain_cohere import ChatCohere
-from langchain_core.callbacks import Callbacks
-from langchain_core.language_models import LanguageModelInput
-from langchain_core.messages import BaseMessage
-from langchain_core.outputs import LLMResult
-from langchain_core.runnables import RunnableConfig
-from langchain_deepseek import ChatDeepSeek
-from langchain_openai import ChatOpenAI
-from langchain_xai import ChatXAI
+from typing import TYPE_CHECKING, Any, AsyncIterator, Iterator, List
 
 from blaxel.core import bl_model as bl_model_core
 from blaxel.core import settings
 
-from .custom.gemini import ChatGoogleGenerativeAI
+if TYPE_CHECKING:
+    from langchain_core.callbacks import Callbacks
+    from langchain_core.language_models import LanguageModelInput
+    from langchain_core.messages import BaseMessage
+    from langchain_core.outputs import LLMResult
+    from langchain_core.runnables import RunnableConfig
 
 logger = getLogger(__name__)
 
@@ -37,6 +32,8 @@ class TokenRefreshingWrapper:
         kwargs = config.get("kwargs", {})
 
         if model_type == "mistral":
+            from langchain_openai import ChatOpenAI
+
             return ChatOpenAI(
                 api_key=settings.auth.token,
                 model=model,
@@ -44,6 +41,8 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "cohere":
+            from langchain_cohere import ChatCohere
+
             return ChatCohere(
                 cohere_api_key=settings.auth.token,
                 model=model,
@@ -51,6 +50,8 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "xai":
+            from langchain_xai import ChatXAI
+
             return ChatXAI(
                 model=model,
                 api_key=settings.auth.token,
@@ -58,6 +59,8 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "deepseek":
+            from langchain_deepseek import ChatDeepSeek
+
             return ChatDeepSeek(
                 api_key=settings.auth.token,
                 model=model,
@@ -65,6 +68,8 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "anthropic":
+            from langchain_anthropic import ChatAnthropic
+
             return ChatAnthropic(
                 api_key=settings.auth.token,
                 anthropic_api_url=url,
@@ -73,6 +78,8 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "gemini":
+            from .custom.gemini import ChatGoogleGenerativeAI
+
             return ChatGoogleGenerativeAI(
                 model=model,
                 client_options={"api_endpoint": url},
@@ -81,6 +88,8 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "cerebras":
+            from langchain_cerebras import ChatCerebras
+
             return ChatCerebras(
                 api_key=settings.auth.token,
                 model=model,
@@ -88,6 +97,8 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         else:
+            from langchain_openai import ChatOpenAI
+
             if model_type != "openai":
                 logger.warning(f"Model {model} is not supported by Langchain, defaulting to OpenAI")
             return ChatOpenAI(
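The net effect of this diff is the lazy-import pattern: annotation-only imports move under `TYPE_CHECKING` (made safe at runtime by `from __future__ import annotations`), and each provider package is imported inside the branch that uses it, so only the selected provider needs to be installed. A generic sketch of the pattern, not code from this package; the model names are placeholders:

```python
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated by type checkers only; never imported at runtime.
    from langchain_core.language_models import BaseChatModel


def make_model(model_type: str) -> BaseChatModel:
    if model_type == "anthropic":
        # Deferred import: langchain_anthropic is required only when
        # an Anthropic model is actually requested.
        from langchain_anthropic import ChatAnthropic

        return ChatAnthropic(model="claude-3-5-sonnet-latest")
    from langchain_openai import ChatOpenAI

    return ChatOpenAI(model="gpt-4o-mini")
```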
blaxel/langgraph/tools.py CHANGED
@@ -1,17 +1,11 @@
-from typing import Any, Dict
-
-from langchain_core.tools import StructuredTool
-from mcp.types import (
-    CallToolResult,
-    EmbeddedResource,
-    ImageContent,
-    TextContent,
-)
+from typing import TYPE_CHECKING, Any, Dict
 
 from blaxel.core.tools import bl_tools as bl_tools_core
 from blaxel.core.tools.types import Tool, ToolException
 
-NonTextContent = ImageContent | EmbeddedResource
+if TYPE_CHECKING:
+    from langchain_core.tools import StructuredTool
+    from mcp.types import EmbeddedResource, ImageContent
 
 
 def _clean_schema_for_openai(schema: Dict[str, Any]) -> Dict[str, Any]:
@@ -43,7 +37,17 @@ def _clean_schema_for_openai(schema: Dict[str, Any]) -> Dict[str, Any]:
     return cleaned
 
 
-def get_langchain_tool(tool: Tool) -> StructuredTool:
+def get_langchain_tool(tool: Tool) -> "StructuredTool":
+    from langchain_core.tools import StructuredTool
+    from mcp.types import (
+        CallToolResult,
+        EmbeddedResource,
+        ImageContent,
+        TextContent,
+    )
+
+    NonTextContent = ImageContent | EmbeddedResource
+
     async def langchain_coroutine(
         **arguments: dict[str, Any],
     ) -> tuple[str | list[str], list[NonTextContent] | None]:
@@ -77,7 +81,7 @@ def get_langchain_tool(tool: Tool) -> StructuredTool:
     )
 
 
-async def bl_tools(tools_names: list[str], **kwargs) -> list[StructuredTool]:
+async def bl_tools(tools_names: list[str], **kwargs) -> list["StructuredTool"]:
     tools = bl_tools_core(tools_names, **kwargs)
     await tools.initialize()
     return [get_langchain_tool(tool) for tool in tools.get_tools()]
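`bl_tools` is unchanged in behavior, but its heavy dependencies now load on first use. A hedged usage sketch; the tool name is a placeholder and a configured Blaxel workspace is assumed:

```python
import asyncio

from blaxel.langgraph.tools import bl_tools


async def main() -> None:
    # langchain_core and mcp are imported only once get_langchain_tool runs.
    tools = await bl_tools(["blaxel-search"])
    for tool in tools:
        print(tool.name, tool.description)


asyncio.run(main())
```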
blaxel/llamaindex/model.py CHANGED
@@ -1,33 +1,25 @@
-import os
-
-# Transformers is a dependency of DeepSeek, and it logs a lot of warnings that are not useful
-os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
+from __future__ import annotations
 
+import os
 from logging import getLogger
-from typing import Any, Sequence
-
-from google.genai.types import HttpOptions
-from llama_index.core.base.llms.types import (
-    ChatMessage,
-    ChatResponse,
-    ChatResponseAsyncGen,
-    ChatResponseGen,
-    CompletionResponse,
-    CompletionResponseAsyncGen,
-    CompletionResponseGen,
-)
-from llama_index.llms.anthropic import Anthropic
-from llama_index.llms.cerebras import Cerebras
-from llama_index.llms.deepseek import DeepSeek
-from llama_index.llms.google_genai import GoogleGenAI
-from llama_index.llms.groq import Groq
-from llama_index.llms.mistralai import MistralAI
-from llama_index.llms.openai import OpenAI
+from typing import TYPE_CHECKING, Any, Sequence
 
 from blaxel.core import bl_model as bl_model_core
 from blaxel.core import settings
 
-from .custom.cohere import Cohere
+# Transformers is a dependency of DeepSeek, and it logs a lot of warnings that are not useful
+os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
+
+if TYPE_CHECKING:
+    from llama_index.core.base.llms.types import (
+        ChatMessage,
+        ChatResponse,
+        ChatResponseAsyncGen,
+        ChatResponseGen,
+        CompletionResponse,
+        CompletionResponseAsyncGen,
+        CompletionResponseGen,
+    )
 
 logger = getLogger(__name__)
 
@@ -48,6 +40,8 @@ class TokenRefreshingWrapper:
         kwargs = config.get("kwargs", {})
 
         if model_type == "anthropic":
+            from llama_index.llms.anthropic import Anthropic
+
             return Anthropic(
                 model=model,
                 api_key=settings.auth.token,
@@ -56,6 +50,8 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "xai":
+            from llama_index.llms.groq import Groq
+
             return Groq(
                 model=model,
                 api_key=settings.auth.token,
@@ -63,6 +59,9 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "gemini":
+            from google.genai.types import HttpOptions
+            from llama_index.llms.google_genai import GoogleGenAI
+
             return GoogleGenAI(
                 api_key=settings.auth.token,
                 model=model,
@@ -74,8 +73,12 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "cohere":
+            from .custom.cohere import Cohere
+
             return Cohere(model=model, api_key=settings.auth.token, api_base=url, **kwargs)
         elif model_type == "deepseek":
+            from llama_index.llms.deepseek import DeepSeek
+
             return DeepSeek(
                 model=model,
                 api_key=settings.auth.token,
@@ -83,8 +86,12 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         elif model_type == "mistral":
+            from llama_index.llms.mistralai import MistralAI
+
             return MistralAI(model=model, api_key=settings.auth.token, endpoint=url, **kwargs)
         elif model_type == "cerebras":
+            from llama_index.llms.cerebras import Cerebras
+
             return Cerebras(
                 model=model,
                 api_key=settings.auth.token,
@@ -92,6 +99,8 @@ class TokenRefreshingWrapper:
                 **kwargs,
             )
         else:
+            from llama_index.llms.openai import OpenAI
+
             if model_type != "openai":
                 logger.warning(
                     f"Model {model} is not supported by LlamaIndex, defaulting to OpenAI"
blaxel/llamaindex/tools.py CHANGED
@@ -1,12 +1,17 @@
-from llama_index.core.tools import FunctionTool
-from llama_index.core.tools.types import ToolMetadata
+from typing import TYPE_CHECKING
 
 from blaxel.core.tools import bl_tools as bl_tools_core
 from blaxel.core.tools.common import create_model_from_json_schema
 from blaxel.core.tools.types import Tool
 
+if TYPE_CHECKING:
+    from llama_index.core.tools import FunctionTool
+
+
+def get_llamaindex_tool(tool: Tool) -> "FunctionTool":
+    from llama_index.core.tools import FunctionTool
+    from llama_index.core.tools.types import ToolMetadata
 
-def get_llamaindex_tool(tool: Tool) -> FunctionTool:
     model_schema = create_model_from_json_schema(
         tool.input_schema, model_name=f"{tool.name}_Schema"
     )
@@ -21,7 +26,7 @@ def get_llamaindex_tool(tool: Tool) -> FunctionTool:
     )
 
 
-async def bl_tools(tools_names: list[str], **kwargs) -> list[FunctionTool]:
+async def bl_tools(tools_names: list[str], **kwargs) -> list["FunctionTool"]:
     tools = bl_tools_core(tools_names, **kwargs)
     await tools.initialize()
     return [get_llamaindex_tool(tool) for tool in tools.get_tools()]
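The same deferral applies on the LlamaIndex side. A hedged usage sketch with a placeholder tool name, assuming a configured Blaxel workspace:

```python
import asyncio

from blaxel.llamaindex.tools import bl_tools


async def main() -> None:
    # llama_index.core.tools is imported lazily inside get_llamaindex_tool.
    tools = await bl_tools(["blaxel-search"])
    print([tool.metadata.name for tool in tools])


asyncio.run(main())
```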