blaxel 0.2.35__py3-none-any.whl → 0.2.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. blaxel/__init__.py +2 -2
  2. blaxel/core/client/api/compute/create_sandbox.py +21 -1
  3. blaxel/core/client/api/jobs/create_job_execution.py +12 -12
  4. blaxel/core/client/api/volumes/update_volume.py +187 -0
  5. blaxel/core/client/models/__init__.py +10 -6
  6. blaxel/core/client/models/{create_job_execution_response.py → create_job_execution_output.py} +11 -13
  7. blaxel/core/client/models/{create_job_execution_response_tasks_item.py → create_job_execution_output_tasks_item.py} +5 -5
  8. blaxel/core/client/models/create_job_execution_request.py +31 -0
  9. blaxel/core/client/models/create_job_execution_request_env.py +50 -0
  10. blaxel/core/client/models/function_runtime.py +18 -0
  11. blaxel/core/client/models/{function_spec_transport.py → function_runtime_transport.py} +2 -2
  12. blaxel/core/client/models/function_spec.py +0 -18
  13. blaxel/core/client/models/job_execution_spec.py +35 -0
  14. blaxel/core/client/models/job_execution_spec_env_override.py +50 -0
  15. blaxel/core/client/models/port_protocol.py +1 -0
  16. blaxel/core/client/models/preview.py +48 -1
  17. blaxel/core/client/models/sandbox.py +10 -0
  18. blaxel/core/common/settings.py +5 -0
  19. blaxel/core/jobs/__init__.py +60 -88
  20. blaxel/core/sandbox/__init__.py +12 -0
  21. blaxel/core/{client/api/invitations/list_all_pending_invitations.py → sandbox/client/api/system/get_health.py} +26 -34
  22. blaxel/core/sandbox/client/api/system/post_upgrade.py +196 -0
  23. blaxel/core/sandbox/client/models/__init__.py +8 -0
  24. blaxel/core/sandbox/client/models/content_search_match.py +24 -25
  25. blaxel/core/sandbox/client/models/content_search_response.py +25 -29
  26. blaxel/core/sandbox/client/models/find_match.py +13 -14
  27. blaxel/core/sandbox/client/models/find_response.py +21 -24
  28. blaxel/core/sandbox/client/models/fuzzy_search_match.py +17 -19
  29. blaxel/core/sandbox/client/models/fuzzy_search_response.py +21 -24
  30. blaxel/core/sandbox/client/models/health_response.py +159 -0
  31. blaxel/core/sandbox/client/models/process_upgrade_state.py +20 -0
  32. blaxel/core/sandbox/client/models/upgrade_request.py +71 -0
  33. blaxel/core/sandbox/client/models/upgrade_status.py +125 -0
  34. blaxel/core/sandbox/default/__init__.py +2 -0
  35. blaxel/core/sandbox/default/filesystem.py +20 -6
  36. blaxel/core/sandbox/default/preview.py +48 -1
  37. blaxel/core/sandbox/default/process.py +66 -21
  38. blaxel/core/sandbox/default/sandbox.py +104 -6
  39. blaxel/core/sandbox/default/system.py +71 -0
  40. blaxel/core/sandbox/sync/__init__.py +2 -0
  41. blaxel/core/sandbox/sync/filesystem.py +19 -2
  42. blaxel/core/sandbox/sync/preview.py +50 -3
  43. blaxel/core/sandbox/sync/process.py +38 -15
  44. blaxel/core/sandbox/sync/sandbox.py +97 -5
  45. blaxel/core/sandbox/sync/system.py +71 -0
  46. blaxel/core/sandbox/types.py +212 -5
  47. blaxel/core/volume/volume.py +209 -4
  48. blaxel/langgraph/model.py +25 -14
  49. blaxel/langgraph/tools.py +15 -12
  50. blaxel/llamaindex/model.py +33 -24
  51. blaxel/llamaindex/tools.py +9 -4
  52. blaxel/pydantic/model.py +26 -12
  53. blaxel-0.2.37.dist-info/METADATA +569 -0
  54. {blaxel-0.2.35.dist-info → blaxel-0.2.37.dist-info}/RECORD +57 -47
  55. blaxel-0.2.35.dist-info/METADATA +0 -228
  56. /blaxel/core/{client/api/invitations → sandbox/client/api/system}/__init__.py +0 -0
  57. {blaxel-0.2.35.dist-info → blaxel-0.2.37.dist-info}/WHEEL +0 -0
  58. {blaxel-0.2.35.dist-info → blaxel-0.2.37.dist-info}/licenses/LICENSE +0 -0
blaxel/core/volume/volume.py CHANGED
@@ -1,3 +1,5 @@
+ import asyncio
+ import time
  import uuid
  from typing import Callable, Dict, List, Union
 
@@ -9,10 +11,13 @@ from ..client.api.volumes.get_volume import asyncio as get_volume
  from ..client.api.volumes.get_volume import sync as get_volume_sync
  from ..client.api.volumes.list_volumes import asyncio as list_volumes
  from ..client.api.volumes.list_volumes import sync as list_volumes_sync
+ from ..client.api.volumes.update_volume import asyncio as update_volume
+ from ..client.api.volumes.update_volume import sync as update_volume_sync
  from ..client.client import client
  from ..client.models import Metadata, Volume, VolumeSpec
  from ..client.models.error import Error
  from ..client.types import UNSET
+ from ..common.settings import settings
 
 
  class VolumeAPIError(Exception):
@@ -60,6 +65,46 @@ class _SyncDeleteDescriptor:
              return instance_delete
 
 
+ class _AsyncUpdateDescriptor:
+     """Descriptor that provides both class-level and instance-level update functionality."""
+
+     def __init__(self, update_func: Callable):
+         self._update_func = update_func
+
+     def __get__(self, instance, owner):
+         if instance is None:
+             # Called on the class: VolumeInstance.update("name", updates)
+             return self._update_func
+         else:
+             # Called on an instance: instance.update(updates)
+             async def instance_update(
+                 updates: Union["VolumeCreateConfiguration", Volume, Dict[str, any]],
+             ) -> "VolumeInstance":
+                 return await self._update_func(instance.metadata.name or "", updates)
+
+             return instance_update
+
+
+ class _SyncUpdateDescriptor:
+     """Descriptor that provides both class-level and instance-level update functionality (sync)."""
+
+     def __init__(self, update_func: Callable):
+         self._update_func = update_func
+
+     def __get__(self, instance, owner):
+         if instance is None:
+             # Called on the class: SyncVolumeInstance.update("name", updates)
+             return self._update_func
+         else:
+             # Called on an instance: instance.update(updates)
+             def instance_update(
+                 updates: Union["VolumeCreateConfiguration", Volume, Dict[str, any]],
+             ) -> "SyncVolumeInstance":
+                 return self._update_func(instance.metadata.name or "", updates)
+
+             return instance_update
+
+
  class VolumeCreateConfiguration:
      """Simplified configuration for creating volumes with default values."""
 
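The two descriptor classes above make a single attribute behave as a plain helper when accessed on the class and as a bound convenience method when accessed on an instance. A minimal standalone sketch of the same pattern (Widget and _rename are illustrative, not part of the package):

from typing import Callable


class _DualDescriptor:
    """Return the raw function on class access, a bound wrapper on instance access."""

    def __init__(self, func: Callable):
        self._func = func

    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access: Widget.rename("a", "b")
            return self._func
        # Instance-level access: pre-bind the instance's own name
        return lambda *args, **kwargs: self._func(instance.name, *args, **kwargs)


def _rename(name: str, new_name: str) -> str:
    return f"{name} -> {new_name}"


class Widget:
    def __init__(self, name: str):
        self.name = name


# Attached after class creation, just as volume.py attaches `update` below.
Widget.rename = _DualDescriptor(_rename)

print(Widget.rename("a", "b"))   # class-level call: 'a -> b'
print(Widget("a").rename("c"))   # instance-level call: 'a -> c'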
@@ -147,7 +192,7 @@ class VolumeInstance:
              ),
              spec=VolumeSpec(
                  size=config.size or default_size,
-                 region=config.region or UNSET,
+                 region=config.region or settings.region or UNSET,
                  template=config.template or UNSET,
              ),
          )
@@ -161,7 +206,7 @@ class VolumeInstance:
              ),
              spec=VolumeSpec(
                  size=volume_config.size or default_size,
-                 region=volume_config.region or UNSET,
+                 region=volume_config.region or settings.region or UNSET,
                  template=volume_config.template or UNSET,
              ),
          )
@@ -288,7 +333,7 @@ class SyncVolumeInstance:
              ),
              spec=VolumeSpec(
                  size=config.size or default_size,
-                 region=config.region or UNSET,
+                 region=config.region or settings.region or UNSET,
                  template=config.template or UNSET,
              ),
          )
@@ -302,7 +347,7 @@ class SyncVolumeInstance:
              ),
              spec=VolumeSpec(
                  size=volume_config.size or default_size,
-                 region=volume_config.region or UNSET,
+                 region=volume_config.region or settings.region or UNSET,
                  template=volume_config.template or UNSET,
              ),
          )
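All four hunks make the same change: the volume's region now falls back from the explicit configuration to the workspace-level settings.region before resolving to UNSET. Because the chain uses Python's "or", falsy values such as None or an empty string fall through to the next candidate:

# Resolution order introduced by these hunks (illustrative stand-ins):
UNSET = object()  # placeholder for blaxel's UNSET sentinel


def resolve_region(config_region, settings_region):
    return config_region or settings_region or UNSET


assert resolve_region("us-east-1", "eu-west-1") == "us-east-1"  # explicit wins
assert resolve_region(None, "eu-west-1") == "eu-west-1"         # settings default
assert resolve_region(None, None) is UNSET                      # nothing set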
@@ -384,6 +429,166 @@ def _delete_volume_by_name_sync(volume_name: str) -> Volume:
      return response
 
 
+ async def _update_volume_by_name(
+     volume_name: str, updates: Union[VolumeCreateConfiguration, Volume, Dict[str, any]]
+ ) -> "VolumeInstance":
+     """Update a volume by name (async)."""
+     # Get the current volume
+     volume_instance = await VolumeInstance.get(volume_name)
+     current_volume = volume_instance.volume
+
+     # Build the update body
+     if isinstance(updates, Volume):
+         new_metadata = updates.metadata
+         new_spec = updates.spec
+     elif isinstance(updates, VolumeCreateConfiguration):
+         new_metadata = Metadata(
+             name=current_volume.metadata.name if current_volume.metadata else volume_name,
+             display_name=updates.display_name,
+             labels=updates.labels,
+         )
+         new_spec = VolumeSpec(
+             size=updates.size,
+             region=updates.region,
+             template=updates.template,
+         )
+     elif isinstance(updates, dict):
+         config = VolumeCreateConfiguration.from_dict(updates)
+         new_metadata = Metadata(
+             name=current_volume.metadata.name if current_volume.metadata else volume_name,
+             display_name=config.display_name,
+             labels=config.labels,
+         )
+         new_spec = VolumeSpec(
+             size=config.size,
+             region=config.region,
+             template=config.template,
+         )
+     else:
+         raise ValueError(
+             f"Invalid updates type: {type(updates)}. Expected VolumeCreateConfiguration, Volume, or dict."
+         )
+
+     # Merge current values with updates
+     merged_metadata = Metadata(
+         name=current_volume.metadata.name if current_volume.metadata else volume_name,
+         display_name=new_metadata.display_name
+         if new_metadata and new_metadata.display_name
+         else (current_volume.metadata.display_name if current_volume.metadata else None),
+         labels=new_metadata.labels
+         if new_metadata and new_metadata.labels
+         else (current_volume.metadata.labels if current_volume.metadata else None),
+     )
+
+     merged_spec = VolumeSpec(
+         size=new_spec.size
+         if new_spec and new_spec.size
+         else (current_volume.spec.size if current_volume.spec else None),
+         region=new_spec.region
+         if new_spec and new_spec.region
+         else (current_volume.spec.region if current_volume.spec else None),
+         template=new_spec.template
+         if new_spec and new_spec.template
+         else (current_volume.spec.template if current_volume.spec else None),
+     )
+
+     body = Volume(
+         metadata=merged_metadata,
+         spec=merged_spec,
+     )
+
+     response = await update_volume(volume_name=volume_name, client=client, body=body)
+     if isinstance(response, Error):
+         status_code = int(response.code) if response.code is not UNSET else None
+         message = response.message if response.message is not UNSET else response.error
+         raise VolumeAPIError(message, status_code=status_code, code=response.error)
+     # This is for safe update
+     await asyncio.sleep(0.5)
+     return VolumeInstance(response)
+
+
+ def _update_volume_by_name_sync(
+     volume_name: str, updates: Union[VolumeCreateConfiguration, Volume, Dict[str, any]]
+ ) -> "SyncVolumeInstance":
+     """Update a volume by name (sync)."""
+     # Get the current volume
+     volume_instance = SyncVolumeInstance.get(volume_name)
+     current_volume = volume_instance.volume
+
+     # Build the update body
+     if isinstance(updates, Volume):
+         new_metadata = updates.metadata
+         new_spec = updates.spec
+     elif isinstance(updates, VolumeCreateConfiguration):
+         new_metadata = Metadata(
+             name=current_volume.metadata.name if current_volume.metadata else volume_name,
+             display_name=updates.display_name,
+             labels=updates.labels,
+         )
+         new_spec = VolumeSpec(
+             size=updates.size,
+             region=updates.region,
+             template=updates.template,
+         )
+     elif isinstance(updates, dict):
+         config = VolumeCreateConfiguration.from_dict(updates)
+         new_metadata = Metadata(
+             name=current_volume.metadata.name if current_volume.metadata else volume_name,
+             display_name=config.display_name,
+             labels=config.labels,
+         )
+         new_spec = VolumeSpec(
+             size=config.size,
+             region=config.region,
+             template=config.template,
+         )
+     else:
+         raise ValueError(
+             f"Invalid updates type: {type(updates)}. Expected VolumeCreateConfiguration, Volume, or dict."
+         )
+
+     # Merge current values with updates
+     merged_metadata = Metadata(
+         name=current_volume.metadata.name if current_volume.metadata else volume_name,
+         display_name=new_metadata.display_name
+         if new_metadata and new_metadata.display_name
+         else (current_volume.metadata.display_name if current_volume.metadata else None),
+         labels=new_metadata.labels
+         if new_metadata and new_metadata.labels
+         else (current_volume.metadata.labels if current_volume.metadata else None),
+     )
+
+     merged_spec = VolumeSpec(
+         size=new_spec.size
+         if new_spec and new_spec.size
+         else (current_volume.spec.size if current_volume.spec else None),
+         region=new_spec.region
+         if new_spec and new_spec.region
+         else (current_volume.spec.region if current_volume.spec else None),
+         template=new_spec.template
+         if new_spec and new_spec.template
+         else (current_volume.spec.template if current_volume.spec else None),
+     )
+
+     body = Volume(
+         metadata=merged_metadata,
+         spec=merged_spec,
+     )
+
+     response = update_volume_sync(volume_name=volume_name, client=client, body=body)
+     if isinstance(response, Error):
+         status_code = int(response.code) if response.code is not UNSET else None
+         message = response.message if response.message is not UNSET else response.error
+         raise VolumeAPIError(message, status_code=status_code, code=response.error)
+     # This is for safe update
+     time.sleep(0.5)
+     return SyncVolumeInstance(response)
+
+
  # Assign the delete descriptors to support both class-level and instance-level calls
  VolumeInstance.delete = _AsyncDeleteDescriptor(_delete_volume_by_name)
  SyncVolumeInstance.delete = _SyncDeleteDescriptor(_delete_volume_by_name_sync)
+
+ # Assign the update descriptors to support both class-level and instance-level calls
+ VolumeInstance.update = _AsyncUpdateDescriptor(_update_volume_by_name)
+ SyncVolumeInstance.update = _SyncUpdateDescriptor(_update_volume_by_name_sync)
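Taken together, the descriptors and the _update_volume_by_name helpers add a read-merge-write update entry point in async and sync flavors. A hedged usage sketch: the import path is inferred from this diff's module layout, the volume name and size value are illustrative, and the dict keys mirror the VolumeCreateConfiguration fields visible above:

import asyncio

from blaxel.core.volume.volume import SyncVolumeInstance, VolumeInstance  # assumed path


async def main() -> None:
    # Class-level call: name plus a partial update as a plain dict. Fields left
    # out keep their current values because the helper merges the update into
    # the fetched volume before calling update_volume.
    await VolumeInstance.update("my-volume", {"size": 4096})

    # Instance-level call: the descriptor pre-binds the volume's own name.
    volume = await VolumeInstance.get("my-volume")
    await volume.update({"display_name": "My Volume"})


def main_sync() -> None:
    # Same API, sync flavor.
    SyncVolumeInstance.update("my-volume", {"size": 4096})


asyncio.run(main())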
blaxel/langgraph/model.py CHANGED
@@ -1,22 +1,17 @@
+ from __future__ import annotations
+
  from logging import getLogger
- from typing import Any, AsyncIterator, Iterator, List
-
- from langchain_anthropic import ChatAnthropic
- from langchain_cerebras import ChatCerebras
- from langchain_cohere import ChatCohere
- from langchain_core.callbacks import Callbacks
- from langchain_core.language_models import LanguageModelInput
- from langchain_core.messages import BaseMessage
- from langchain_core.outputs import LLMResult
- from langchain_core.runnables import RunnableConfig
- from langchain_deepseek import ChatDeepSeek
- from langchain_openai import ChatOpenAI
- from langchain_xai import ChatXAI
+ from typing import TYPE_CHECKING, Any, AsyncIterator, Iterator, List
 
  from blaxel.core import bl_model as bl_model_core
  from blaxel.core import settings
 
- from .custom.gemini import ChatGoogleGenerativeAI
+ if TYPE_CHECKING:
+     from langchain_core.callbacks import Callbacks
+     from langchain_core.language_models import LanguageModelInput
+     from langchain_core.messages import BaseMessage
+     from langchain_core.outputs import LLMResult
+     from langchain_core.runnables import RunnableConfig
 
  logger = getLogger(__name__)
 
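Moving the langchain imports under TYPE_CHECKING, with "from __future__ import annotations" so annotations become lazily evaluated strings, takes the heavy provider packages off the module's import path entirely. A minimal sketch of the idiom, independent of this package (some_heavy_package is hypothetical):

from __future__ import annotations  # annotations are no longer evaluated at runtime

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only static type checkers (mypy, pyright) see this block;
    # at runtime the expensive package is never imported here.
    from some_heavy_package import HeavyClient  # hypothetical module


def make_client(url: str) -> HeavyClient:
    # The real import happens only when the function is actually called.
    from some_heavy_package import HeavyClient

    return HeavyClient(url)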
@@ -37,6 +32,8 @@ class TokenRefreshingWrapper:
          kwargs = config.get("kwargs", {})
 
          if model_type == "mistral":
+             from langchain_openai import ChatOpenAI
+
              return ChatOpenAI(
                  api_key=settings.auth.token,
                  model=model,
@@ -44,6 +41,8 @@
                  **kwargs,
              )
          elif model_type == "cohere":
+             from langchain_cohere import ChatCohere
+
              return ChatCohere(
                  cohere_api_key=settings.auth.token,
                  model=model,
@@ -51,6 +50,8 @@
                  **kwargs,
              )
          elif model_type == "xai":
+             from langchain_xai import ChatXAI
+
              return ChatXAI(
                  model=model,
                  api_key=settings.auth.token,
@@ -58,6 +59,8 @@
                  **kwargs,
              )
          elif model_type == "deepseek":
+             from langchain_deepseek import ChatDeepSeek
+
              return ChatDeepSeek(
                  api_key=settings.auth.token,
                  model=model,
@@ -65,6 +68,8 @@
                  **kwargs,
              )
          elif model_type == "anthropic":
+             from langchain_anthropic import ChatAnthropic
+
              return ChatAnthropic(
                  api_key=settings.auth.token,
                  anthropic_api_url=url,
@@ -73,6 +78,8 @@
                  **kwargs,
              )
          elif model_type == "gemini":
+             from .custom.gemini import ChatGoogleGenerativeAI
+
              return ChatGoogleGenerativeAI(
                  model=model,
                  client_options={"api_endpoint": url},
@@ -81,6 +88,8 @@
                  **kwargs,
              )
          elif model_type == "cerebras":
+             from langchain_cerebras import ChatCerebras
+
              return ChatCerebras(
                  api_key=settings.auth.token,
                  model=model,
@@ -88,6 +97,8 @@
                  **kwargs,
              )
          else:
+             from langchain_openai import ChatOpenAI
+
              if model_type != "openai":
                  logger.warning(f"Model {model} is not supported by Langchain, defaulting to OpenAI")
              return ChatOpenAI(
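With the provider imports moved inside each branch, importing blaxel.langgraph no longer requires every langchain provider package to be installed; an ImportError can only surface for the provider actually selected. A sketch of the dispatch shape (provider set and kwargs illustrative):

def make_chat_model(model_type: str, model: str, **kwargs):
    # Each branch imports its own optional dependency on demand, so users only
    # need the packages for the providers they actually use.
    if model_type == "anthropic":
        from langchain_anthropic import ChatAnthropic  # optional extra

        return ChatAnthropic(model=model, **kwargs)
    # ... other providers elided ...
    else:
        from langchain_openai import ChatOpenAI  # default fallback

        return ChatOpenAI(model=model, **kwargs)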
blaxel/langgraph/tools.py CHANGED
@@ -1,17 +1,10 @@
- from typing import Any, Dict
-
- from langchain_core.tools import StructuredTool
- from mcp.types import (
-     CallToolResult,
-     EmbeddedResource,
-     ImageContent,
-     TextContent,
- )
+ from typing import TYPE_CHECKING, Any, Dict
 
  from blaxel.core.tools import bl_tools as bl_tools_core
  from blaxel.core.tools.types import Tool, ToolException
 
- NonTextContent = ImageContent | EmbeddedResource
+ if TYPE_CHECKING:
+     from langchain_core.tools import StructuredTool
 
 
  def _clean_schema_for_openai(schema: Dict[str, Any]) -> Dict[str, Any]:
@@ -43,7 +36,17 @@ def _clean_schema_for_openai(schema: Dict[str, Any]) -> Dict[str, Any]:
      return cleaned
 
 
- def get_langchain_tool(tool: Tool) -> StructuredTool:
+ def get_langchain_tool(tool: Tool) -> "StructuredTool":
+     from langchain_core.tools import StructuredTool
+     from mcp.types import (
+         CallToolResult,
+         EmbeddedResource,
+         ImageContent,
+         TextContent,
+     )
+
+     NonTextContent = ImageContent | EmbeddedResource
+
      async def langchain_coroutine(
          **arguments: dict[str, Any],
      ) -> tuple[str | list[str], list[NonTextContent] | None]:
@@ -77,7 +80,7 @@ def get_langchain_tool(tool: Tool) -> StructuredTool:
      )
 
 
- async def bl_tools(tools_names: list[str], **kwargs) -> list[StructuredTool]:
+ async def bl_tools(tools_names: list[str], **kwargs) -> list["StructuredTool"]:
      tools = bl_tools_core(tools_names, **kwargs)
      await tools.initialize()
      return [get_langchain_tool(tool) for tool in tools.get_tools()]
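Note why the alias NonTextContent = ImageContent | EmbeddedResource had to move inside the function: unlike the quoted "StructuredTool" annotation, the "|" union is an expression evaluated at runtime, so its operands must be genuinely imported. A hedged sketch of the text/non-text split such a coroutine can perform on an MCP result (types and fields from mcp.types):

from mcp.types import CallToolResult, EmbeddedResource, ImageContent, TextContent

NonTextContent = ImageContent | EmbeddedResource


def split_content(result: CallToolResult) -> tuple[list[str], list[NonTextContent]]:
    """Separate plain text from images/resources in an MCP tool result."""
    texts: list[str] = []
    others: list[NonTextContent] = []
    for block in result.content:
        if isinstance(block, TextContent):
            texts.append(block.text)  # TextContent carries the string payload
        else:
            others.append(block)
    return texts, others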
blaxel/llamaindex/model.py CHANGED
@@ -1,33 +1,25 @@
- import os
-
- # Transformers is a dependency of DeepSeek, and it logs a lot of warnings that are not useful
- os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
+ from __future__ import annotations
 
+ import os
  from logging import getLogger
- from typing import Any, Sequence
-
- from google.genai.types import HttpOptions
- from llama_index.core.base.llms.types import (
-     ChatMessage,
-     ChatResponse,
-     ChatResponseAsyncGen,
-     ChatResponseGen,
-     CompletionResponse,
-     CompletionResponseAsyncGen,
-     CompletionResponseGen,
- )
- from llama_index.llms.anthropic import Anthropic
- from llama_index.llms.cerebras import Cerebras
- from llama_index.llms.deepseek import DeepSeek
- from llama_index.llms.google_genai import GoogleGenAI
- from llama_index.llms.groq import Groq
- from llama_index.llms.mistralai import MistralAI
- from llama_index.llms.openai import OpenAI
+ from typing import TYPE_CHECKING, Any, Sequence
 
  from blaxel.core import bl_model as bl_model_core
  from blaxel.core import settings
 
- from .custom.cohere import Cohere
+ # Transformers is a dependency of DeepSeek, and it logs a lot of warnings that are not useful
+ os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
+
+ if TYPE_CHECKING:
+     from llama_index.core.base.llms.types import (
+         ChatMessage,
+         ChatResponse,
+         ChatResponseAsyncGen,
+         ChatResponseGen,
+         CompletionResponse,
+         CompletionResponseAsyncGen,
+         CompletionResponseGen,
+     )
 
  logger = getLogger(__name__)
 
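The os.environ guard now runs after the blaxel imports, which is still safe: environment flags like this are typically read when the consuming library first initializes, and the llama_index/transformers imports in this module now happen lazily inside the factory branches, well after the flag is set. In sketch form:

import os

# Set the flag before the library that reads it is first imported. The deferred
# provider imports in this module run later, inside the factory branches, so
# setting it at module import time is still early enough.
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"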
@@ -48,6 +40,8 @@ class TokenRefreshingWrapper:
          kwargs = config.get("kwargs", {})
 
          if model_type == "anthropic":
+             from llama_index.llms.anthropic import Anthropic
+
              return Anthropic(
                  model=model,
                  api_key=settings.auth.token,
@@ -56,6 +50,8 @@
                  **kwargs,
              )
          elif model_type == "xai":
+             from llama_index.llms.groq import Groq
+
              return Groq(
                  model=model,
                  api_key=settings.auth.token,
@@ -63,6 +59,9 @@
                  **kwargs,
              )
          elif model_type == "gemini":
+             from google.genai.types import HttpOptions
+             from llama_index.llms.google_genai import GoogleGenAI
+
              return GoogleGenAI(
                  api_key=settings.auth.token,
                  model=model,
@@ -74,8 +73,12 @@
                  **kwargs,
              )
          elif model_type == "cohere":
+             from .custom.cohere import Cohere
+
              return Cohere(model=model, api_key=settings.auth.token, api_base=url, **kwargs)
          elif model_type == "deepseek":
+             from llama_index.llms.deepseek import DeepSeek
+
              return DeepSeek(
                  model=model,
                  api_key=settings.auth.token,
@@ -83,8 +86,12 @@
                  **kwargs,
              )
          elif model_type == "mistral":
+             from llama_index.llms.mistralai import MistralAI
+
              return MistralAI(model=model, api_key=settings.auth.token, endpoint=url, **kwargs)
          elif model_type == "cerebras":
+             from llama_index.llms.cerebras import Cerebras
+
              return Cerebras(
                  model=model,
                  api_key=settings.auth.token,
@@ -92,6 +99,8 @@
                  **kwargs,
              )
          else:
+             from llama_index.llms.openai import OpenAI
+
              if model_type != "openai":
                  logger.warning(
                      f"Model {model} is not supported by LlamaIndex, defaulting to OpenAI"
blaxel/llamaindex/tools.py CHANGED
@@ -1,12 +1,17 @@
- from llama_index.core.tools import FunctionTool
- from llama_index.core.tools.types import ToolMetadata
+ from typing import TYPE_CHECKING
 
  from blaxel.core.tools import bl_tools as bl_tools_core
  from blaxel.core.tools.common import create_model_from_json_schema
  from blaxel.core.tools.types import Tool
 
+ if TYPE_CHECKING:
+     from llama_index.core.tools import FunctionTool
+
+
+ def get_llamaindex_tool(tool: Tool) -> "FunctionTool":
+     from llama_index.core.tools import FunctionTool
+     from llama_index.core.tools.types import ToolMetadata
 
- def get_llamaindex_tool(tool: Tool) -> FunctionTool:
      model_schema = create_model_from_json_schema(
          tool.input_schema, model_name=f"{tool.name}_Schema"
      )
@@ -21,7 +26,7 @@ def get_llamaindex_tool(tool: Tool) -> FunctionTool:
      )
 
 
- async def bl_tools(tools_names: list[str], **kwargs) -> list[FunctionTool]:
+ async def bl_tools(tools_names: list[str], **kwargs) -> list["FunctionTool"]:
      tools = bl_tools_core(tools_names, **kwargs)
      await tools.initialize()
      return [get_llamaindex_tool(tool) for tool in tools.get_tools()]
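A hedged usage sketch of the converter pair above, assuming blaxel.llamaindex re-exports this module's bl_tools; the tool-server name is illustrative:

import asyncio

from blaxel.llamaindex import bl_tools  # assumed re-export of this module's bl_tools


async def main() -> None:
    # Resolves the named Blaxel tool servers and wraps each MCP tool as a
    # llama_index FunctionTool via get_llamaindex_tool above.
    tools = await bl_tools(["blaxel-search"])  # illustrative server name
    for tool in tools:
        print(tool.metadata.name, "-", tool.metadata.description)


asyncio.run(main())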
blaxel/pydantic/model.py CHANGED
@@ -1,19 +1,7 @@
  import logging
  from typing import Any
 
- from anthropic import AsyncAnthropic
- from cohere import AsyncClientV2
- from mistralai.sdk import Mistral
  from pydantic_ai.models import Model
- from pydantic_ai.models.anthropic import AnthropicModel
- from pydantic_ai.models.cohere import CohereModel
- from pydantic_ai.models.gemini import GeminiModel
- from pydantic_ai.models.mistral import MistralModel
- from pydantic_ai.models.openai import OpenAIModel
- from pydantic_ai.providers.anthropic import AnthropicProvider
- from pydantic_ai.providers.cohere import CohereProvider
- from pydantic_ai.providers.mistral import MistralProvider
- from pydantic_ai.providers.openai import OpenAIProvider
 
  from blaxel.core import bl_model as bl_model_core
  from blaxel.core import settings
@@ -41,6 +29,10 @@ class TokenRefreshingModel(Model):
          kwargs = config.get("kwargs", {})
 
          if type == "mistral":
+             from mistralai.sdk import Mistral
+             from pydantic_ai.models.mistral import MistralModel
+             from pydantic_ai.providers.mistral import MistralProvider
+
              return MistralModel(
                  model_name=model,
                  provider=MistralProvider(
@@ -52,6 +44,10 @@
                  ),
              )
          elif type == "cohere":
+             from cohere import AsyncClientV2
+             from pydantic_ai.models.cohere import CohereModel
+             from pydantic_ai.providers.cohere import CohereProvider
+
              return CohereModel(
                  model_name=model,
                  provider=CohereProvider(
@@ -62,6 +58,9 @@
                  ),
              )
          elif type == "xai":
+             from pydantic_ai.models.openai import OpenAIModel
+             from pydantic_ai.providers.openai import OpenAIProvider
+
              return OpenAIModel(
                  model_name=model,
                  provider=OpenAIProvider(
@@ -69,6 +68,9 @@
                  ),
              )
          elif type == "deepseek":
+             from pydantic_ai.models.openai import OpenAIModel
+             from pydantic_ai.providers.openai import OpenAIProvider
+
              return OpenAIModel(
                  model_name=model,
                  provider=OpenAIProvider(
@@ -76,6 +78,9 @@
                  ),
              )
          elif type == "cerebras":
+             from pydantic_ai.models.openai import OpenAIModel
+             from pydantic_ai.providers.openai import OpenAIProvider
+
              return OpenAIModel(
                  model_name=model,
                  provider=OpenAIProvider(
@@ -83,6 +88,10 @@
                  ),
              )
          elif type == "anthropic":
+             from anthropic import AsyncAnthropic
+             from pydantic_ai.models.anthropic import AnthropicModel
+             from pydantic_ai.providers.anthropic import AnthropicProvider
+
              return AnthropicModel(
                  model_name=model,
                  provider=AnthropicProvider(
@@ -95,6 +104,8 @@
                  ),
              )
          elif type == "gemini":
+             from pydantic_ai.models.gemini import GeminiModel
+
              return GeminiModel(
                  model_name=model,
                  provider=GoogleGLAProvider(
@@ -105,6 +116,9 @@
                  ),
              )
          else:
+             from pydantic_ai.models.openai import OpenAIModel
+             from pydantic_ai.providers.openai import OpenAIProvider
+
              if type != "openai":
                  logger.warning(f"Model {model} is not supported by Pydantic, defaulting to OpenAI")
              return OpenAIModel(