blaxel 0.2.36__py3-none-any.whl → 0.2.38rc122__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. blaxel/__init__.py +2 -2
  2. blaxel/core/client/models/create_job_execution_request_env.py +3 -3
  3. blaxel/core/client/models/preview.py +48 -1
  4. blaxel/core/client/models/sandbox.py +10 -0
  5. blaxel/core/jobs/__init__.py +2 -2
  6. blaxel/core/sandbox/__init__.py +12 -0
  7. blaxel/core/sandbox/client/api/system/__init__.py +0 -0
  8. blaxel/core/sandbox/client/api/system/get_health.py +134 -0
  9. blaxel/core/sandbox/client/api/system/post_upgrade.py +196 -0
  10. blaxel/core/sandbox/client/models/__init__.py +8 -0
  11. blaxel/core/sandbox/client/models/content_search_match.py +24 -25
  12. blaxel/core/sandbox/client/models/content_search_response.py +25 -29
  13. blaxel/core/sandbox/client/models/find_match.py +13 -14
  14. blaxel/core/sandbox/client/models/find_response.py +21 -24
  15. blaxel/core/sandbox/client/models/fuzzy_search_match.py +17 -19
  16. blaxel/core/sandbox/client/models/fuzzy_search_response.py +21 -24
  17. blaxel/core/sandbox/client/models/health_response.py +159 -0
  18. blaxel/core/sandbox/client/models/process_upgrade_state.py +20 -0
  19. blaxel/core/sandbox/client/models/upgrade_request.py +71 -0
  20. blaxel/core/sandbox/client/models/upgrade_status.py +125 -0
  21. blaxel/core/sandbox/default/__init__.py +2 -0
  22. blaxel/core/sandbox/default/filesystem.py +20 -6
  23. blaxel/core/sandbox/default/preview.py +48 -1
  24. blaxel/core/sandbox/default/process.py +66 -21
  25. blaxel/core/sandbox/default/sandbox.py +36 -5
  26. blaxel/core/sandbox/default/system.py +71 -0
  27. blaxel/core/sandbox/sync/__init__.py +2 -0
  28. blaxel/core/sandbox/sync/filesystem.py +19 -2
  29. blaxel/core/sandbox/sync/preview.py +50 -3
  30. blaxel/core/sandbox/sync/process.py +38 -15
  31. blaxel/core/sandbox/sync/sandbox.py +29 -4
  32. blaxel/core/sandbox/sync/system.py +71 -0
  33. blaxel/core/sandbox/types.py +212 -5
  34. blaxel/core/volume/volume.py +6 -0
  35. blaxel/langgraph/tools.py +0 -1
  36. blaxel/llamaindex/model.py +119 -74
  37. blaxel-0.2.38rc122.dist-info/METADATA +569 -0
  38. {blaxel-0.2.36.dist-info → blaxel-0.2.38rc122.dist-info}/RECORD +40 -31
  39. blaxel-0.2.36.dist-info/METADATA +0 -228
  40. {blaxel-0.2.36.dist-info → blaxel-0.2.38rc122.dist-info}/WHEEL +0 -0
  41. {blaxel-0.2.36.dist-info → blaxel-0.2.38rc122.dist-info}/licenses/LICENSE +0 -0
blaxel/core/sandbox/types.py CHANGED
@@ -4,7 +4,14 @@ from typing import Any, Callable, Dict, List, TypeVar, Union
  import httpx
  from attrs import define as _attrs_define

- from ..client.models import Port, Sandbox, SandboxLifecycle, VolumeAttachment
+ from ..client.models import (
+     Env,
+     Port,
+     PortProtocol,
+     Sandbox,
+     SandboxLifecycle,
+     VolumeAttachment,
+ )
  from ..client.types import UNSET
  from .client.models.process_request import ProcessRequest
  from .client.models.process_response import ProcessResponse
@@ -205,7 +212,7 @@ class SandboxCreateConfiguration:
              if isinstance(port, Port):
                  # If it's already a Port object, ensure protocol defaults to HTTP
                  if port.protocol is UNSET or not port.protocol:
-                     port.protocol = "HTTP"
+                     port.protocol = PortProtocol.HTTP
                  port_objects.append(port)
              elif isinstance(port, dict):
                  # Convert dict to Port object with HTTP as default protocol
@@ -218,20 +225,22 @@ class SandboxCreateConfiguration:

          return port_objects

-     def _normalize_envs(self) -> List[Dict[str, str]] | None:
+     def _normalize_envs(self) -> List[Env] | None:
          """Convert envs to list of dicts with name and value keys."""
          if not self.envs:
              return None

          env_objects = []
          for env in self.envs:
-             if isinstance(env, dict):
+             if isinstance(env, Env):
+                 env_objects.append(env)
+             elif isinstance(env, dict):
                  # Validate that the dict has the required keys
                  if "name" not in env or "value" not in env:
                      raise ValueError(
                          f"Environment variable dict must have 'name' and 'value' keys: {env}"
                      )
-                 env_objects.append({"name": env["name"], "value": env["value"]})
+                 env_objects.append(Env(name=env["name"], value=env["value"]))
              else:
                  raise ValueError(
                      f"Invalid env type: {type(env)}. Expected dict with 'name' and 'value' keys."
@@ -385,3 +394,201 @@ class Context:
      @classmethod
      def from_json(cls, data: Dict[str, Any]) -> "Context":
          return cls(id=str(data.get("id") or data.get("context_id") or ""))
+
+
+ class StreamHandle:
+     """Handle for managing a streaming operation (sync version).
+
+     Can be used as a context manager for automatic cleanup:
+
+         with sandbox.process.stream_logs(name, options) as handle:
+             # do something
+         # handle is automatically closed
+
+     Or used manually:
+
+         handle = sandbox.process.stream_logs(name, options)
+         try:
+             # do something
+         finally:
+             handle.close()
+     """
+
+     def __init__(self, close_func: Callable[[], None]):
+         self._close_func = close_func
+         self._closed = False
+
+     def close(self) -> None:
+         """Close the stream and stop receiving data."""
+         if not self._closed:
+             self._close_func()
+             self._closed = True
+
+     @property
+     def closed(self) -> bool:
+         """Returns True if the stream handle has been closed."""
+         return self._closed
+
+     def __enter__(self) -> "StreamHandle":
+         return self
+
+     def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+         self.close()
+
+     # Backward compatibility: support dict-like access
+     def __getitem__(self, key: str) -> Callable[[], None]:
+         if key == "close":
+             return self.close
+         raise KeyError(key)
+
+
+ class AsyncStreamHandle:
+     """Handle for managing a streaming operation (async version).
+
+     Can be used as an async context manager for automatic cleanup:
+
+         async with sandbox.process.stream_logs(name, options) as handle:
+             # do something
+         # handle is automatically closed
+
+     Or used manually:
+
+         handle = sandbox.process.stream_logs(name, options)
+         try:
+             # do something
+         finally:
+             handle.close()
+     """
+
+     def __init__(self, close_func: Callable[[], None]):
+         self._close_func = close_func
+         self._closed = False
+
+     def close(self) -> None:
+         """Close the stream and stop receiving data."""
+         if not self._closed:
+             self._close_func()
+             self._closed = True
+
+     @property
+     def closed(self) -> bool:
+         """Returns True if the stream handle has been closed."""
+         return self._closed
+
+     async def __aenter__(self) -> "AsyncStreamHandle":
+         return self
+
+     async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+         self.close()
+
+     # Also support sync context manager for convenience
+     def __enter__(self) -> "AsyncStreamHandle":
+         return self
+
+     def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+         self.close()
+
+     # Backward compatibility: support dict-like access
+     def __getitem__(self, key: str) -> Callable[[], None]:
+         if key == "close":
+             return self.close
+         raise KeyError(key)
+
+
+ class WatchHandle:
+     """Handle for managing a file system watch operation (sync version).
+
+     Can be used as a context manager for automatic cleanup:
+
+         with sandbox.fs.watch(path, callback) as handle:
+             # do something
+         # handle is automatically closed
+
+     Or used manually:
+
+         handle = sandbox.fs.watch(path, callback)
+         try:
+             # do something
+         finally:
+             handle.close()
+     """
+
+     def __init__(self, close_func: Callable[[], None]):
+         self._close_func = close_func
+         self._closed = False
+
+     def close(self) -> None:
+         """Close the watch and stop receiving events."""
+         if not self._closed:
+             self._close_func()
+             self._closed = True
+
+     @property
+     def closed(self) -> bool:
+         """Returns True if the watch handle has been closed."""
+         return self._closed
+
+     def __enter__(self) -> "WatchHandle":
+         return self
+
+     def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+         self.close()
+
+     # Backward compatibility: support dict-like access
+     def __getitem__(self, key: str) -> Callable[[], None]:
+         if key == "close":
+             return self.close
+         raise KeyError(key)
+
+
+ class AsyncWatchHandle:
+     """Handle for managing a file system watch operation (async version).
+
+     Can be used as an async context manager for automatic cleanup:
+
+         async with sandbox.fs.watch(path, callback) as handle:
+             # do something
+         # handle is automatically closed
+
+     Or used manually:
+
+         handle = sandbox.fs.watch(path, callback)
+         try:
+             # do something
+         finally:
+             handle.close()
+     """
+
+     def __init__(self, close_func: Callable[[], None]):
+         self._close_func = close_func
+         self._closed = False
+
+     def close(self) -> None:
+         """Close the watch and stop receiving events."""
+         if not self._closed:
+             self._close_func()
+             self._closed = True
+
+     @property
+     def closed(self) -> bool:
+         """Returns True if the watch handle has been closed."""
+         return self._closed
+
+     async def __aenter__(self) -> "AsyncWatchHandle":
+         return self
+
+     async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+         self.close()
+
+     # Also support sync context manager for convenience
+     def __enter__(self) -> "AsyncWatchHandle":
+         return self
+
+     def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+         self.close()
+
+     # Backward compatibility: support dict-like access
+     def __getitem__(self, key: str) -> Callable[[], None]:
+         if key == "close":
+             return self.close
+         raise KeyError(key)
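A short sketch (not from the diff) of the dict-style backward compatibility these handles keep; sandbox, name, and options are placeholders for an existing sandbox instance and stream options:

    handle = sandbox.process.stream_logs(name, options)
    try:
        ...  # consume output
    finally:
        handle["close"]()  # legacy dict-style access; equivalent to handle.close()
    assert handle.closed  # close() marks the handle closed and is a no-op afterwards

The same pattern applies to WatchHandle and the async variants, which additionally work as (async) context managers as shown in their docstrings.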
blaxel/core/volume/volume.py CHANGED
@@ -1,3 +1,5 @@
+ import asyncio
+ import time
  import uuid
  from typing import Callable, Dict, List, Union

@@ -500,6 +502,8 @@ async def _update_volume_by_name(
          status_code = int(response.code) if response.code is not UNSET else None
          message = response.message if response.message is not UNSET else response.error
          raise VolumeAPIError(message, status_code=status_code, code=response.error)
+     # This is for safe update
+     await asyncio.sleep(0.5)
      return VolumeInstance(response)


@@ -576,6 +580,8 @@ def _update_volume_by_name_sync(
          status_code = int(response.code) if response.code is not UNSET else None
          message = response.message if response.message is not UNSET else response.error
          raise VolumeAPIError(message, status_code=status_code, code=response.error)
+     # This is for safe update
+     time.sleep(0.5)
      return SyncVolumeInstance(response)


blaxel/langgraph/tools.py CHANGED
@@ -5,7 +5,6 @@ from blaxel.core.tools.types import Tool, ToolException

  if TYPE_CHECKING:
      from langchain_core.tools import StructuredTool
-     from mcp.types import EmbeddedResource, ImageContent


  def _clean_schema_for_openai(schema: Dict[str, Any]) -> Dict[str, Any]:
blaxel/llamaindex/model.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations

  import os
  from logging import getLogger
- from typing import TYPE_CHECKING, Any, Sequence
+ from typing import TYPE_CHECKING, Any, Dict, List, Sequence, Union

  from blaxel.core import bl_model as bl_model_core
  from blaxel.core import settings
@@ -20,20 +20,61 @@ if TYPE_CHECKING:
          CompletionResponseAsyncGen,
          CompletionResponseGen,
      )
+     from llama_index.core.llms.llm import ToolSelection
+     from llama_index.core.tools.types import BaseTool
+
+ # Runtime imports needed for class inheritance and construction
+ from llama_index.core.base.llms.types import LLMMetadata  # noqa: E402
+ from llama_index.core.llms.function_calling import FunctionCallingLLM  # noqa: E402
+ from pydantic import PrivateAttr  # noqa: E402

  logger = getLogger(__name__)

+ DEFAULT_CONTEXT_WINDOW = 128000
+ DEFAULT_NUM_OUTPUT = 4096
+
+
+ class TokenRefreshingLLM(FunctionCallingLLM):
+     """Wrapper for LlamaIndex LLMs that refreshes token before each call.

- class TokenRefreshingWrapper:
-     """Base wrapper class that refreshes token before each call."""
+     Inherits from FunctionCallingLLM to maintain type compatibility with
+     LlamaIndex's agents and components that validate isinstance(model, LLM).
+     """
+
+     _model_config_data: dict = PrivateAttr(default_factory=dict)
+     _wrapped: Any = PrivateAttr(default=None)

      def __init__(self, model_config: dict):
-         self.model_config = model_config
-         self.wrapped_model = self._create_model()
+         super().__init__()
+         self._model_config_data = model_config
+         self._wrapped = self._create_model()
+
+     @classmethod
+     def class_name(cls) -> str:
+         return "TokenRefreshingLLM"
+
+     @property
+     def wrapped_model(self) -> Any:
+         """Access the underlying wrapped LLM model."""
+         return self._wrapped
+
+     @property
+     def metadata(self) -> LLMMetadata:
+         """Get LLM metadata, with fallback for unknown model names."""
+         try:
+             return self._wrapped.metadata
+         except (ValueError, KeyError) as e:
+             logger.warning(f"Could not get metadata from wrapped model: {e}. Using defaults.")
+             return LLMMetadata(
+                 context_window=DEFAULT_CONTEXT_WINDOW,
+                 num_output=DEFAULT_NUM_OUTPUT,
+                 is_chat_model=True,
+                 model_name=self._model_config_data.get("model", "unknown"),
+             )

      def _create_model(self):
          """Create the model instance with current token."""
-         config = self.model_config
+         config = self._model_config_data
          model_type = config["type"]
          model = config["model"]
          url = config["url"]
@@ -115,102 +156,106 @@ class TokenRefreshingWrapper:

      def _refresh_token(self):
          """Refresh the token and recreate the model if needed."""
-         # Only refresh if using ClientCredentials (which has get_token method)
          current_token = settings.auth.token

-         if hasattr(settings.auth, "get_token"):
-             # This will trigger token refresh if needed
-             settings.auth.get_token()
-
          new_token = settings.auth.token

-         # If token changed, recreate the model
          if current_token != new_token:
-             self.wrapped_model = self._create_model()
+             self._wrapped = self._create_model()

-     def __getattr__(self, name):
-         """Delegate attribute access to wrapped model."""
-         return getattr(self.wrapped_model, name)
+     # --- Core LLM methods with token refresh ---

+     def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
+         self._refresh_token()
+         return self._wrapped.chat(messages, **kwargs)

- class TokenRefreshingLLM(TokenRefreshingWrapper):
-     """Wrapper for LlamaIndex LLMs that refreshes token before each call."""
-
-     async def achat(
-         self,
-         messages: Sequence[ChatMessage],
-         **kwargs: Any,
-     ) -> ChatResponse:
-         """Async chat with token refresh."""
+     async def achat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
          self._refresh_token()
-         return await self.wrapped_model.achat(messages, **kwargs)
+         return await self._wrapped.achat(messages, **kwargs)

-     def chat(
-         self,
-         messages: Sequence[ChatMessage],
-         **kwargs: Any,
-     ) -> ChatResponse:
-         """Sync chat with token refresh."""
+     def complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> CompletionResponse:
          self._refresh_token()
-         return self.wrapped_model.chat(messages, **kwargs)
+         return self._wrapped.complete(prompt, formatted=formatted, **kwargs)

-     async def astream_chat(
-         self,
-         messages: Sequence[ChatMessage],
-         **kwargs: Any,
-     ) -> ChatResponseAsyncGen:
-         """Async stream chat with token refresh."""
+     async def acomplete(
+         self, prompt: str, formatted: bool = False, **kwargs: Any
+     ) -> CompletionResponse:
          self._refresh_token()
-         async for chunk in self.wrapped_model.astream_chat(messages, **kwargs):
-             yield chunk
+         return await self._wrapped.acomplete(prompt, formatted=formatted, **kwargs)

      def stream_chat(
-         self,
-         messages: Sequence[ChatMessage],
-         **kwargs: Any,
+         self, messages: Sequence[ChatMessage], **kwargs: Any
      ) -> ChatResponseGen:
-         """Sync stream chat with token refresh."""
          self._refresh_token()
-         for chunk in self.wrapped_model.stream_chat(messages, **kwargs):
-             yield chunk
+         return self._wrapped.stream_chat(messages, **kwargs)

-     async def acomplete(
-         self,
-         prompt: str,
-         **kwargs: Any,
-     ) -> CompletionResponse:
-         """Async complete with token refresh."""
+     async def astream_chat(
+         self, messages: Sequence[ChatMessage], **kwargs: Any
+     ) -> ChatResponseAsyncGen:
          self._refresh_token()
-         return await self.wrapped_model.acomplete(prompt, **kwargs)
+         result = self._wrapped.astream_chat(messages, **kwargs)
+         # Handle both coroutine and async generator patterns
+         if hasattr(result, "__aiter__"):
+             return result
+         return await result

-     def complete(
-         self,
-         prompt: str,
-         **kwargs: Any,
-     ) -> CompletionResponse:
-         """Sync complete with token refresh."""
+     def stream_complete(
+         self, prompt: str, formatted: bool = False, **kwargs: Any
+     ) -> CompletionResponseGen:
          self._refresh_token()
-         return self.wrapped_model.complete(prompt, **kwargs)
+         return self._wrapped.stream_complete(prompt, formatted=formatted, **kwargs)

      async def astream_complete(
-         self,
-         prompt: str,
-         **kwargs: Any,
+         self, prompt: str, formatted: bool = False, **kwargs: Any
      ) -> CompletionResponseAsyncGen:
-         """Async stream complete with token refresh."""
          self._refresh_token()
-         async for chunk in self.wrapped_model.astream_complete(prompt, **kwargs):
-             yield chunk
+         result = self._wrapped.astream_complete(prompt, formatted=formatted, **kwargs)
+         # Handle both coroutine and async generator patterns
+         if hasattr(result, "__aiter__"):
+             return result
+         return await result

-     def stream_complete(
+     # --- FunctionCallingLLM methods (delegate to wrapped model) ---
+
+     def _prepare_chat_with_tools(
          self,
-         prompt: str,
+         tools: Sequence[BaseTool],
+         user_msg: Union[str, ChatMessage, None] = None,
+         chat_history: List[ChatMessage] | None = None,
+         verbose: bool = False,
+         allow_parallel_tool_calls: bool = False,
+         tool_required: Any = None,
          **kwargs: Any,
-     ) -> CompletionResponseGen:
-         """Sync stream complete with token refresh."""
-         self._refresh_token()
-         for chunk in self.wrapped_model.stream_complete(prompt, **kwargs):
-             yield chunk
+     ) -> Dict[str, Any]:
+         if hasattr(self._wrapped, "_prepare_chat_with_tools"):
+             return self._wrapped._prepare_chat_with_tools(
+                 tools,
+                 user_msg=user_msg,
+                 chat_history=chat_history,
+                 verbose=verbose,
+                 allow_parallel_tool_calls=allow_parallel_tool_calls,
+                 tool_required=tool_required,
+                 **kwargs,
+             )
+         raise NotImplementedError(
+             f"The wrapped model ({type(self._wrapped).__name__}) does not support function calling"
+         )
+
+     def get_tool_calls_from_response(
+         self,
+         response: ChatResponse,
+         error_on_no_tool_call: bool = True,
+         **kwargs: Any,
+     ) -> List[ToolSelection]:
+         if hasattr(self._wrapped, "get_tool_calls_from_response"):
+             return self._wrapped.get_tool_calls_from_response(
+                 response,
+                 error_on_no_tool_call=error_on_no_tool_call,
+                 **kwargs,
+             )
+         raise NotImplementedError(
+             f"The wrapped model ({type(self._wrapped).__name__}) does not support function calling"
+         )


  async def bl_model(name, **kwargs):
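A hedged usage sketch (not part of the diff) of the new wrapper from an async entry point; the model name is illustrative and the blaxel.llamaindex import path is assumed:

    import asyncio

    from llama_index.core.base.llms.types import ChatMessage, MessageRole

    from blaxel.llamaindex import bl_model  # import path assumed

    async def main() -> None:
        model = await bl_model("my-model")  # assumed to return a TokenRefreshingLLM
        # Each call re-reads settings.auth.token, rebuilds the wrapped provider
        # LLM if the token changed, then delegates to it.
        resp = await model.achat([ChatMessage(role=MessageRole.USER, content="hello")])
        print(resp.message.content)

    asyncio.run(main())

Because TokenRefreshingLLM now subclasses FunctionCallingLLM, isinstance checks in LlamaIndex agents accept it, and tool-calling calls are forwarded to the wrapped model when it supports them.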