dais-sdk 0.6.0__tar.gz → 0.6.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/PKG-INFO +1 -1
  2. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/pyproject.toml +1 -1
  3. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/__init__.py +61 -56
  4. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/mcp_client/remote_mcp_client.py +1 -1
  5. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/execute.py +47 -7
  6. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/utils.py +12 -1
  7. dais_sdk-0.6.1/src/dais_sdk/types/exceptions.py +56 -0
  8. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/types/message.py +8 -0
  9. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/types/request_params.py +0 -6
  10. dais_sdk-0.6.0/src/dais_sdk/types/exceptions.py +0 -27
  11. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/LICENSE +0 -0
  12. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/README.md +0 -0
  13. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/debug.py +0 -0
  14. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/logger.py +0 -0
  15. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/mcp_client/__init__.py +0 -0
  16. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/mcp_client/base_mcp_client.py +0 -0
  17. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/mcp_client/local_mcp_client.py +0 -0
  18. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/mcp_client/oauth_server.py +0 -0
  19. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/param_parser.py +0 -0
  20. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/stream.py +0 -0
  21. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/__init__.py +0 -0
  22. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/prepare.py +0 -0
  23. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/toolset/__init__.py +0 -0
  24. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/toolset/mcp_toolset.py +0 -0
  25. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/toolset/python_toolset.py +0 -0
  26. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/toolset/toolset.py +0 -0
  27. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/types/__init__.py +0 -0
  28. {dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/types/tool.py +0 -0
{dais_sdk-0.6.0 → dais_sdk-0.6.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dais-sdk
-Version: 0.6.0
+Version: 0.6.1
 Summary: A wrapper of LiteLLM
 Author-email: BHznJNs <bhznjns@outlook.com>
 Requires-Python: >=3.10
{dais_sdk-0.6.0 → dais_sdk-0.6.1}/pyproject.toml
@@ -17,7 +17,7 @@ classifiers = [
     "Programming Language :: Python :: 3.12",
 ]
 requires-python = ">=3.10"
-version = "0.6.0"
+version = "0.6.1"
 
 dependencies = [
     "litellm>=1.80.0",
{dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/__init__.py
@@ -1,5 +1,4 @@
-import asyncio
-import queue
+import json
 from typing import cast
 from collections.abc import AsyncGenerator, Generator
 from litellm import CustomStreamWrapper, completion, acompletion
@@ -12,7 +11,10 @@ from litellm.types.utils import (
 from .debug import enable_debugging
 from .param_parser import ParamParser
 from .stream import AssistantMessageCollector
-from .tool.execute import execute_tool_sync, execute_tool
+from .tool.execute import (
+    ToolExceptionHandlerManager,
+    execute_tool_sync, execute_tool
+)
 from .tool.toolset import (
     Toolset,
     python_tool,
@@ -21,7 +23,7 @@ from .tool.toolset import (
     LocalMcpToolset,
     RemoteMcpToolset,
 )
-from .tool.utils import find_tool_by_name
+from .tool.utils import get_tool_name
 from .mcp_client import (
     McpClient,
     McpTool,
@@ -51,6 +53,10 @@ from .types.exceptions import (
     ContentPolicyViolationError,
     APIError,
     Timeout,
+    LlmToolException,
+    ToolDoesNotExistError,
+    ToolArgumentDecodeError,
+    ToolExecutionError,
 )
 from .types.message import (
     ChatMessage, UserMessage, SystemMessage, AssistantMessage, ToolMessage,
@@ -87,47 +93,49 @@ class LLM:
         self.provider = provider
         self.base_url = base_url
         self.api_key = api_key
+        self._tool_exception_handler_manager = ToolExceptionHandlerManager()
         self._param_parser = ParamParser(self.provider, self.base_url, self.api_key)
 
-    @staticmethod
-    async def execute_tool_call(
-        params: LlmRequestParams,
-        incomplete_tool_message: ToolMessage
-    ) -> tuple[str | None, str | None]:
+    @property
+    def tool_exception_handler_manager(self) -> ToolExceptionHandlerManager:
+        return self._tool_exception_handler_manager
+
+    async def execute_tool_call(self,
+                                tool_def: ToolLike,
+                                arguments: str | dict) -> tuple[str | None, str | None]:
         """
-        Receive incomplete tool messages, execute the tool calls and
-        return the result and error tuple.
+        Returns:
+            A tuple of (result, error)
         """
-        name, arguments = incomplete_tool_message.name, incomplete_tool_message.arguments
-        tool_def = params.find_tool(incomplete_tool_message.name)
-        if tool_def is None:
-            raise LlmRequestParams.ToolDoesNotExistError(name)
-
         result, error = None, None
         try:
             result = await execute_tool(tool_def, arguments)
+        except json.JSONDecodeError as e:
+            assert type(arguments) is str
+            _error = ToolArgumentDecodeError(get_tool_name(tool_def), arguments, e)
+            error = self._tool_exception_handler_manager.handle(_error)
         except Exception as e:
-            error = f"{type(e).__name__}: {str(e)}"
+            _error = ToolExecutionError(tool_def, arguments, e)
+            error = self._tool_exception_handler_manager.handle(_error)
         return result, error
 
-    @staticmethod
-    def execute_tool_call_sync(
-        params: LlmRequestParams,
-        incomplete_tool_message: ToolMessage
-    ) -> tuple[str | None, str | None]:
+    def execute_tool_call_sync(self,
+                               tool_def: ToolLike,
+                               arguments: str | dict
+                               ) -> tuple[str | None, str | None]:
         """
         Synchronous version of `execute_tool_call`.
         """
-        name, arguments = incomplete_tool_message.name, incomplete_tool_message.arguments
-        tool_def = params.find_tool(incomplete_tool_message.name)
-        if tool_def is None:
-            raise LlmRequestParams.ToolDoesNotExistError(name)
-
        result, error = None, None
        try:
            result = execute_tool_sync(tool_def, arguments)
+        except json.JSONDecodeError as e:
+            assert type(arguments) is str
+            _error = ToolArgumentDecodeError(get_tool_name(tool_def), arguments, e)
+            error = self._tool_exception_handler_manager.handle(_error)
        except Exception as e:
-            error = f"{type(e).__name__}: {str(e)}"
+            _error = ToolExecutionError(tool_def, arguments, e)
+            error = self._tool_exception_handler_manager.handle(_error)
        return result, error
 
     def _resolve_tool_calls_sync(self, params: LlmRequestParams, assistant_message: AssistantMessage) -> Generator[ToolMessage]:
@@ -136,17 +144,14 @@
                 := assistant_message.get_incomplete_tool_messages()) is None:
             return
         for incomplete_tool_message in incomplete_tool_messages:
-            try:
-                result, error = LLM.execute_tool_call_sync(params, incomplete_tool_message)
-            except LlmRequestParams.ToolDoesNotExistError as e:
-                logger.warning(f"{e.message} Skipping this tool call.")
+            tool = params.find_tool(incomplete_tool_message.name)
+            if tool is None:
+                _error = ToolDoesNotExistError(incomplete_tool_message.name)
+                error = self._tool_exception_handler_manager.handle(_error)
+                yield incomplete_tool_message.with_result(None, error)
                 continue
-            yield ToolMessage(
-                tool_call_id=incomplete_tool_message.tool_call_id,
-                name=incomplete_tool_message.name,
-                arguments=incomplete_tool_message.arguments,
-                result=result,
-                error=error)
+            result, error = self.execute_tool_call_sync(tool, incomplete_tool_message.arguments)
+            yield incomplete_tool_message.with_result(result, error)
 
     async def _resolve_tool_calls(self, params: LlmRequestParams, assistant_message: AssistantMessage) -> AsyncGenerator[ToolMessage]:
         if not params.execute_tools: return
@@ -154,17 +159,14 @@
                 assistant_message.get_incomplete_tool_messages()) is None:
             return
         for incomplete_tool_message in incomplete_tool_messages:
-            try:
-                result, error = await LLM.execute_tool_call(params, incomplete_tool_message)
-            except LlmRequestParams.ToolDoesNotExistError as e:
-                logger.warning(f"{e.message} Skipping this tool call.")
+            tool = params.find_tool(incomplete_tool_message.name)
+            if tool is None:
+                _error = ToolDoesNotExistError(incomplete_tool_message.name)
+                error = self._tool_exception_handler_manager.handle(_error)
+                yield incomplete_tool_message.with_result(None, error)
                 continue
-            yield ToolMessage(
-                tool_call_id=incomplete_tool_message.tool_call_id,
-                name=incomplete_tool_message.name,
-                arguments=incomplete_tool_message.arguments,
-                result=result,
-                error=error)
+            result, error = await self.execute_tool_call(tool, incomplete_tool_message.arguments)
+            yield incomplete_tool_message.with_result(result, error)
 
     def list_models(self) -> list[str]:
         provider_config = ProviderConfigManager.get_provider_model_info(
@@ -208,8 +210,8 @@
         - stream: Generator yielding `MessageChunk` objects
         - full_message_queue: Queue containing complete `AssistantMessage`, `ToolMessage` (or `None` when done)
         """
-        def stream(response: CustomStreamWrapper) -> Generator[MessageChunk]:
-            nonlocal message_collector
+        def stream(response: CustomStreamWrapper, full_message_queue: FullMessageQueueSync) -> Generator[MessageChunk]:
+            message_collector = AssistantMessageCollector()
             for chunk in response:
                 chunk = cast(LiteLlmModelResponseStream, chunk)
                 yield from openai_chunk_normalizer(chunk)
@@ -223,9 +225,8 @@
             full_message_queue.put(None)
 
         response = completion(**self._param_parser.parse_stream(params))
-        message_collector = AssistantMessageCollector()
-        returned_stream = stream(cast(CustomStreamWrapper, response))
         full_message_queue = FullMessageQueueSync()
+        returned_stream = stream(cast(CustomStreamWrapper, response), full_message_queue)
         return returned_stream, full_message_queue
 
     async def stream_text(self, params: LlmRequestParams) -> StreamTextResponseAsync:
@@ -234,8 +235,8 @@
         - stream: Generator yielding `MessageChunk` objects
         - full_message_queue: Queue containing complete `AssistantMessage`, `ToolMessage` (or `None` when done)
        """
-        async def stream(response: CustomStreamWrapper) -> AsyncGenerator[MessageChunk]:
-            nonlocal message_collector
+        async def stream(response: CustomStreamWrapper, full_message_queue: FullMessageQueueAsync) -> AsyncGenerator[MessageChunk]:
+            message_collector = AssistantMessageCollector()
            async for chunk in response:
                chunk = cast(LiteLlmModelResponseStream, chunk)
                for normalized_chunk in openai_chunk_normalizer(chunk):
@@ -244,14 +245,14 @@
 
            message = message_collector.get_message()
            await full_message_queue.put(message)
+
            async for tool_message in self._resolve_tool_calls(params, message):
                await full_message_queue.put(tool_message)
            await full_message_queue.put(None)
 
        response = await acompletion(**self._param_parser.parse_stream(params))
-        message_collector = AssistantMessageCollector()
-        returned_stream = stream(cast(CustomStreamWrapper, response))
        full_message_queue = FullMessageQueueAsync()
+        returned_stream = stream(cast(CustomStreamWrapper, response), full_message_queue)
        return returned_stream, full_message_queue
 
 __all__ = [
@@ -317,4 +318,8 @@ __all__ = [
     "ContentPolicyViolationError",
     "APIError",
     "Timeout",
+    "LlmToolException",
+    "ToolDoesNotExistError",
+    "ToolArgumentDecodeError",
+    "ToolExecutionError",
 ]
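
Taken together, the __init__.py changes replace the old static execute_tool_call(params, incomplete_tool_message) with instance methods that take the tool and the raw arguments directly, and route failures through the new per-instance ToolExceptionHandlerManager instead of raising. A rough usage sketch of the 0.6.1 API (the get_weather tool, provider and key are illustrative placeholders, and the LLM constructor arguments are assumed from the attributes set in __init__):

import asyncio
from dais_sdk import LLM, ToolExecutionError

def get_weather(city: str) -> str:
    # Placeholder tool: a real tool would call an external API here.
    return f"Sunny in {city}"

async def main() -> None:
    # Constructor arguments assumed from LLM.__init__ (provider/base_url/api_key).
    llm = LLM(provider="openai", api_key="sk-...")

    # In 0.6.1, tool failures are no longer raised to the caller; a registered
    # handler converts them into the `error` string returned alongside `result`.
    @llm.tool_exception_handler_manager.register(ToolExecutionError)
    def on_tool_failure(e: ToolExecutionError) -> str:
        return f"{type(e.raw_error).__name__}: {e.raw_error}"

    # Arguments may be a JSON string (as produced by the model) or a dict.
    result, error = await llm.execute_tool_call(get_weather, '{"city": "Paris"}')
    print(result, error)

asyncio.run(main())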
{dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/mcp_client/remote_mcp_client.py
@@ -98,7 +98,7 @@ class RemoteMcpClient(McpClient):
         try:
             webbrowser.open(url)
         except Exception as e:
-            logger.error(f"[OAuth] Not able to open browser: {e}")
+            logger.error(f"[OAuth] Not able to open browser", exc_info=e)
 
     async def _handle_oauth_callback(self) -> OAuthCode:
         if self._oauth_context is None:
{dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/execute.py
@@ -1,12 +1,42 @@
 import asyncio
 import json
 from functools import singledispatch
-from typing import Any, Awaitable, Callable, cast
-from types import FunctionType, MethodType, CoroutineType
+from typing import Any, assert_never, Callable, cast
+from types import FunctionType, MethodType
 from ..types.tool import ToolDef, ToolLike
+from ..types.exceptions import ToolDoesNotExistError, ToolArgumentDecodeError, ToolExecutionError
+from ..logger import logger
 
-async def _coroutine_wrapper(awaitable: Awaitable[Any]) -> CoroutineType:
-    return await awaitable
+class ToolExceptionHandlerManager:
+    ToolException = ToolDoesNotExistError | ToolArgumentDecodeError | ToolExecutionError
+    Handler = Callable[[ToolException], str]
+
+    def __init__(self):
+        self._handlers: dict[
+            type[ToolExceptionHandlerManager.ToolException],
+            ToolExceptionHandlerManager.Handler
+        ] = {}
+
+    def register(self, exception_type: type[ToolException]):
+        def decorator(handler: ToolExceptionHandlerManager.Handler) -> ToolExceptionHandlerManager.Handler:
+            self.set_handler(exception_type, handler)
+            return handler
+        return decorator
+
+    def set_handler(self, exception_type: type[ToolException], handler: Handler):
+        self._handlers[exception_type] = handler
+
+    def get_handler(self, exception_type: type[ToolException]) -> Handler | None:
+        return self._handlers.get(exception_type)
+
+    def handle(self, e: ToolException) -> str:
+        handler = self.get_handler(type(e))
+        if handler is None:
+            logger.warning(f"Unhandled tool exception: {type(e).__name__}", exc_info=e)
+            return f"Unhandled tool exception | {type(e).__name__}: {e}"
+        return handler(e)
+
+# --- --- --- --- --- ---
 
 def _arguments_normalizer(arguments: str | dict) -> dict:
     if isinstance(arguments, str):
@@ -15,7 +45,7 @@ def _arguments_normalizer(arguments: str | dict) -> dict:
     elif isinstance(arguments, dict):
         return arguments
     else:
-        raise ValueError(f"Invalid arguments type: {type(arguments)}")
+        assert_never(arguments)
 
 def _result_normalizer(result: Any) -> str:
     if isinstance(result, str):
@@ -24,13 +54,18 @@
 
 @singledispatch
 def execute_tool_sync(tool: ToolLike, arguments: str | dict) -> str:
+    """
+    Raises:
+        ValueError: If the tool type is not supported.
+        JSONDecodeError: If the arguments is a string but not valid JSON.
+    """
     raise ValueError(f"Invalid tool type: {type(tool)}")
 
 @execute_tool_sync.register(FunctionType)
 @execute_tool_sync.register(MethodType)
 def _(toolfn: Callable, arguments: str | dict) -> str:
     arguments = _arguments_normalizer(arguments)
-    result = (asyncio.run(_coroutine_wrapper(toolfn(**arguments)))
+    result = (asyncio.run(toolfn(**arguments))
               if asyncio.iscoroutinefunction(toolfn)
               else toolfn(**arguments))
     return _result_normalizer(result)
@@ -38,13 +73,18 @@ def _(toolfn: Callable, arguments: str | dict) -> str:
 @execute_tool_sync.register(ToolDef)
 def _(tooldef: ToolDef, arguments: str | dict) -> str:
     arguments = _arguments_normalizer(arguments)
-    result = (asyncio.run(_coroutine_wrapper(tooldef.execute(**arguments)))
+    result = (asyncio.run(tooldef.execute(**arguments))
               if asyncio.iscoroutinefunction(tooldef.execute)
              else tooldef.execute(**arguments))
     return _result_normalizer(result)
 
 @singledispatch
 async def execute_tool(tool: ToolLike, arguments: str | dict) -> str:
+    """
+    Raises:
+        ValueError: If the tool type is not supported.
+        JSONDecodeError: If the arguments is a string but not valid JSON.
+    """
     raise ValueError(f"Invalid tool type: {type(tool)}")
 
 @execute_tool.register(FunctionType)
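
The new docstrings make the failure modes of the dispatchers explicit: string arguments are parsed as JSON by _arguments_normalizer, so malformed JSON now escapes as json.JSONDecodeError instead of being folded into a generic error string. A rough sketch of that behaviour (the add tool is a made-up example; the import path follows this diff):

import json
from dais_sdk.tool.execute import execute_tool_sync

def add(a: int, b: int) -> int:
    return a + b

print(execute_tool_sync(add, {"a": 1, "b": 2}))    # dict arguments are passed through
print(execute_tool_sync(add, '{"a": 1, "b": 2}'))  # JSON strings are parsed first

try:
    execute_tool_sync(add, '{"a": 1,')             # truncated JSON
except json.JSONDecodeError as e:
    # In LLM.execute_tool_call this is wrapped into ToolArgumentDecodeError and
    # passed to the ToolExceptionHandlerManager instead of propagating.
    print("decode failure:", e.msg)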
{dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/tool/utils.py
@@ -1,4 +1,15 @@
-from ..types.tool import ToolFn, ToolDef, ToolLike
+from typing import assert_never
+from ..types.tool import ToolDef, ToolLike
+
+def get_tool_name(tool: ToolLike) -> str:
+    if callable(tool):
+        return tool.__name__
+    elif isinstance(tool, ToolDef):
+        return tool.name
+    elif isinstance(tool, dict):
+        return tool.get("name", "")
+    else:
+        assert_never(tool)
 
 def find_tool_by_name(tools: list[ToolLike], name: str) -> ToolLike | None:
     for tool in tools:
dais_sdk-0.6.1/src/dais_sdk/types/exceptions.py
@@ -0,0 +1,56 @@
+from __future__ import annotations
+import json
+from typing import TYPE_CHECKING
+from litellm.exceptions import (
+    AuthenticationError,
+    PermissionDeniedError,
+    RateLimitError,
+    ContextWindowExceededError,
+    BadRequestError,
+    InvalidRequestError,
+    InternalServerError,
+    ServiceUnavailableError,
+    ContentPolicyViolationError,
+    APIError,
+    Timeout,
+)
+
+if TYPE_CHECKING:
+    from .tool import ToolLike
+
+class LlmToolException(Exception): pass
+
+class ToolDoesNotExistError(LlmToolException):
+    def __init__(self, tool_name: str):
+        self.tool_name = tool_name
+
+class ToolArgumentDecodeError(LlmToolException):
+    def __init__(self, tool_name: str, arguments: str, raw_error: json.JSONDecodeError):
+        self.tool_name = tool_name
+        self.arguments = arguments
+        self.raw_error = raw_error
+
+class ToolExecutionError(LlmToolException):
+    def __init__(self, tool: ToolLike, arguments: str | dict, raw_error: Exception):
+        self.tool = tool
+        self.arguments = arguments
+        self.raw_error = raw_error
+
+__all__ = [
+    "AuthenticationError",
+    "PermissionDeniedError",
+    "RateLimitError",
+    "ContextWindowExceededError",
+    "BadRequestError",
+    "InvalidRequestError",
+    "InternalServerError",
+    "ServiceUnavailableError",
+    "ContentPolicyViolationError",
+    "APIError",
+    "Timeout",
+
+    "LlmToolException",
+    "ToolDoesNotExistError",
+    "ToolArgumentDecodeError",
+    "ToolExecutionError",
+]
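
These exception classes carry structured context (tool name, raw arguments, underlying error) rather than a pre-formatted message, which is what makes custom handlers useful. An illustrative sketch of a standalone handler, assuming the imports shown in this diff (note that handle() looks up handlers by the exact exception type, not by subclass):

from dais_sdk import ToolArgumentDecodeError
from dais_sdk.tool.execute import ToolExceptionHandlerManager

manager = ToolExceptionHandlerManager()

@manager.register(ToolArgumentDecodeError)
def on_bad_arguments(e: ToolArgumentDecodeError) -> str:
    # Build a model-facing message from the structured fields.
    return (f"Arguments for tool '{e.tool_name}' were not valid JSON "
            f"({e.raw_error.msg}); received: {e.arguments!r}")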
{dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/types/message.py
@@ -59,6 +59,14 @@ class ToolMessage(ChatMessage):
         if isinstance(v, str): return v
         return json.dumps(v, ensure_ascii=False)
 
+    def with_result(self, result: str | None, error: str | None) -> "ToolMessage":
+        return ToolMessage(
+            tool_call_id=self.tool_call_id,
+            name=self.name,
+            arguments=self.arguments,
+            result=result,
+            error=error)
+
     def to_litellm_message(self) -> ChatCompletionToolMessage:
         if self.result is None and self.error is None:
             raise ValueError(f"ToolMessage({self.id}, {self.name}) is incomplete, "
{dais_sdk-0.6.0 → dais_sdk-0.6.1}/src/dais_sdk/types/request_params.py
@@ -47,9 +47,3 @@ class LlmRequestParams:
         if (tools := self.extract_tools()) is None:
             return None
         return find_tool_by_name(tools, tool_name)
-
-    class ToolDoesNotExistError(Exception):
-        def __init__(self, tool_name: str):
-            self.tool_name = tool_name
-            self.message = f"Tool \"{tool_name}\" does not exist in the request params."
-            super().__init__(self.message)
dais_sdk-0.6.0/src/dais_sdk/types/exceptions.py
@@ -1,27 +0,0 @@
-from litellm.exceptions import (
-    AuthenticationError,
-    PermissionDeniedError,
-    RateLimitError,
-    ContextWindowExceededError,
-    BadRequestError,
-    InvalidRequestError,
-    InternalServerError,
-    ServiceUnavailableError,
-    ContentPolicyViolationError,
-    APIError,
-    Timeout,
-)
-
-__all__ = [
-    "AuthenticationError",
-    "PermissionDeniedError",
-    "RateLimitError",
-    "ContextWindowExceededError",
-    "BadRequestError",
-    "InvalidRequestError",
-    "InternalServerError",
-    "ServiceUnavailableError",
-    "ContentPolicyViolationError",
-    "APIError",
-    "Timeout",
-]