liteai-sdk 0.3.21__py3-none-any.whl → 0.3.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
liteai_sdk/__init__.py CHANGED
@@ -19,8 +19,7 @@ from litellm.exceptions import (
 from litellm.utils import get_valid_models
 from litellm.types.utils import LlmProviders,\
     ModelResponse as LiteLlmModelResponse,\
-    ModelResponseStream as LiteLlmModelResponseStream,\
-    Choices as LiteLlmModelResponseChoices
+    ModelResponseStream as LiteLlmModelResponseStream
 from .debug import enable_debugging
 from .param_parser import ParamParser
 from .stream import AssistantMessageCollector
@@ -30,7 +29,7 @@ from .tool.utils import find_tool_by_name
 from .types import LlmRequestParams, GenerateTextResponse, StreamTextResponseSync, StreamTextResponseAsync
 from .types.exceptions import *
 from .types.message import ChatMessage, UserMessage, SystemMessage, AssistantMessage, ToolMessage,\
-    MessageChunk, TextChunk, ReasoningChunk, AudioChunk, ImageChunk, ToolCallChunk,\
+    MessageChunk, TextChunk, UsageChunk, ReasoningChunk, AudioChunk, ImageChunk, ToolCallChunk,\
     ToolCallTuple, openai_chunk_normalizer
 from .logger import logger, enable_logging
 
@@ -146,10 +145,8 @@ class LLM:
     def generate_text_sync(self, params: LlmRequestParams) -> GenerateTextResponse:
         response = completion(**self._param_parser.parse_nonstream(params))
         response = cast(LiteLlmModelResponse, response)
-        choices = cast(list[LiteLlmModelResponseChoices], response.choices)
-        message = choices[0].message
         assistant_message = AssistantMessage\
-            .from_litellm_message(message)\
+            .from_litellm_message(response)\
             .with_request_params(params)
         result: GenerateTextResponse = [assistant_message]
         if (tools_and_tool_calls := self._should_resolve_tool_calls(params, assistant_message)):
@@ -160,10 +157,8 @@ class LLM:
     async def generate_text(self, params: LlmRequestParams) -> GenerateTextResponse:
         response = await acompletion(**self._param_parser.parse_nonstream(params))
         response = cast(LiteLlmModelResponse, response)
-        choices = cast(list[LiteLlmModelResponseChoices], response.choices)
-        message = choices[0].message
         assistant_message = AssistantMessage\
-            .from_litellm_message(message)\
+            .from_litellm_message(response)\
             .with_request_params(params)
         result: GenerateTextResponse = [assistant_message]
         if (tools_and_tool_calls := self._should_resolve_tool_calls(params, assistant_message)):
@@ -250,6 +245,7 @@ __all__ = [
 
     "MessageChunk",
     "TextChunk",
+    "UsageChunk",
     "ReasoningChunk",
     "AudioChunk",
     "ImageChunk",
liteai_sdk/param_parser.py CHANGED
@@ -45,4 +45,5 @@ class ParamParser:
     def parse_stream(self, params: LlmRequestParams) -> ParsedParams:
         parsed = self._parse(params)
         parsed["stream"] = True
+        parsed["stream_options"] = {"include_usage": True}
         return parsed
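
The new stream_options entry asks the provider to report token usage on the final streamed chunk, which is what feeds the UsageChunk added in types/message.py below. A sketch of the equivalent request at the litellm layer (model name and prompt are placeholders):

    from litellm import completion

    stream = completion(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "hello"}],
        stream=True,
        stream_options={"include_usage": True},  # what parse_stream now injects
    )
    for chunk in stream:
        # With include_usage, the final chunk carries a usage object.
        usage = getattr(chunk, "usage", None)
        if usage is not None:
            print(usage.prompt_tokens, usage.completion_tokens, usage.total_tokens)
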
liteai_sdk/types/message.py CHANGED
@@ -6,10 +6,13 @@ from abc import ABC, abstractmethod
 from typing import TYPE_CHECKING, Any, Literal, cast
 from pydantic import BaseModel, ConfigDict, PrivateAttr, field_validator
 from litellm.types.utils import Message as LiteLlmMessage,\
+    ModelResponse as LiteLlmModelResponse,\
     ModelResponseStream as LiteLlmModelResponseStream,\
+    Choices as LiteLlmModelResponseChoices,\
     ChatCompletionAudioResponse,\
     ChatCompletionMessageToolCall,\
-    ChatCompletionDeltaToolCall
+    ChatCompletionDeltaToolCall,\
+    Usage as LiteLlmUsage
 from litellm.types.llms.openai import (
     AllMessageValues,
     OpenAIMessageContent,
@@ -95,12 +98,16 @@ class AssistantMessage(ChatMessage):
     tool_calls: list[ChatCompletionAssistantToolCall] | None = None
     audio: ChatCompletionAudioResponse | None = None
     images: list[ChatCompletionImageURL] | None = None
+    usage: LiteLlmUsage | None = None
     role: Literal["assistant"] = "assistant"
 
     _request_params_ref: LlmRequestParams | None = PrivateAttr(default=None)
 
     @classmethod
-    def from_litellm_message(cls, message: LiteLlmMessage) -> "AssistantMessage":
+    def from_litellm_message(cls, response: LiteLlmModelResponse) -> "AssistantMessage":
+        choices = cast(list[LiteLlmModelResponseChoices], response.choices)
+        message = choices[0].message
+
         tool_calls: list[ChatCompletionAssistantToolCall] | None = None
         if (message_tool_calls := message.get("tool_calls")) is not None:
             tool_calls = [ChatCompletionAssistantToolCall(
@@ -118,6 +125,7 @@ class AssistantMessage(ChatMessage):
             tool_calls=tool_calls,
             audio=message.get("audio"),
             images=message.get("images"),
+            usage=response.get("usage"),
         )
 
     def with_request_params(self, request_params: LlmRequestParams) -> "AssistantMessage":
@@ -196,6 +204,12 @@ class SystemMessage(ChatMessage):
 class TextChunk:
     content: str
 
+@dataclasses.dataclass
+class UsageChunk:
+    input_tokens: int
+    output_tokens: int
+    total_tokens: int
+
 @dataclasses.dataclass
 class ReasoningChunk:
     content: str
@@ -239,4 +253,10 @@ def openai_chunk_normalizer(
                 tool_call.function.name,
                 tool_call.function.arguments,
                 tool_call.index))
+    if (usage := getattr(chunk, "usage", None)) is not None:
+        usage = cast(LiteLlmUsage, usage)
+        result.append(UsageChunk(
+            input_tokens=usage.prompt_tokens,
+            output_tokens=usage.completion_tokens,
+            total_tokens=usage.total_tokens))
     return result
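
Taken together, AssistantMessage now exposes a usage field for non-streaming calls and openai_chunk_normalizer emits a UsageChunk for streaming ones. A small sketch of consuming both on the caller side (the helper functions are illustrative, not part of the SDK):

    from litellm.types.utils import Usage as LiteLlmUsage

    from liteai_sdk import AssistantMessage, TextChunk, UsageChunk

    def usage_line(msg: AssistantMessage) -> str:
        # Token counts attached by from_litellm_message, when the provider reports them.
        usage: LiteLlmUsage | None = msg.usage
        if usage is None:
            return "usage not reported"
        return f"{usage.prompt_tokens} prompt + {usage.completion_tokens} completion = {usage.total_tokens} tokens"

    def print_stream(chunks) -> None:
        # Normalized chunks as produced by openai_chunk_normalizer.
        for chunk in chunks:
            if isinstance(chunk, TextChunk):
                print(chunk.content, end="")
            elif isinstance(chunk, UsageChunk):
                print(f"\n[{chunk.input_tokens} in / {chunk.output_tokens} out / {chunk.total_tokens} total]")
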
liteai_sdk-0.3.21.dist-info/METADATA → liteai_sdk-0.3.22.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: liteai_sdk
-Version: 0.3.21
+Version: 0.3.22
 Summary: A wrapper of LiteLLM
 Author-email: BHznJNs <bhznjns@outlook.com>
 Requires-Python: >=3.10
liteai_sdk-0.3.21.dist-info/RECORD → liteai_sdk-0.3.22.dist-info/RECORD CHANGED
@@ -1,15 +1,15 @@
-liteai_sdk/__init__.py,sha256=KTHDeLyGGtVA-H8nJhJMBr_rKFV1h5cD5voZ8oPXI00,10608
+liteai_sdk/__init__.py,sha256=rLAihgUHA6hrh9LZSWsAoJdZWH8bIaUYAuDC1AXIb_w,10341
 liteai_sdk/debug.py,sha256=T7qIy1BeeUGlF40l9JCMMVn8pvvMJAEQeG4adQbOydA,69
 liteai_sdk/logger.py,sha256=99vJAQRKcu4CuHgZYAJ2zDQtGea6Bn3vJJrS-mtza7c,677
-liteai_sdk/param_parser.py,sha256=xykvUesZzwZNf4-n1j4JfVk0L2y_wvnSWSsHo5vjBU8,1655
+liteai_sdk/param_parser.py,sha256=KjVnTnW2cr-mJMSxhBUG__GDzTk-mKO4wlbM6Z3lODM,1714
 liteai_sdk/stream.py,sha256=T9MLmgPC8te6qvSkBOh7vkl-I4OGCKuW1kEN6RkiCe0,3176
 liteai_sdk/tool/__init__.py,sha256=c1qJaEpoYlgOCtAjFODhrSR73ZW17OuamsO__yeYAkY,12150
 liteai_sdk/tool/execute.py,sha256=1CfRlJZgqoev42fDH4vygXyEtCEEBPcRfbqaP77jxu4,2462
 liteai_sdk/tool/utils.py,sha256=Djd1-EoLPfIqgPbWWvOreozQ76NHX4FZ6OXc1evKqPM,409
 liteai_sdk/types/__init__.py,sha256=CMmweIGMgreZlbvBtRTKfvdcC7war2ApLNf-9Fz0yzc,1006
 liteai_sdk/types/exceptions.py,sha256=hIGu06htOJxfEBAHx7KTvLQr0Y8GYnBLFJFlr_IGpDs,602
-liteai_sdk/types/message.py,sha256=AnhJ5wKKcWuAt0lW3mPXpIyvUBy3u-iFLa1dpeUTp18,8785
-liteai_sdk-0.3.21.dist-info/licenses/LICENSE,sha256=cTeVgQVJJcRdm1boa2P1FBnOeXfA_egV6s4PouyrCxg,1064
-liteai_sdk-0.3.21.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
-liteai_sdk-0.3.21.dist-info/METADATA,sha256=uUYWHL4MKkSTsqokLBEN2EcJd60HkinQthNXRRTabzU,3024
-liteai_sdk-0.3.21.dist-info/RECORD,,
+liteai_sdk/types/message.py,sha256=rj-h_YGdUH9x87_kToiJfyy_NV6VymZrcEnPFw1nYNU,9575
+liteai_sdk-0.3.22.dist-info/licenses/LICENSE,sha256=cTeVgQVJJcRdm1boa2P1FBnOeXfA_egV6s4PouyrCxg,1064
+liteai_sdk-0.3.22.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+liteai_sdk-0.3.22.dist-info/METADATA,sha256=Nol0OgniIOeSUQ1pOKPhnKBqRBnmJa4cmV17dDVGupk,3024
+liteai_sdk-0.3.22.dist-info/RECORD,,