gllm-inference-binary 0.5.9__cp312-cp312-macosx_13_0_x86_64.whl → 0.5.10b13__cp312-cp312-macosx_13_0_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gllm-inference-binary might be problematic; consult the package registry's advisory page for more details.

@@ -162,7 +162,12 @@ class GoogleLMInvoker(BaseLMInvoker):
162
162
  ```python
163
163
  LMOutput(
164
164
  response="Golden retriever is a good dog breed.",
165
- token_usage=TokenUsage(input_tokens=100, output_tokens=50),
165
+ token_usage=TokenUsage(
166
+ input_tokens=1500,
167
+ output_tokens=200,
168
+ input_token_details=InputTokenDetails(cached_tokens=1200, uncached_tokens=300),
169
+ output_token_details=OutputTokenDetails(reasoning_tokens=180, response_tokens=20),
170
+ ),
166
171
  duration=0.729,
167
172
  finish_details={"finish_reason": "STOP", "finish_message": None},
168
173
  )
@@ -5,9 +5,9 @@ from gllm_inference.schema.lm_output import LMOutput as LMOutput
5
5
  from gllm_inference.schema.message import Message as Message
6
6
  from gllm_inference.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
7
7
  from gllm_inference.schema.reasoning import Reasoning as Reasoning
8
- from gllm_inference.schema.token_usage import TokenUsage as TokenUsage
8
+ from gllm_inference.schema.token_usage import InputTokenDetails as InputTokenDetails, OutputTokenDetails as OutputTokenDetails, TokenUsage as TokenUsage
9
9
  from gllm_inference.schema.tool_call import ToolCall as ToolCall
10
10
  from gllm_inference.schema.tool_result import ToolResult as ToolResult
11
11
  from gllm_inference.schema.type_alias import EMContent as EMContent, ErrorResponse as ErrorResponse, MessageContent as MessageContent, ResponseSchema as ResponseSchema, Vector as Vector
12
12
 
13
- __all__ = ['Attachment', 'AttachmentType', 'CodeExecResult', 'EMContent', 'EmitDataType', 'ErrorResponse', 'MessageContent', 'LMOutput', 'ModelId', 'ModelProvider', 'Message', 'MessageRole', 'Reasoning', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'Vector']
13
+ __all__ = ['Attachment', 'AttachmentType', 'CodeExecResult', 'EMContent', 'EmitDataType', 'ErrorResponse', 'InputTokenDetails', 'MessageContent', 'LMOutput', 'ModelId', 'ModelProvider', 'Message', 'MessageRole', 'OutputTokenDetails', 'Reasoning', 'ResponseSchema', 'TokenUsage', 'ToolCall', 'ToolResult', 'Vector']
@@ -1,11 +1,75 @@
1
1
  from pydantic import BaseModel
2
2
 
3
+ class InputTokenDetails(BaseModel):
4
+ """Defines the input token details schema.
5
+
6
+ Attributes:
7
+ cached_tokens (int): The number of cached tokens. Defaults to 0.
8
+ uncached_tokens (int): The number of uncached tokens. Defaults to 0.
9
+ """
10
+ cached_tokens: int
11
+ uncached_tokens: int
12
+ def __add__(self, other: InputTokenDetails) -> InputTokenDetails:
13
+ """Add two InputTokenDetails objects together.
14
+
15
+ Args:
16
+ other (InputTokenDetails): The other InputTokenDetails object to add.
17
+
18
+ Returns:
19
+ InputTokenDetails: A new InputTokenDetails object with summed values.
20
+ """
21
+
22
+ class OutputTokenDetails(BaseModel):
23
+ """Defines the output token details schema.
24
+
25
+ Attributes:
26
+ reasoning_tokens (int): The number of reasoning tokens. Defaults to 0.
27
+ response_tokens (int): The number of response tokens. Defaults to 0.
28
+ """
29
+ reasoning_tokens: int
30
+ response_tokens: int
31
+ def __add__(self, other: OutputTokenDetails) -> OutputTokenDetails:
32
+ """Add two OutputTokenDetails objects together.
33
+
34
+ Args:
35
+ other (OutputTokenDetails): The other OutputTokenDetails object to add.
36
+
37
+ Returns:
38
+ OutputTokenDetails: A new OutputTokenDetails object with summed values.
39
+ """
40
+
3
41
  class TokenUsage(BaseModel):
4
42
  """Defines the token usage data structure of a language model.
5
43
 
6
44
  Attributes:
7
- input_tokens (int): The number of input tokens.
8
- output_tokens (int): The number of output tokens.
45
+ input_tokens (int): The number of input tokens. Defaults to 0.
46
+ output_tokens (int): The number of output tokens. Defaults to 0.
47
+ input_token_details (InputTokenDetails | None): The details of the input tokens. Defaults to None.
48
+ output_token_details (OutputTokenDetails | None): The details of the output tokens. Defaults to None.
9
49
  """
10
50
  input_tokens: int
11
51
  output_tokens: int
52
+ input_token_details: InputTokenDetails | None
53
+ output_token_details: OutputTokenDetails | None
54
+ @classmethod
55
+ def from_token_details(cls, input_tokens: int | None = None, output_tokens: int | None = None, cached_tokens: int | None = None, reasoning_tokens: int | None = None) -> TokenUsage:
56
+ """Creates a TokenUsage from token details.
57
+
58
+ Args:
59
+ input_tokens (int | None): The number of input tokens. Defaults to None.
60
+ output_tokens (int | None): The number of output tokens. Defaults to None.
61
+ cached_tokens (int | None): The number of cached tokens. Defaults to None.
62
+ reasoning_tokens (int | None): The number of reasoning tokens. Defaults to None.
63
+
64
+ Returns:
65
+ TokenUsage: The instantiated TokenUsage.
66
+ """
67
+ def __add__(self, other: TokenUsage) -> TokenUsage:
68
+ """Add two TokenUsage objects together.
69
+
70
+ Args:
71
+ other (TokenUsage): The other TokenUsage object to add.
72
+
73
+ Returns:
74
+ TokenUsage: A new TokenUsage object with summed values.
75
+ """
Binary file
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: gllm-inference-binary
3
- Version: 0.5.9
3
+ Version: 0.5.10b13
4
4
  Summary: A library containing components related to model inferences in Gen AI applications.
5
5
  Author: Henry Wicaksono
6
6
  Author-email: henry.wicaksono@gdplabs.id
@@ -35,7 +35,7 @@ gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=MsF3OmDo0L9aEHuTJYTgso
35
35
  gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=EV_yrj6mnV_rCDEqSZaIikfw76_rXXyDlC-w_y-m7K0,14603
36
36
  gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=IuLxgCThOSBHx1AXqnhL6yVu5_JV6hAeGBWWm5P1JCo,12423
37
37
  gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=QS84w3WpD3Oyl5HdxrucsadCmsHE8gn6Ewl3l01DCgI,9203
38
- gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=4DYsPUJ0QJWoH3VFGy0s4nXBNh7KVHa6MDPRMfCTnJQ,16543
38
+ gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=aPOlaw2rexUDhbMFaXnuKqOT7lqeKxjfeToe9LjwEUw,16787
39
39
  gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=skcbX34ZosGMFli3SLmGITJTzMj9HxkQBWGrd-zYCbU,13264
40
40
  gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=_c56ewpEQ-Ywj5ofFzRYBvQgefR7Q_WkcQt97lnIFgg,13128
41
41
  gllm_inference/lm_invoker/lm_invoker.pyi,sha256=NLLzJY5YaWckR0DHgNNxZhg-Lif41ZX-DZZcICq7NIU,7809
@@ -74,7 +74,7 @@ gllm_inference/prompt_formatter/prompt_formatter.pyi,sha256=UkcPi5ao98OGJyNRsqfh
74
74
  gllm_inference/request_processor/__init__.pyi,sha256=hVnfdNZnkTBJHnmLtN3Na4ANP0yK6AstWdIizVr2Apo,227
75
75
  gllm_inference/request_processor/lm_request_processor.pyi,sha256=7pVNb2GwITb1jTflZP498qZ321G15b16jayZuuhuO1o,5424
76
76
  gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=He-ytjwv2H5Hn312WFBAlBK96ALKTtDO3AT_80hCGTg,2321
77
- gllm_inference/schema/__init__.pyi,sha256=bJeO_4fCFPmYrVZ-4RQw0eQn62X228pB0w2tYDM-oek,1238
77
+ gllm_inference/schema/__init__.pyi,sha256=aQx7eKVsFNgJkYtbbKppa9Ww2WfTe0yzl_ewajDCo6w,1363
78
78
  gllm_inference/schema/attachment.pyi,sha256=jApuzjOHJDCz4lr4MlHzBgIndh559nbWu2Xp1fk3hso,3297
79
79
  gllm_inference/schema/code_exec_result.pyi,sha256=ZTHh6JtRrPIdQ059P1UAiD2L-tAO1_S5YcMsAXfJ5A0,559
80
80
  gllm_inference/schema/enums.pyi,sha256=Nvc_Qsd8yyiH_tCtymN39O6EZ0DT_wxYHmlKLllxC00,605
@@ -82,7 +82,7 @@ gllm_inference/schema/lm_output.pyi,sha256=GafJV0KeD-VSwWkwG1oz-uruXrQ7KDZTuoojP
82
82
  gllm_inference/schema/message.pyi,sha256=VP9YppKj2mo1esl9cy6qQO9m2mMHUjTmfGDdyUor880,2220
83
83
  gllm_inference/schema/model_id.pyi,sha256=XFfIkfetFamwVxmUifMZLehuvTcASkK7kp4OMNcteo8,5230
84
84
  gllm_inference/schema/reasoning.pyi,sha256=SlTuiDw87GdnAn-I6YOPIJRhEBiwQljM46JohG05guQ,562
85
- gllm_inference/schema/token_usage.pyi,sha256=fKXfdd-q6HrP0h7zzj16OJ5WEOn2YedsCFIgd5IyDFk,312
85
+ gllm_inference/schema/token_usage.pyi,sha256=1GTQVORV0dBNmD_jix8aVaUqxMKFF04KpLP7y2urqbk,2950
86
86
  gllm_inference/schema/tool_call.pyi,sha256=zQaVxCnkVxOfOEhBidqohU85gb4PRwnwBiygKaunamk,389
87
87
  gllm_inference/schema/tool_result.pyi,sha256=cAG7TVtB4IWJPt8XBBbB92cuY1ZsX9M276bN9aqjcvM,276
88
88
  gllm_inference/schema/type_alias.pyi,sha256=CkqX5zLML4vII7BEIXDz7ZQd211RsHtq7EJekkV2V6g,725
@@ -90,8 +90,8 @@ gllm_inference/utils/__init__.pyi,sha256=npmBmmlBv7cPHMg1hdL3S2_RelD6vk_LhCsGELh
90
90
  gllm_inference/utils/langchain.pyi,sha256=VluQiHkGigDdqLUbhB6vnXiISCP5hHqV0qokYY6dC1A,1164
91
91
  gllm_inference/utils/validation.pyi,sha256=toxBtRp-VItC_X7sNi-GDd7sjibBdWMrR0q01OI2D7k,385
92
92
  gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
93
- gllm_inference.cpython-312-darwin.so,sha256=9AwAWQZ8Zxst-1c0HO99aur3CgHHLp1A0NEc9NsNGcg,3737400
93
+ gllm_inference.cpython-312-darwin.so,sha256=5do1zeISsxwqD5A-RYK1AMbN_71gAjssXBEWcgjMeOk,3773016
94
94
  gllm_inference.pyi,sha256=fsNCXXsB4E8WhP477yGq_QOJAfOyoZA4G2PfAMBav5Y,3324
95
- gllm_inference_binary-0.5.9.dist-info/METADATA,sha256=C4ovZJsiSKuRWp_2DbNV57oJwtxC0sI8sPrKXlCHFN4,4531
96
- gllm_inference_binary-0.5.9.dist-info/WHEEL,sha256=eE2zhpXf8mNi4Sj7Wo77hQIVjvfcPTxg9pdEi0RABeA,107
97
- gllm_inference_binary-0.5.9.dist-info/RECORD,,
95
+ gllm_inference_binary-0.5.10b13.dist-info/METADATA,sha256=IyS2K3UVI8tMAXBpVFK5fWEgnvxaGFu9_Cl5HDdckhE,4535
96
+ gllm_inference_binary-0.5.10b13.dist-info/WHEEL,sha256=eE2zhpXf8mNi4Sj7Wo77hQIVjvfcPTxg9pdEi0RABeA,107
97
+ gllm_inference_binary-0.5.10b13.dist-info/RECORD,,