gllm-inference-binary 0.5.53__cp312-cp312-win_amd64.whl → 0.5.55__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gllm-inference-binary might be problematic.

@@ -1,6 +1,8 @@
-from gllm_core.schema import Chunk as Chunk
+from _typeshed import Incomplete
+from gllm_core.schema import Chunk
 from gllm_inference.schema.attachment import Attachment as Attachment
 from gllm_inference.schema.code_exec_result import CodeExecResult as CodeExecResult
+from gllm_inference.schema.enums import LMOutputType as LMOutputType
 from gllm_inference.schema.mcp import MCPCall as MCPCall
 from gllm_inference.schema.reasoning import Reasoning as Reasoning
 from gllm_inference.schema.token_usage import TokenUsage as TokenUsage
@@ -8,37 +10,179 @@ from gllm_inference.schema.tool_call import ToolCall as ToolCall
 from pydantic import BaseModel
 from typing import Any
 
+LMOutputData = str | dict[str, Any] | BaseModel | Attachment | ToolCall | Reasoning | Chunk | CodeExecResult | MCPCall
+logger: Incomplete
+
+class LMOutputItem(BaseModel):
+    """Defines the output item of a language model.
+
+    Attributes:
+        type (str): The type of the output item.
+        output (LMOutputData): The output data of the output item.
+    """
+    type: str
+    output: LMOutputData
+
 class LMOutput(BaseModel):
     """Defines the output of a language model.
 
     Attributes:
-        response (str): The text response. Defaults to an empty string.
-        attachments (list[Attachment]): The attachments, if the language model decides to output attachments.
-            Defaults to an empty list.
-        tool_calls (list[ToolCall]): The tool calls, if the language model decides to invoke tools.
-            Defaults to an empty list.
-        structured_output (dict[str, Any] | BaseModel | None): The structured output, if a response schema is defined
-            for the language model. Defaults to None.
+        outputs (list[LMOutputItem]): The outputs of the language model in sequential order. Defaults to an empty list.
         token_usage (TokenUsage | None): The token usage analytics, if requested. Defaults to None.
         duration (float | None): The duration of the invocation in seconds, if requested. Defaults to None.
         finish_details (dict[str, Any]): The details about how the generation finished, if requested.
            Defaults to an empty dictionary.
-        reasoning (list[Reasoning]): The reasoning, if the language model is configured to output reasoning.
-            Defaults to an empty list.
-        citations (list[Chunk]): The citations, if the language model outputs citations. Defaults to an empty list.
-        code_exec_results (list[CodeExecResult]): The code execution results, if the language model decides to
-            execute code. Defaults to an empty list.
-        mcp_calls (list[MCPCall]): The MCP calls, if the language model decides to invoke MCP tools.
-            Defaults to an empty list.
+
+        text (str): The first text response.
+        structured_output (dict[str, Any] | BaseModel | None): The first structured output.
+
+        texts (list[str]): The texts from the outputs.
+        structured_outputs (list[dict[str, Any] | BaseModel]): The structured outputs from the outputs.
+        attachments (list[Attachment]): The attachments from the outputs.
+        tool_calls (list[ToolCall]): The tool calls from the outputs.
+        thinkings (list[Reasoning]): The thinkings from the outputs.
+        citations (list[Chunk]): The citations from the outputs.
+        code_exec_results (list[CodeExecResult]): The code exec results from the outputs.
+        mcp_calls (list[MCPCall]): The MCP calls from the outputs.
+
+        response (str): Deprecated. Replaced by `text`.
+        reasoning (list[Reasoning]): Deprecated. Replaced by `thinkings`.
     """
-    response: str
-    attachments: list[Attachment]
-    tool_calls: list[ToolCall]
-    structured_output: dict[str, Any] | BaseModel | None
+    outputs: list[LMOutputItem]
     token_usage: TokenUsage | None
     duration: float | None
     finish_details: dict[str, Any]
-    reasoning: list[Reasoning]
-    citations: list[Chunk]
-    code_exec_results: list[CodeExecResult]
-    mcp_calls: list[MCPCall]
+    @property
+    def response(self) -> str:
+        """Deprecated property to get the first text response from the LMOutput.
+
+        Returns:
+            str: The first text response from the LMOutput.
+        """
+    @property
+    def text(self) -> str:
+        """Get the first text from the LMOutput.
+
+        Returns:
+            str: The first text from the LMOutput.
+        """
+    @property
+    def structured_output(self) -> dict[str, Any] | BaseModel | None:
+        """Deprecated property to get the first structured output from the LMOutput.
+
+        Returns:
+            dict[str, Any] | BaseModel | None: The first structured output from the LMOutput.
+        """
+    @property
+    def texts(self) -> list[str]:
+        """Get the texts from the LMOutput.
+
+        Returns:
+            list[str]: The texts from the LMOutput.
+        """
+    @property
+    def structured_outputs(self) -> list[dict[str, Any] | BaseModel]:
+        """Get the structured outputs from the LMOutput.
+
+        Returns:
+            list[dict[str, Any] | BaseModel]: The structured outputs from the LMOutput.
+        """
+    @property
+    def attachments(self) -> list[Attachment]:
+        """Get the attachments from the LMOutput.
+
+        Returns:
+            list[Attachment]: The attachments from the LMOutput.
+        """
+    @property
+    def tool_calls(self) -> list[ToolCall]:
+        """Get the tool calls from the LMOutput.
+
+        Returns:
+            list[ToolCall]: The tool calls from the LMOutput.
+        """
+    @property
+    def reasoning(self) -> list[Reasoning]:
+        """Deprecated property to get the thinkings from the LMOutput.
+
+        Returns:
+            list[Reasoning]: The thinkings from the LMOutput.
+        """
+    @property
+    def thinkings(self) -> list[Reasoning]:
+        """Get the thinkings from the LMOutput.
+
+        Returns:
+            list[Reasoning]: The thinkings from the LMOutput.
+        """
+    @property
+    def citations(self) -> list[Chunk]:
+        """Get the citations from the LMOutput.
+
+        Returns:
+            list[Chunk]: The citations from the LMOutput.
+        """
+    @property
+    def code_exec_results(self) -> list[CodeExecResult]:
+        """Get the code exec results from the LMOutput.
+
+        Returns:
+            list[CodeExecResult]: The code exec results from the LMOutput.
+        """
+    @property
+    def mcp_calls(self) -> list[MCPCall]:
+        """Get the MCP calls from the LMOutput.
+
+        Returns:
+            list[MCPCall]: The MCP calls from the LMOutput.
+        """
+    def add_text(self, text: str | list[str]) -> None:
+        """Add an output or a list of outputs to the LMOutput.
+
+        Args:
+            text (str | list[str]): The text or a list of texts to add.
+        """
+    def add_attachment(self, attachment: Attachment | list[Attachment]) -> None:
+        """Add an attachment or a list of attachments to the LMOutput.
+
+        Args:
+            attachment (Attachment | list[Attachment]): The attachment or a list of attachments to add.
+        """
+    def add_tool_call(self, tool_call: ToolCall | list[ToolCall]) -> None:
+        """Add a tool call or a list of tool calls to the LMOutput.
+
+        Args:
+            tool_call (ToolCall | list[ToolCall]): The tool call or a list of tool calls to add.
+        """
+    def add_structured(self, structured: dict[str, Any] | BaseModel | list[dict[str, Any] | BaseModel]) -> None:
+        """Add a structured output or a list of structured outputs to the LMOutput.
+
+        Args:
+            structured (dict[str, Any] | BaseModel | list[dict[str, Any] | BaseModel]): The structured output
+                or a list of structured outputs to add.
+        """
+    def add_thinking(self, thinking: Reasoning | list[Reasoning]) -> None:
+        """Add a thinking or a list of thoughts to the LMOutput.
+
+        Args:
+            thinking (Reasoning | list[Reasoning]): The thinking or a list of thoughts to add.
+        """
+    def add_citation(self, citation: Chunk | list[Chunk]) -> None:
+        """Add a citation or a list of citations to the LMOutput.
+
+        Args:
+            citation (Chunk | list[Chunk]): The citation or a list of citations to add.
+        """
+    def add_code_exec_result(self, code_exec_result: CodeExecResult | list[CodeExecResult]) -> None:
+        """Add a code exec result or a list of code exec results to the LMOutput.
+
+        Args:
+            code_exec_result (CodeExecResult | list[CodeExecResult]): The code exec result or a list of code exec
+                results to add.
+        """
+    def add_mcp_call(self, mcp_call: MCPCall | list[MCPCall]) -> None:
+        """Add an MCP call or a list of MCP calls to the LMOutput.
+
+        Args:
+            mcp_call (MCPCall | list[MCPCall]): The MCP call or a list of MCP calls to add.
+        """
Binary file
gllm_inference.pyi CHANGED
@@ -113,7 +113,6 @@ import inspect
 import time
 import jsonschema
 import gllm_inference.lm_invoker.batch.BatchOperations
-import gllm_inference.schema.LMEventType
 import gllm_inference.schema.MessageContent
 import __future__
 import gllm_inference.schema.ActivityEvent
@@ -1,12 +1,12 @@
 Metadata-Version: 2.2
 Name: gllm-inference-binary
-Version: 0.5.53
+Version: 0.5.55
 Summary: A library containing components related to model inferences in Gen AI applications.
 Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
 Requires-Python: <3.14,>=3.11
 Description-Content-Type: text/markdown
 Requires-Dist: poetry<3.0.0,>=2.1.3
-Requires-Dist: gllm-core-binary<0.4.0,>=0.3.21
+Requires-Dist: gllm-core-binary<0.4.0,>=0.3.23
 Requires-Dist: aiohttp<4.0.0,>=3.12.14
 Requires-Dist: filetype<2.0.0,>=1.2.0
 Requires-Dist: httpx<0.29.0,>=0.28.0
@@ -1,7 +1,7 @@
-gllm_inference.cp312-win_amd64.pyd,sha256=Tx1FL0TG8Lbfz0JEMchI-QxpeCj3_JjqNrW70HIp3mg,3836416
-gllm_inference.pyi,sha256=-TkA7sEPGZVJZwbUGO5OwfO3J-fSrXHLwvAQg01wCfQ,5137
+gllm_inference.cp312-win_amd64.pyd,sha256=pTCkTdXb9piCXj9HCxKwt6fWnE10xg2TcQ6Qoojmzek,3915264
+gllm_inference.pyi,sha256=PuOcOQY2Xg84ZvIx4_391WJlWJTNSFX7zR6_AP28MiQ,5096
 gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-gllm_inference/constants.pyi,sha256=PncjVw-mkzcJ3ln1ohvVZGdJ-TD-VZy1Ygn4Va8Z7i0,350
+gllm_inference/constants.pyi,sha256=8jIYOyxJYVWUYXSXF3vag9HhHwjq1iU9tzPiosRHkWk,328
 gllm_inference/builder/__init__.pyi,sha256=-bw1uDx7CAM7pkvjvb1ZXku9zXlQ7aEAyC83KIn3bz8,506
 gllm_inference/builder/_build_invoker.pyi,sha256=SZst1ocnRntcA7lWKp44lMJ3yRA5GP_jsUZkcsxwTjw,876
 gllm_inference/builder/build_em_invoker.pyi,sha256=Z09hP6_ziumI2XB-7HCdyIE4AyoV1oL9DBdoU2G8izo,5879
@@ -41,19 +41,19 @@ gllm_inference/exceptions/error_parser.pyi,sha256=4aiJZhBzBOqlhdmpvaCvildGy7_Xxl
 gllm_inference/exceptions/exceptions.pyi,sha256=6y3ECgHAStqMGgQv8Dv-Ui-5PDD07mSj6qaRZeSWea4,5857
 gllm_inference/exceptions/provider_error_map.pyi,sha256=vWa4ZIHn7qIghECGvO-dS2KzOmf3c10GRWKZ4YDPnSQ,1267
 gllm_inference/lm_invoker/__init__.pyi,sha256=L2nlkj13WwWbDYEBtM0mlAj0-UbSilMjVLpCJ_0Eock,1502
-gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=JSgKUk9d1ZHlitv_ZjHlAk2hIW-J7u6yslVHflIeUro,16726
-gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=FYfRNPG-oD4wIfitjTHnGib1uMZL7Pid0gbrRsymAHU,14601
-gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=IMFD6Nr_4tkDDMy1WveOz91u_v1rjplVdtgep9UXsxw,11851
-gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=LR0EM4vTfufq9OWk8JVIwLyFeJFTguPNmPgJBUooSq4,8342
-gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=aSmEgoYj_V72Nb6erDResphw9RaHfbE5C6PhqpMfEeQ,17674
-gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=tJIxkFUKjLF-yz0niaDjN3L0QNCbn4sT8hmPKtERpog,12742
-gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=IJxRUkmgXY8oQwS7tJoskO8fiESB7M4pyvpE64pyXDo,12648
-gllm_inference/lm_invoker/lm_invoker.pyi,sha256=lnp2DGUsxRunQDL6EbWDrZarYQaczq5fSBxoXPZ-dzE,8680
-gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi,sha256=uYJFgi4tJGab77232IC1gdoU9h9AqoClIUj6tM6O47s,15177
+gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=_Dst_88LOpC-FN01hApihxUxuripiCGSOax5R_-bQEE,15496
+gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=uV98H2nJsElCTsxAuInZ9KSk1jOTq6SROAGQRPR-_r0,13173
+gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=qXmFK6zsOM3nPfueEhY5pAfG24bZytA1jqemPa63vLY,10951
+gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=FnpayOW_Zi0pWFSawLX8XahEnknbnpsRWrkhKZe8Y3U,8035
+gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=GmZZmHBN_7j8cLdC3l-AnHDOLdbokBWwdaY6tH5QGqQ,16686
+gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=ull3cX-iUT4hYMbixcxqfrNUxR8ZoR4Vt9ACVILQWSM,12126
+gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=qG8pPTiDJZR2e7wr5Q2VyceC227tz3QybX3UPihT5ng,11400
+gllm_inference/lm_invoker/lm_invoker.pyi,sha256=L_PHRCeHo0dNs6BjnB8H29irGib-qhxKYf7F7pZlU0E,8652
+gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi,sha256=tsv2qSnPTVHEWlt1y-6obIZYAwBNrT5gTmLMZ6nrhnE,13973
 gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=T9sShA_9fgEuaaAuT2gJZq_EYNbEhf3IkWwMCwfszY8,4244
-gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=10iKCyleqHNbJc8M1rj3ogRcNlNxcVgyk0v6TcS6gf4,23452
-gllm_inference/lm_invoker/portkey_lm_invoker.pyi,sha256=BmZ5TFiQx3-6Ijf6J2ICzP6SCfnOFUVTPRLijv85oU0,13465
-gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=gyi12K7M9HkjNX6pU6NVv5Uq3-aHErixO-PVhHjioo8,14632
+gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=85eAaUdqe1sFULSDs67auVdLAdmaTrS43jTzWOQKH50,21582
+gllm_inference/lm_invoker/portkey_lm_invoker.pyi,sha256=FYOp4BaDfOtompWIRhDqzMVVSK-TiFyw7JA4TznANQE,15236
+gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=b4fQvazDBCTIEJw7ywxWUyRkZCKFk-ny9DGJKYiQLj8,13118
 gllm_inference/lm_invoker/batch/__init__.pyi,sha256=vJOTHRJ83oq8Bq0UsMdID9_HW5JAxr06gUs4aPRZfEE,130
 gllm_inference/lm_invoker/batch/batch_operations.pyi,sha256=o2U17M41RKVFW6j_oxy-SxU1JqUtVt75pKRxrqXzorE,5499
 gllm_inference/lm_invoker/schema/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -109,15 +109,15 @@ gllm_inference/realtime_chat/output_streamer/output_streamer.pyi,sha256=5P9NQ0aJ
 gllm_inference/request_processor/__init__.pyi,sha256=giEme2WFQhgyKiBZHhSet0_nKSCHwGy-_2p6NRzg0Zc,231
 gllm_inference/request_processor/lm_request_processor.pyi,sha256=0fy1HyILCVDw6y46E-7tLnQTRYx4ppeRMe0QP6t9Jyw,5990
 gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=LYHq-zLoXEMel1LfVdYv7W3BZ8WtBLo_WWFjRf10Yto,6512
-gllm_inference/schema/__init__.pyi,sha256=Bpbo6a4NqSwJJnPBRKAKx2gAdGiDl1tsak-vJxfZ6UU,2284
+gllm_inference/schema/__init__.pyi,sha256=4qSVD761xZz1r_GML6g7DDAJhg4nrmG17eYdfocRk-I,2422
 gllm_inference/schema/activity.pyi,sha256=atrU4OwLesA9FEt1H7K3gsUWYNdOqpI5i2VdWkmo6cs,2367
-gllm_inference/schema/attachment.pyi,sha256=9zgAjGXBjLfzPGaKi68FMW6b5mXdEA352nDe-ynOSvY,3385
+gllm_inference/schema/attachment.pyi,sha256=NyvbdtjRpPwdrCnGorDCiuVsqduidiomKCb-nGvbfqc,3334
 gllm_inference/schema/code_exec_result.pyi,sha256=WQ-ARoGM9r6nyRX-A0Ro1XKiqrc9R3jRYXZpu_xo5S4,573
 gllm_inference/schema/config.pyi,sha256=NVmjQK6HipIE0dKSfx12hgIC0O-S1HEcAc-TWlXAF5A,689
-gllm_inference/schema/enums.pyi,sha256=wbD5Qifv9y0c3_FR_M4WBvlDLzq0bQbWwox_Lw3f-KM,1820
-gllm_inference/schema/events.pyi,sha256=YStRTYGtYlM0a46AfCuBwEaijsRujTSkEusJ-M6cvSY,4810
+gllm_inference/schema/enums.pyi,sha256=aJjmCCUV4ASDM8VTiSJlxPxdapKkxXjS4_NqRplyUnE,2145
+gllm_inference/schema/events.pyi,sha256=_CKuGNzb3j2Y1dOB2yssFkT_9FQz1AY4J_ApCwKNizU,4743
 gllm_inference/schema/lm_input.pyi,sha256=HxQiZgY7zcXh_Dw8nK8LSeBTZEHMPZVwmPmnfgSsAbs,197
-gllm_inference/schema/lm_output.pyi,sha256=DIV8BiIOPaSnMKxzKzH_Mp7j7-MScWCvmllegJDLqFg,2479
+gllm_inference/schema/lm_output.pyi,sha256=Rr5E5GRwvsLooxGTf2c0618OD0-OMkjFUz64GeHR8Hw,7597
 gllm_inference/schema/mcp.pyi,sha256=4SgQ83pEowfWm2p-w9lupV4NayqqVBOy7SuYxIFeWRs,1045
 gllm_inference/schema/message.pyi,sha256=jJV6A0ihEcun2OhzyMtNkiHnf7d6v5R-GdpTBGfJ0AQ,2272
 gllm_inference/schema/model_id.pyi,sha256=Ozu46pp1uaPPvXJ6SPKpiL1vETPJqErEjNAYfOb5b6Y,6070
@@ -131,7 +131,7 @@ gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXv
 gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
 gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-gllm_inference_binary-0.5.53.dist-info/METADATA,sha256=2MZFh1ZORaN9wzbchWs1EfWVxqUuKsvW7zECd4lG6As,5945
-gllm_inference_binary-0.5.53.dist-info/WHEEL,sha256=x5rgv--I0NI0IT1Lh9tN1VG2cI637p3deednwYLKnxc,96
-gllm_inference_binary-0.5.53.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
-gllm_inference_binary-0.5.53.dist-info/RECORD,,
+gllm_inference_binary-0.5.55.dist-info/METADATA,sha256=nr6YRSn468AAP7i7SXNqTR5678Uwk7vkkLMwuViNlvo,5945
+gllm_inference_binary-0.5.55.dist-info/WHEEL,sha256=x5rgv--I0NI0IT1Lh9tN1VG2cI637p3deednwYLKnxc,96
+gllm_inference_binary-0.5.55.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
+gllm_inference_binary-0.5.55.dist-info/RECORD,,