hjxdl-0.1.80-py3-none-any.whl → hjxdl-0.1.82-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hdl/_version.py +2 -2
- hdl/utils/llm/chat.py +61 -59
- hdl/utils/llm/vis.py +1 -0
- {hjxdl-0.1.80.dist-info → hjxdl-0.1.82.dist-info}/METADATA +1 -1
- {hjxdl-0.1.80.dist-info → hjxdl-0.1.82.dist-info}/RECORD +7 -7
- {hjxdl-0.1.80.dist-info → hjxdl-0.1.82.dist-info}/WHEEL +1 -1
- {hjxdl-0.1.80.dist-info → hjxdl-0.1.82.dist-info}/top_level.txt +0 -0
hdl/_version.py
CHANGED
hdl/utils/llm/chat.py
CHANGED
@@ -98,6 +98,7 @@ def run_tool_with_kwargs(tool, func_kwargs):
     """
     return tool(**func_kwargs)
 
+
 class OpenAI_M():
     def __init__(
         self,
@@ -143,38 +144,48 @@ class OpenAI_M():
         if tool_desc is not None:
             self.tool_desc = self.tool_desc | tool_desc
 
-    def
+    def get_resp(
         self,
         prompt : str,
-        image_url: str = None,
+        images: list = [],
+        image_keys: tuple = ("image", "image"),
         stop: list[str] | None = ["USER:", "ASSISTANT:"],
-        # history: list = [],
         model="default_model",
+        stream: bool = True,
         **kwargs: t.Any,
-    )
-        """
+    ):
+        """Get response from chat completion model.
 
         Args:
-            prompt (str): The prompt to
-
-
-
-
+            prompt (str): The prompt text to generate a response for.
+            images (list, optional): List of image URLs to include in the prompt. Defaults to [].
+            image_keys (tuple, optional): Tuple containing keys for image data. Defaults to ("image", "image").
+            stop (list[str] | None, optional): List of strings to stop the conversation. Defaults to ["USER:", "ASSISTANT:"].
+            model (str, optional): The model to use for generating the response. Defaults to "default_model".
+            stream (bool, optional): Whether to stream the response or not. Defaults to True.
+            **kwargs: Additional keyword arguments to pass to the chat completion API.
+
+        Yields:
+            str: The generated response content.
 
         Returns:
-            str: The response
+            str: The generated response content if stream is False.
         """
         content = [
             {"type": "text", "text": prompt},
         ]
-        if image_url is not None:
-            image_content = {
-                "type": "image_url",
-                "image_url": {
-                    "url": image_url,
-                },
-            }
-            content.append(image_content)
+        if images:
+            if isinstance(images, str):
+                images = [images]
+            for img in images:
+                content.append({
+                    "type": image_keys[0],
+                    image_keys[0]: {
+                        image_keys[1]: img
+                    }
+                })
+        else:
+            content = prompt
         response = self.client.chat.completions.create(
             messages=[{
                 "role": "user",
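In the rewritten get_resp above, the method now accepts a list of image URLs and assembles the multimodal message itself. Because image_keys[0] is used both as the entry's "type" and as its payload key, the diff's default ("image", "image") produces {"type": "image", "image": {"image": url}}, whereas ("image_url", "url") reproduces the OpenAI-style image_url block. A minimal standalone sketch of the payload it builds (the prompt and URL are hypothetical, and image_keys=("image_url", "url") is an assumed override of the default):

prompt = "Describe this picture."
images = ["https://example.com/cat.png"]   # hypothetical image URL
image_keys = ("image_url", "url")          # assumed OpenAI-style keys

# Mirrors the images branch added in the hunk above.
content = [{"type": "text", "text": prompt}]
if images:
    if isinstance(images, str):            # a bare string is promoted to a list
        images = [images]
    for img in images:
        content.append({
            "type": image_keys[0],
            image_keys[0]: {image_keys[1]: img},
        })
else:
    content = prompt                       # text-only requests fall back to a plain string

print(content)
# [{'type': 'text', 'text': 'Describe this picture.'},
#  {'type': 'image_url', 'image_url': {'url': 'https://example.com/cat.png'}}]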
@@ -184,54 +195,45 @@ class OpenAI_M():
             model=model,
             **kwargs
         )
-
+        if stream:
+            for chunk in response:
+                content = chunk.choices[0].delta.content
+                if content:
+                    yield content
+        else:
+            return response.choices[0].message.content
+
+    def invoke(
+        self,
+        *args,
+        **kwargs
+    ) -> str:
+        """Invoke the function with the given arguments and keyword arguments.
+
+        Args:
+            *args: Variable length argument list.
+            **kwargs: Arbitrary keyword arguments.
+
+        Returns:
+            str: The response obtained by calling the get_resp method with the provided arguments and keyword arguments.
+        """
+        return self.get_resp(*args, stream=False, **kwargs)
 
     def stream(
         self,
-        prompt : str,
-        image_url: str = None,
-        stop: list[str] | None = ["USER:", "ASSISTANT:"],
-        # history: list = [],
-        model="default_model",
-        **kwargs: t.Any,
+        *args,
+        **kwargs
     ):
-        """
+        """Stream data from the server.
 
         Args:
-
-
-            stop (list[str], optional): List of strings that indicate the end of the conversation. Defaults to ["USER:", "ASSISTANT:"].
-            model (str, optional): The model to use for completion generation. Defaults to "default_model".
-            **kwargs: Additional keyword arguments to pass to the completion generation API.
+            *args: Variable length argument list.
+            **kwargs: Arbitrary keyword arguments.
 
-
-
+        Returns:
+            Response from the server with streaming enabled.
         """
-        content = [
-            {"type": "text", "text": prompt},
-        ]
-        if image_url is not None:
-            image_content = {
-                "type": "image_url",
-                "image_url": {
-                    "url": image_url,
-                },
-            }
-            content.append(image_content)
-        response = self.client.chat.completions.create(
-            messages=[{
-                "role": "user",
-                "content": content
-            }],
-            stream=True,
-            model=model,
-            **kwargs
-        )
-
-        for chunk in response:
-            content = chunk.choices[0].delta.content
-            if content:
-                yield content
+        return self.get_resp(*args, stream=True, **kwargs)
 
     def agent_response(
         self,
hdl/utils/llm/vis.py
CHANGED
{hjxdl-0.1.80.dist-info → hjxdl-0.1.82.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=uD0kkklP47KEgakV5d1QibkwP9W5l8Jh1sG9ohg63cg,413
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -127,16 +127,16 @@ hdl/utils/desc/template.py,sha256=a0UAkkKctt_EHY9UECsIIAwVkGPcM1Hr01HSkRMeIuw,12
 hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/general/glob.py,sha256=8-RCnt6L297wMIfn34ZAMCsGCZUjHG3MGglGZI1cX0g,491
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256=
+hdl/utils/llm/chat.py,sha256=uZQuiQKL0i5cRSiJq0CPQOPS2FUmDE90mZzvk9M8FK4,11742
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
-hdl/utils/llm/vis.py,sha256=
+hdl/utils/llm/vis.py,sha256=2pI0439GWi_BEVfQJtY29Y72FkUa8jEvBeqMlwy7xkc,15716
 hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
+hjxdl-0.1.82.dist-info/METADATA,sha256=fajRkx2t1HdlBLjWP43OH9OlXsYTM4V7tOZEhfZ49zA,903
+hjxdl-0.1.82.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+hjxdl-0.1.82.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.1.82.dist-info/RECORD,,
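For reference, each RECORD line has the form path,sha256=<digest>,<size-in-bytes>, where the digest is the file's SHA-256 hash in unpadded urlsafe base64 (per PEP 376 and PEP 427). A sketch for recomputing one of the digests above; the path assumes the 0.1.82 wheel has been unpacked into the current directory:

import base64
import hashlib

def record_digest(path: str) -> str:
    """Recompute a RECORD-style digest (urlsafe base64, '=' padding stripped)."""
    with open(path, "rb") as fh:
        raw = hashlib.sha256(fh.read()).digest()
    return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

# Expected to print "uD0kkklP47KEgakV5d1QibkwP9W5l8Jh1sG9ohg63cg",
# matching the hdl/_version.py entry in the RECORD hunk above.
print(record_digest("hdl/_version.py"))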
{hjxdl-0.1.80.dist-info → hjxdl-0.1.82.dist-info}/top_level.txt
File without changes