hjxdl 0.1.81__py3-none-any.whl → 0.1.83__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
- hdl/_version.py +2 -2
- hdl/utils/llm/chat.py +61 -62
- {hjxdl-0.1.81.dist-info → hjxdl-0.1.83.dist-info}/METADATA +1 -1
- {hjxdl-0.1.81.dist-info → hjxdl-0.1.83.dist-info}/RECORD +6 -6
- {hjxdl-0.1.81.dist-info → hjxdl-0.1.83.dist-info}/WHEEL +1 -1
- {hjxdl-0.1.81.dist-info → hjxdl-0.1.83.dist-info}/top_level.txt +0 -0
hdl/_version.py
CHANGED
hdl/utils/llm/chat.py
CHANGED
```diff
@@ -98,6 +98,7 @@ def run_tool_with_kwargs(tool, func_kwargs):
     """
     return tool(**func_kwargs)
 
+
 class OpenAI_M():
     def __init__(
         self,
@@ -143,99 +144,97 @@ class OpenAI_M():
         if tool_desc is not None:
             self.tool_desc = self.tool_desc | tool_desc
 
-    def
+    def get_resp(
         self,
         prompt : str,
-
+        images: list = [],
+        image_keys: tuple = ("image", "image"),
         stop: list[str] | None = ["USER:", "ASSISTANT:"],
-        # history: list = [],
         model="default_model",
+        stream: bool = True,
         **kwargs: t.Any,
-    )
-        """
+    ):
+        """Get response from chat completion model.
 
         Args:
-            prompt (str): The prompt to
-
-
-
-
+            prompt (str): The prompt text to generate a response for.
+            images (list, optional): List of image URLs to include in the prompt. Defaults to [].
+            image_keys (tuple, optional): Tuple containing keys for image data. Defaults to ("image", "image").
+            stop (list[str] | None, optional): List of strings to stop the conversation. Defaults to ["USER:", "ASSISTANT:"].
+            model (str, optional): The model to use for generating the response. Defaults to "default_model".
+            stream (bool, optional): Whether to stream the response or not. Defaults to True.
+            **kwargs: Additional keyword arguments to pass to the chat completion API.
+
+        Yields:
+            str: The generated response content.
 
         Returns:
-            str: The response
+            str: The generated response content if stream is False.
         """
         content = [
             {"type": "text", "text": prompt},
         ]
-        if
-
-
-
-
-
-
-
+        if images:
+            if isinstance(images, str):
+                images = [images]
+            for img in images:
+                content.append({
+                    "type": image_keys[0],
+                    image_keys[0]: {
+                        image_keys[1]: img
+                    }
+                })
         else:
             content = prompt
+
         response = self.client.chat.completions.create(
             messages=[{
                 "role": "user",
                 "content": content
             }],
-            stream=
+            stream=stream,
             model=model,
             **kwargs
         )
-
+        if stream:
+            for chunk in response:
+                content = chunk.choices[0].delta.content
+                if content:
+                    yield content
+        else:
+            return response.choices[0].message.content
+
+    def invoke(
+        self,
+        *args,
+        **kwargs
+    ) -> str:
+        """Invoke the function with the given arguments and keyword arguments.
+
+        Args:
+            *args: Variable length argument list.
+            **kwargs: Arbitrary keyword arguments.
+
+        Returns:
+            str: The response obtained by calling the get_resp method with the provided arguments and keyword arguments.
+        """
+        return self.get_resp(*args, stream=False, **kwargs)
 
     def stream(
         self,
-
-
-        stop: list[str] | None = ["USER:", "ASSISTANT:"],
-        # history: list = [],
-        model="default_model",
-        **kwargs: t.Any,
+        *args,
+        **kwargs
     ):
-        """
+        """Stream data from the server.
 
         Args:
-
-
-            stop (list[str], optional): List of strings that indicate the end of the conversation. Defaults to ["USER:", "ASSISTANT:"].
-            model (str, optional): The model to use for completion generation. Defaults to "default_model".
-            **kwargs: Additional keyword arguments to pass to the completion generation API.
+            *args: Variable length argument list.
+            **kwargs: Arbitrary keyword arguments.
 
-
-
+        Returns:
+            Response from the server with streaming enabled.
         """
-
-            {"type": "text", "text": prompt},
-        ]
-        if image_url is not None:
-            image_content = {
-                "type": "image_url",
-                "image_url": {
-                    "url": image_url,
-                },
-            }
-            content.append(image_content)
-        else:
-            content = prompt
-        response = self.client.chat.completions.create(
-            messages=[{
-                "role": "user",
-                "content": content
-            }],
-            stream=True,
-            model=model,
-            **kwargs
-        )
-
-        for chunk in response:
-            content = chunk.choices[0].delta.content
-            if content:
-                yield content
+        return self.get_resp(*args, stream=True, **kwargs)
 
     def agent_response(
         self,
```
{hjxdl-0.1.81.dist-info → hjxdl-0.1.83.dist-info}/RECORD
CHANGED
```diff
@@ -1,5 +1,5 @@
 hdl/__init__.py,sha256=GffnD0jLJdhkd-vo989v40N90sQbofkayRBwxc6TVhQ,72
-hdl/_version.py,sha256=
+hdl/_version.py,sha256=Van4uIgB8A_E5eax96hsnvpK7WTUkaeheppc5bn2yyU,413
 hdl/args/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/args/loss_args.py,sha256=s7YzSdd7IjD24rZvvOrxLLFqMZQb9YylxKeyelSdrTk,70
 hdl/controllers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -127,7 +127,7 @@ hdl/utils/desc/template.py,sha256=a0UAkkKctt_EHY9UECsIIAwVkGPcM1Hr01HSkRMeIuw,12
 hdl/utils/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/general/glob.py,sha256=8-RCnt6L297wMIfn34ZAMCsGCZUjHG3MGglGZI1cX0g,491
 hdl/utils/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-hdl/utils/llm/chat.py,sha256=
+hdl/utils/llm/chat.py,sha256=YUoWM0N4bGgwHEjaFaYOd1xrcRmke6i0FG3GE1SW3Qc,11744
 hdl/utils/llm/embs.py,sha256=Tf0FOYrOFZp7qQpEPiSCXzlgyHH0X9HVTUtsup74a9E,7174
 hdl/utils/llm/extract.py,sha256=2sK_WJzmYIc8iuWaM9DA6Nw3_6q1O4lJ5pKpcZo-bBA,6512
 hdl/utils/llm/llama_chat.py,sha256=watcHGOaz-bv3x-yDucYlGk5f8FiqfFhwWogrl334fk,4387
@@ -136,7 +136,7 @@ hdl/utils/schedulers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 hdl/utils/schedulers/norm_lr.py,sha256=bDwCmdEK-WkgxQMFBiMuchv8Mm7C0-GZJ6usm-PQk14,4461
 hdl/utils/weather/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 hdl/utils/weather/weather.py,sha256=k11o6wM15kF8b9NMlEfrg68ak-SfSYLN3nOOflFUv-I,4381
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
-hjxdl-0.1.
+hjxdl-0.1.83.dist-info/METADATA,sha256=UiF8jIPtAdmS6H_nzjjkrNeK9wDO9t4AM76Y-m3Lwcg,903
+hjxdl-0.1.83.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+hjxdl-0.1.83.dist-info/top_level.txt,sha256=-kxwTM5JPhylp06z3zAVO3w6_h7wtBfBo2zgM6YZoTk,4
+hjxdl-0.1.83.dist-info/RECORD,,
```
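Each RECORD row has the form `path,sha256=<digest>,size`, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with trailing `=` padding stripped, per the wheel RECORD convention. A minimal sketch for recomputing a row from an unpacked wheel, using the `hdl/_version.py` entry above as the expected value:

```python
import base64
import hashlib

def record_entry(path: str) -> str:
    """Recompute a wheel RECORD row for one file: path,sha256=<digest>,size."""
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode('ascii')},{len(data)}"

# Run from the root of the unpacked hjxdl-0.1.83 wheel; expected row copied from the diff.
assert record_entry("hdl/_version.py") == \
    "hdl/_version.py,sha256=Van4uIgB8A_E5eax96hsnvpK7WTUkaeheppc5bn2yyU,413"
```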
{hjxdl-0.1.81.dist-info → hjxdl-0.1.83.dist-info}/top_level.txt
File without changes