inferencesh 0.4.18.tar.gz → 0.4.20.tar.gz
- {inferencesh-0.4.18/src/inferencesh.egg-info → inferencesh-0.4.20}/PKG-INFO +1 -1
- {inferencesh-0.4.18 → inferencesh-0.4.20}/pyproject.toml +1 -1
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh/models/llm.py +20 -4
- {inferencesh-0.4.18 → inferencesh-0.4.20/src/inferencesh.egg-info}/PKG-INFO +1 -1
- {inferencesh-0.4.18 → inferencesh-0.4.20}/LICENSE +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/README.md +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/setup.cfg +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh/__init__.py +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh/client.py +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh/models/__init__.py +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh/models/base.py +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh/models/file.py +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh/utils/__init__.py +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh/utils/download.py +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh/utils/storage.py +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh.egg-info/SOURCES.txt +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh.egg-info/dependency_links.txt +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh.egg-info/entry_points.txt +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh.egg-info/requires.txt +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/src/inferencesh.egg-info/top_level.txt +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/tests/test_client.py +0 -0
- {inferencesh-0.4.18 → inferencesh-0.4.20}/tests/test_sdk.py +0 -0
src/inferencesh/models/llm.py

@@ -21,7 +21,6 @@ class Message(BaseAppInput):
     role: ContextMessageRole
     content: str
 
-
 class ContextMessage(BaseAppInput):
     role: ContextMessageRole = Field(
         description="the role of the message. user, assistant, or system",
@@ -33,6 +32,10 @@ class ContextMessage(BaseAppInput):
         description="the image file of the message",
         default=None
     )
+    tool_calls: Optional[List[Dict[str, Any]]] = Field(
+        description="the tool calls of the message",
+        default=None
+    )
 
 class BaseLLMInput(BaseAppInput):
     """Base class with common LLM fields."""
@@ -53,8 +56,12 @@ class BaseLLMInput(BaseAppInput):
            ]
        ]
    )
+    role: ContextMessageRole = Field(
+        description="the role of the input text",
+        default=ContextMessageRole.USER
+    )
    text: str = Field(
-        description="the
+        description="the input text to use for the model",
        examples=[
            "write a haiku about artificial general intelligence"
        ]
@@ -230,6 +237,13 @@ def build_messages(
        images = [msg.image for msg in messages if msg.image]
        image = images[0] if images else None  # TODO: handle multiple images
        return ContextMessage(role=messages[0].role, text=text, image=image)
+
+    def merge_tool_calls(messages: List[ContextMessage]) -> List[Dict[str, Any]]:
+        tool_calls = []
+        for msg in messages:
+            if msg.tool_calls:
+                tool_calls.extend(msg.tool_calls)
+        return tool_calls
 
    user_input_text = ""
    if hasattr(input_data, "text"):
@@ -255,14 +269,16 @@ def build_messages(
        else:
            messages.append({
                "role": current_role,
-                "content": render_message(merge_messages(current_messages), allow_multipart=multipart)
+                "content": render_message(merge_messages(current_messages), allow_multipart=multipart),
+                "tool_calls": merge_tool_calls(current_messages)
            })
            current_messages = [msg]
            current_role = msg.role
    if len(current_messages) > 0:
        messages.append({
            "role": current_role,
-            "content": render_message(merge_messages(current_messages), allow_multipart=multipart)
+            "content": render_message(merge_messages(current_messages), allow_multipart=multipart),
+            "tool_calls": merge_tool_calls(current_messages)
        })
 
    return messages
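In practical terms, the llm.py change threads per-message tool calls through build_messages: ContextMessage gains an optional tool_calls field, and each same-role run of messages now contributes a concatenated "tool_calls" list alongside its rendered "content". The sketch below reproduces that merging behavior in isolation; the dataclass stand-in for ContextMessage and the sample tool-call payload are illustrative assumptions (the real class subclasses BaseAppInput and uses pydantic-style Field declarations), and only the merge_tool_calls logic is taken from the diff.

from dataclasses import dataclass
from typing import Any, Dict, List, Optional

# Stand-in for inferencesh's ContextMessage, for illustration only.
@dataclass
class ContextMessage:
    role: str
    text: str = ""
    tool_calls: Optional[List[Dict[str, Any]]] = None

def merge_tool_calls(messages: List[ContextMessage]) -> List[Dict[str, Any]]:
    # Same logic as the helper added in 0.4.20: collect tool_calls from every
    # message in a same-role run, skipping messages that have none.
    tool_calls: List[Dict[str, Any]] = []
    for msg in messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    return tool_calls

if __name__ == "__main__":
    run = [
        ContextMessage(role="assistant", text="Let me look that up."),
        ContextMessage(
            role="assistant",
            # Hypothetical tool-call payload, shaped like a typical chat-API entry.
            tool_calls=[{"id": "call_1",
                         "function": {"name": "get_weather",
                                      "arguments": '{"city": "Paris"}'}}],
        ),
    ]
    # 0.4.18 merged a same-role run into {"role", "content"} only;
    # 0.4.20 also attaches the concatenated tool_calls list.
    print(merge_tool_calls(run))

The other addition, a role field on BaseLLMInput defaulting to ContextMessageRole.USER, appears to let callers attribute the top-level input text to a different role without constructing a full context message.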