inferencesh 0.4.24__tar.gz → 0.4.25__tar.gz
- {inferencesh-0.4.24/src/inferencesh.egg-info → inferencesh-0.4.25}/PKG-INFO +1 -1
- {inferencesh-0.4.24 → inferencesh-0.4.25}/pyproject.toml +1 -1
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh/models/llm.py +113 -9
- {inferencesh-0.4.24 → inferencesh-0.4.25/src/inferencesh.egg-info}/PKG-INFO +1 -1
- {inferencesh-0.4.24 → inferencesh-0.4.25}/LICENSE +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/README.md +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/setup.cfg +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh/__init__.py +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh/client.py +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh/models/__init__.py +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh/models/base.py +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh/models/file.py +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh/utils/__init__.py +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh/utils/download.py +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh/utils/storage.py +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh.egg-info/SOURCES.txt +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh.egg-info/dependency_links.txt +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh.egg-info/entry_points.txt +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh.egg-info/requires.txt +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/src/inferencesh.egg-info/top_level.txt +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/tests/test_client.py +0 -0
- {inferencesh-0.4.24 → inferencesh-0.4.25}/tests/test_sdk.py +0 -0
@@ -37,6 +37,10 @@ class ContextMessage(BaseAppInput):
         description="the tool calls of the message",
         default=None
     )
+    tool_call_id: Optional[str] = Field(
+        description="the tool call id for tool role messages",
+        default=None
+    )
 
 class BaseLLMInput(BaseAppInput):
     """Base class with common LLM fields."""
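Downstream (see the build_messages changes below), this field is what lets a tool-role message echo the id of the call it answers. A minimal sketch, assuming ContextMessageRole exposes a tool member — the diff shows the enum in use but not its members:

from inferencesh.models.llm import ContextMessage, ContextMessageRole

tool_msg = ContextMessage(
    role=ContextMessageRole.TOOL,       # assumed member name; "tool" role per OpenAI
    text='{"temperature_c": 21}',       # tool output carried as the message text
    tool_call_id="call_abc123",         # echoes the id from the model's tool call
)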
@@ -78,6 +82,13 @@ class ImageCapabilityMixin(BaseModel):
         default=None,
         contentMediaType="image/*",
     )
+
+class MultipleImageCapabilityMixin(BaseModel):
+    """Mixin for models that support image inputs."""
+    images: Optional[List[File]] = Field(
+        description="the images to use for the model",
+        default=None,
+    )
 
 class ReasoningCapabilityMixin(BaseModel):
     """Mixin for models that support reasoning."""
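If the new mixin composes like the existing capability mixins, opting an input model into multi-image support might look like this sketch; the subclass name, the File(uri=...) call, and the assumption that BaseLLMInput's remaining fields have defaults are all illustrative:

from inferencesh.models.llm import BaseLLMInput, MultipleImageCapabilityMixin
from inferencesh.models.file import File

class MultiImageLLMInput(BaseLLMInput, MultipleImageCapabilityMixin):
    """Hypothetical input model accepting several images per request."""

payload = MultiImageLLMInput(images=[File(uri="https://example.com/a.png")])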
@@ -92,6 +103,10 @@ class ToolsCapabilityMixin(BaseModel):
         description="tool definitions for function calling",
         default=None
     )
+    tool_call_id: Optional[str] = Field(
+        description="the tool call id for tool role messages",
+        default=None
+    )
 
 # Example of how to use:
 class LLMInput(BaseLLMInput):
@@ -227,6 +242,13 @@ def build_messages(
                 parts.append({"type": "image_url", "image_url": {"url": image_data_uri}})
             elif msg.image.uri:
                 parts.append({"type": "image_url", "image_url": {"url": msg.image.uri}})
+        if msg.images:
+            for image in msg.images:
+                if image.path:
+                    image_data_uri = image_to_base64_data_uri(image.path)
+                    parts.append({"type": "image_url", "image_url": {"url": image_data_uri}})
+                elif image.uri:
+                    parts.append({"type": "image_url", "image_url": {"url": image.uri}})
         if allow_multipart:
             return parts
         if len(parts) == 1 and parts[0]["type"] == "text":
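The net effect of the new images branch: one extra image_url part per image, appended after any text or single-image parts. Illustrative values:

parts = [
    {"type": "text", "text": "Compare these two photos."},
    {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBOR..."}},  # from image.path
    {"type": "image_url", "image_url": {"url": "https://example.com/b.jpg"}},       # from image.uri
]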
@@ -263,7 +285,8 @@ def build_messages(
     multipart = multipart or input_data.image is not None
 
     input_role = input_data.role if hasattr(input_data, "role") else ContextMessageRole.USER
-    user_msg = ContextMessage(role=input_role, text=user_input_text, image=user_input_image)
+    input_tool_call_id = input_data.tool_call_id if hasattr(input_data, "tool_call_id") else None
+    user_msg = ContextMessage(role=input_role, text=user_input_text, image=user_input_image, tool_call_id=input_tool_call_id)
 
     input_data.context.append(user_msg)
 
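Note the hasattr guard: inputs that predate the tool_call_id field keep working, since the lookup falls back to None instead of raising. A stand-in sketch of that behavior:

from types import SimpleNamespace

legacy_input = SimpleNamespace(role="user")  # no tool_call_id attribute
tool_call_id = legacy_input.tool_call_id if hasattr(legacy_input, "tool_call_id") else None
assert tool_call_id is None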
@@ -275,23 +298,104 @@ def build_messages(
             current_messages.append(msg)
             current_role = msg.role
         else:
-            messages.append({
-                "role": current_role,
+            # Convert role enum to string for OpenAI API compatibility
+            role_str = current_role.value if hasattr(current_role, "value") else current_role
+            msg_dict = {
+                "role": role_str,
                 "content": render_message(merge_messages(current_messages), allow_multipart=multipart),
-                "tool_calls": merge_tool_calls(current_messages),
-            })
+            }
+
+            # Only add tool_calls if not empty
+            tool_calls = merge_tool_calls(current_messages)
+            if tool_calls:
+                # Ensure arguments are JSON strings (OpenAI API requirement)
+                for tc in tool_calls:
+                    if "function" in tc and "arguments" in tc["function"]:
+                        if isinstance(tc["function"]["arguments"], dict):
+                            tc["function"]["arguments"] = json.dumps(tc["function"]["arguments"])
+                msg_dict["tool_calls"] = tool_calls
+
+            # Add tool_call_id for tool role messages (required by OpenAI API)
+            if role_str == "tool":
+                if current_messages and current_messages[0].tool_call_id:
+                    msg_dict["tool_call_id"] = current_messages[0].tool_call_id
+                else:
+                    # If not provided, use empty string to satisfy schema
+                    msg_dict["tool_call_id"] = ""
+
+            messages.append(msg_dict)
             current_messages = [msg]
             current_role = msg.role
+
     if len(current_messages) > 0:
-        messages.append({
-            "role": current_role,
+        # Convert role enum to string for OpenAI API compatibility
+        role_str = current_role.value if hasattr(current_role, "value") else current_role
+        msg_dict = {
+            "role": role_str,
             "content": render_message(merge_messages(current_messages), allow_multipart=multipart),
-            "tool_calls": merge_tool_calls(current_messages),
-        })
+        }
+
+        # Only add tool_calls if not empty
+        tool_calls = merge_tool_calls(current_messages)
+        if tool_calls:
+            # Ensure arguments are JSON strings (OpenAI API requirement)
+            for tc in tool_calls:
+                if "function" in tc and "arguments" in tc["function"]:
+                    if isinstance(tc["function"]["arguments"], dict):
+                        tc["function"]["arguments"] = json.dumps(tc["function"]["arguments"])
+            msg_dict["tool_calls"] = tool_calls
+
+        # Add tool_call_id for tool role messages (required by OpenAI API)
+        if role_str == "tool":
+            if current_messages and current_messages[0].tool_call_id:
+                msg_dict["tool_call_id"] = current_messages[0].tool_call_id
+            else:
+                # If not provided, use empty string to satisfy schema
+                msg_dict["tool_call_id"] = ""
+
+        messages.append(msg_dict)
 
     return messages
 
 
+def build_tools(tools: Optional[List[Dict[str, Any]]]) -> Optional[List[Dict[str, Any]]]:
+    """Build tools in OpenAI API format.
+
+    Ensures tools are properly formatted:
+    - Wrapped in {"type": "function", "function": {...}}
+    - Parameters is never None (OpenAI API requirement)
+    """
+    if not tools:
+        return None
+
+    result = []
+    for tool in tools:
+        # Extract function definition
+        if "type" in tool and "function" in tool:
+            func_def = tool["function"].copy()
+        else:
+            func_def = tool.copy()
+
+        # Ensure parameters is not None (OpenAI API requirement)
+        if func_def.get("parameters") is None:
+            func_def["parameters"] = {"type": "object", "properties": {}}
+        # Also ensure properties within parameters is not None
+        elif func_def["parameters"].get("properties") is None:
+            func_def["parameters"]["properties"] = {}
+        else:
+            # Remove properties with null values (OpenAI API doesn't accept them)
+            properties = func_def["parameters"].get("properties", {})
+            if properties:
+                func_def["parameters"]["properties"] = {
+                    k: v for k, v in properties.items() if v is not None
+                }
+
+        # Wrap in OpenAI format
+        result.append({"type": "function", "function": func_def})
+
+    return result
+
+
 class StreamResponse:
     """Holds a single chunk of streamed response."""
     def __init__(self):
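One detail worth pulling out of the hunk above: OpenAI's chat API wants function-call arguments as a JSON string, and the new code coerces dict arguments accordingly. A standalone sketch of that normalization (values illustrative):

import json

tool_call = {
    "id": "call_abc123",
    "type": "function",
    "function": {"name": "get_weather", "arguments": {"city": "Oslo"}},
}
if isinstance(tool_call["function"]["arguments"], dict):
    tool_call["function"]["arguments"] = json.dumps(tool_call["function"]["arguments"])
print(tool_call["function"]["arguments"])  # '{"city": "Oslo"}' as a string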
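And a sketch of the new build_tools on a bare function definition with no parameters, assuming it is imported from inferencesh.models.llm, where the diff defines it (the tool itself is illustrative):

from inferencesh.models.llm import build_tools

tools = build_tools([{"name": "ping", "description": "health check", "parameters": None}])
# -> [{"type": "function",
#      "function": {"name": "ping", "description": "health check",
#                   "parameters": {"type": "object", "properties": {}}}}]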