inferencesh 0.4.23__tar.gz → 0.4.25__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of inferencesh might be problematic. Click here for more details.

Files changed (22) hide show
  1. {inferencesh-0.4.23/src/inferencesh.egg-info → inferencesh-0.4.25}/PKG-INFO +1 -1
  2. {inferencesh-0.4.23 → inferencesh-0.4.25}/pyproject.toml +1 -1
  3. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh/models/llm.py +115 -13
  4. {inferencesh-0.4.23 → inferencesh-0.4.25/src/inferencesh.egg-info}/PKG-INFO +1 -1
  5. {inferencesh-0.4.23 → inferencesh-0.4.25}/LICENSE +0 -0
  6. {inferencesh-0.4.23 → inferencesh-0.4.25}/README.md +0 -0
  7. {inferencesh-0.4.23 → inferencesh-0.4.25}/setup.cfg +0 -0
  8. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh/__init__.py +0 -0
  9. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh/client.py +0 -0
  10. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh/models/__init__.py +0 -0
  11. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh/models/base.py +0 -0
  12. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh/models/file.py +0 -0
  13. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh/utils/__init__.py +0 -0
  14. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh/utils/download.py +0 -0
  15. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh/utils/storage.py +0 -0
  16. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh.egg-info/SOURCES.txt +0 -0
  17. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh.egg-info/dependency_links.txt +0 -0
  18. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh.egg-info/entry_points.txt +0 -0
  19. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh.egg-info/requires.txt +0 -0
  20. {inferencesh-0.4.23 → inferencesh-0.4.25}/src/inferencesh.egg-info/top_level.txt +0 -0
  21. {inferencesh-0.4.23 → inferencesh-0.4.25}/tests/test_client.py +0 -0
  22. {inferencesh-0.4.23 → inferencesh-0.4.25}/tests/test_sdk.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: inferencesh
3
- Version: 0.4.23
3
+ Version: 0.4.25
4
4
  Summary: inference.sh Python SDK
5
5
  Author-email: "Inference Shell Inc." <hello@inference.sh>
6
6
  Project-URL: Homepage, https://github.com/inference-sh/sdk
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "inferencesh"
7
- version = "0.4.23"
7
+ version = "0.4.25"
8
8
  description = "inference.sh Python SDK"
9
9
  authors = [
10
10
  {name = "Inference Shell Inc.", email = "hello@inference.sh"},
@@ -37,6 +37,10 @@ class ContextMessage(BaseAppInput):
37
37
  description="the tool calls of the message",
38
38
  default=None
39
39
  )
40
+ tool_call_id: Optional[str] = Field(
41
+ description="the tool call id for tool role messages",
42
+ default=None
43
+ )
40
44
 
41
45
  class BaseLLMInput(BaseAppInput):
42
46
  """Base class with common LLM fields."""
@@ -78,6 +82,13 @@ class ImageCapabilityMixin(BaseModel):
78
82
  default=None,
79
83
  contentMediaType="image/*",
80
84
  )
85
+
86
+ class MultipleImageCapabilityMixin(BaseModel):
87
+ """Mixin for models that support multiple image inputs."""
88
+ images: Optional[List[File]] = Field(
89
+ description="the images to use for the model",
90
+ default=None,
91
+ )
81
92
 
82
93
  class ReasoningCapabilityMixin(BaseModel):
83
94
  """Mixin for models that support reasoning."""
@@ -92,6 +103,10 @@ class ToolsCapabilityMixin(BaseModel):
92
103
  description="tool definitions for function calling",
93
104
  default=None
94
105
  )
106
+ tool_call_id: Optional[str] = Field(
107
+ description="the tool call id for tool role messages",
108
+ default=None
109
+ )
95
110
 
96
111
  # Example of how to use:
97
112
  class LLMInput(BaseLLMInput):
@@ -217,18 +232,23 @@ def build_messages(
217
232
  def render_message(msg: ContextMessage, allow_multipart: bool) -> str | List[dict]:
218
233
  parts = []
219
234
  text = transform_user_message(msg.text) if transform_user_message and msg.role == ContextMessageRole.USER else msg.text
220
- if msg.tool_calls:
221
- for tool_call in msg.tool_calls:
222
- tool_call_string = json.dumps(tool_call)
223
- text += f"\n\nTool call: {tool_call_string}"
224
235
  if text:
225
236
  parts.append({"type": "text", "text": text})
237
+ else:
238
+ parts.append({"type": "text", "text": ""})
226
239
  if msg.image:
227
240
  if msg.image.path:
228
241
  image_data_uri = image_to_base64_data_uri(msg.image.path)
229
242
  parts.append({"type": "image_url", "image_url": {"url": image_data_uri}})
230
243
  elif msg.image.uri:
231
244
  parts.append({"type": "image_url", "image_url": {"url": msg.image.uri}})
245
+ if msg.images:
246
+ for image in msg.images:
247
+ if image.path:
248
+ image_data_uri = image_to_base64_data_uri(image.path)
249
+ parts.append({"type": "image_url", "image_url": {"url": image_data_uri}})
250
+ elif image.uri:
251
+ parts.append({"type": "image_url", "image_url": {"url": image.uri}})
232
252
  if allow_multipart:
233
253
  return parts
234
254
  if len(parts) == 1 and parts[0]["type"] == "text":
@@ -265,7 +285,8 @@ def build_messages(
265
285
  multipart = multipart or input_data.image is not None
266
286
 
267
287
  input_role = input_data.role if hasattr(input_data, "role") else ContextMessageRole.USER
268
- user_msg = ContextMessage(role=input_role, text=user_input_text, image=user_input_image)
288
+ input_tool_call_id = input_data.tool_call_id if hasattr(input_data, "tool_call_id") else None
289
+ user_msg = ContextMessage(role=input_role, text=user_input_text, image=user_input_image, tool_call_id=input_tool_call_id)
269
290
 
270
291
  input_data.context.append(user_msg)
271
292
 
@@ -277,23 +298,104 @@ def build_messages(
277
298
  current_messages.append(msg)
278
299
  current_role = msg.role
279
300
  else:
280
- messages.append({
281
- "role": current_role,
301
+ # Convert role enum to string for OpenAI API compatibility
302
+ role_str = current_role.value if hasattr(current_role, "value") else current_role
303
+ msg_dict = {
304
+ "role": role_str,
282
305
  "content": render_message(merge_messages(current_messages), allow_multipart=multipart),
283
- "tool_calls": merge_tool_calls(current_messages)
284
- })
306
+ }
307
+
308
+ # Only add tool_calls if not empty
309
+ tool_calls = merge_tool_calls(current_messages)
310
+ if tool_calls:
311
+ # Ensure arguments are JSON strings (OpenAI API requirement)
312
+ for tc in tool_calls:
313
+ if "function" in tc and "arguments" in tc["function"]:
314
+ if isinstance(tc["function"]["arguments"], dict):
315
+ tc["function"]["arguments"] = json.dumps(tc["function"]["arguments"])
316
+ msg_dict["tool_calls"] = tool_calls
317
+
318
+ # Add tool_call_id for tool role messages (required by OpenAI API)
319
+ if role_str == "tool":
320
+ if current_messages and current_messages[0].tool_call_id:
321
+ msg_dict["tool_call_id"] = current_messages[0].tool_call_id
322
+ else:
323
+ # If not provided, use empty string to satisfy schema
324
+ msg_dict["tool_call_id"] = ""
325
+
326
+ messages.append(msg_dict)
285
327
  current_messages = [msg]
286
328
  current_role = msg.role
329
+
287
330
  if len(current_messages) > 0:
288
- messages.append({
289
- "role": current_role,
331
+ # Convert role enum to string for OpenAI API compatibility
332
+ role_str = current_role.value if hasattr(current_role, "value") else current_role
333
+ msg_dict = {
334
+ "role": role_str,
290
335
  "content": render_message(merge_messages(current_messages), allow_multipart=multipart),
291
- "tool_calls": merge_tool_calls(current_messages)
292
- })
336
+ }
337
+
338
+ # Only add tool_calls if not empty
339
+ tool_calls = merge_tool_calls(current_messages)
340
+ if tool_calls:
341
+ # Ensure arguments are JSON strings (OpenAI API requirement)
342
+ for tc in tool_calls:
343
+ if "function" in tc and "arguments" in tc["function"]:
344
+ if isinstance(tc["function"]["arguments"], dict):
345
+ tc["function"]["arguments"] = json.dumps(tc["function"]["arguments"])
346
+ msg_dict["tool_calls"] = tool_calls
347
+
348
+ # Add tool_call_id for tool role messages (required by OpenAI API)
349
+ if role_str == "tool":
350
+ if current_messages and current_messages[0].tool_call_id:
351
+ msg_dict["tool_call_id"] = current_messages[0].tool_call_id
352
+ else:
353
+ # If not provided, use empty string to satisfy schema
354
+ msg_dict["tool_call_id"] = ""
355
+
356
+ messages.append(msg_dict)
293
357
 
294
358
  return messages
295
359
 
296
360
 
361
+ def build_tools(tools: Optional[List[Dict[str, Any]]]) -> Optional[List[Dict[str, Any]]]:
362
+ """Build tools in OpenAI API format.
363
+
364
+ Ensures tools are properly formatted:
365
+ - Wrapped in {"type": "function", "function": {...}}
366
+ - Parameters is never None (OpenAI API requirement)
367
+ """
368
+ if not tools:
369
+ return None
370
+
371
+ result = []
372
+ for tool in tools:
373
+ # Extract function definition
374
+ if "type" in tool and "function" in tool:
375
+ func_def = tool["function"].copy()
376
+ else:
377
+ func_def = tool.copy()
378
+
379
+ # Ensure parameters is not None (OpenAI API requirement)
380
+ if func_def.get("parameters") is None:
381
+ func_def["parameters"] = {"type": "object", "properties": {}}
382
+ # Also ensure properties within parameters is not None
383
+ elif func_def["parameters"].get("properties") is None:
384
+ func_def["parameters"]["properties"] = {}
385
+ else:
386
+ # Remove properties with null values (OpenAI API doesn't accept them)
387
+ properties = func_def["parameters"].get("properties", {})
388
+ if properties:
389
+ func_def["parameters"]["properties"] = {
390
+ k: v for k, v in properties.items() if v is not None
391
+ }
392
+
393
+ # Wrap in OpenAI format
394
+ result.append({"type": "function", "function": func_def})
395
+
396
+ return result
397
+
398
+
297
399
  class StreamResponse:
298
400
  """Holds a single chunk of streamed response."""
299
401
  def __init__(self):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: inferencesh
3
- Version: 0.4.23
3
+ Version: 0.4.25
4
4
  Summary: inference.sh Python SDK
5
5
  Author-email: "Inference Shell Inc." <hello@inference.sh>
6
6
  Project-URL: Homepage, https://github.com/inference-sh/sdk
File without changes
File without changes
File without changes