inferencesh 0.4.24__tar.gz → 0.4.26__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of inferencesh might be problematic.

Files changed (22):
  1. {inferencesh-0.4.24/src/inferencesh.egg-info → inferencesh-0.4.26}/PKG-INFO +1 -1
  2. {inferencesh-0.4.24 → inferencesh-0.4.26}/pyproject.toml +1 -1
  3. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/models/llm.py +117 -9
  4. {inferencesh-0.4.24 → inferencesh-0.4.26/src/inferencesh.egg-info}/PKG-INFO +1 -1
  5. {inferencesh-0.4.24 → inferencesh-0.4.26}/LICENSE +0 -0
  6. {inferencesh-0.4.24 → inferencesh-0.4.26}/README.md +0 -0
  7. {inferencesh-0.4.24 → inferencesh-0.4.26}/setup.cfg +0 -0
  8. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/__init__.py +0 -0
  9. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/client.py +0 -0
  10. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/models/__init__.py +0 -0
  11. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/models/base.py +0 -0
  12. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/models/file.py +0 -0
  13. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/utils/__init__.py +0 -0
  14. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/utils/download.py +0 -0
  15. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/utils/storage.py +0 -0
  16. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh.egg-info/SOURCES.txt +0 -0
  17. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh.egg-info/dependency_links.txt +0 -0
  18. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh.egg-info/entry_points.txt +0 -0
  19. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh.egg-info/requires.txt +0 -0
  20. {inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh.egg-info/top_level.txt +0 -0
  21. {inferencesh-0.4.24 → inferencesh-0.4.26}/tests/test_client.py +0 -0
  22. {inferencesh-0.4.24 → inferencesh-0.4.26}/tests/test_sdk.py +0 -0
{inferencesh-0.4.24/src/inferencesh.egg-info → inferencesh-0.4.26}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: inferencesh
-Version: 0.4.24
+Version: 0.4.26
 Summary: inference.sh Python SDK
 Author-email: "Inference Shell Inc." <hello@inference.sh>
 Project-URL: Homepage, https://github.com/inference-sh/sdk
{inferencesh-0.4.24 → inferencesh-0.4.26}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "inferencesh"
-version = "0.4.24"
+version = "0.4.26"
 description = "inference.sh Python SDK"
 authors = [
     {name = "Inference Shell Inc.", email = "hello@inference.sh"},
{inferencesh-0.4.24 → inferencesh-0.4.26}/src/inferencesh/models/llm.py
@@ -33,10 +33,18 @@ class ContextMessage(BaseAppInput):
         description="the image file of the message",
         default=None
     )
+    images: Optional[List[File]] = Field(
+        description="the images of the message",
+        default=None
+    )
     tool_calls: Optional[List[Dict[str, Any]]] = Field(
         description="the tool calls of the message",
         default=None
     )
+    tool_call_id: Optional[str] = Field(
+        description="the tool call id for tool role messages",
+        default=None
+    )
 
 class BaseLLMInput(BaseAppInput):
     """Base class with common LLM fields."""
@@ -78,6 +86,13 @@ class ImageCapabilityMixin(BaseModel):
         default=None,
         contentMediaType="image/*",
     )
+
+class MultipleImageCapabilityMixin(BaseModel):
+    """Mixin for models that support image inputs."""
+    images: Optional[List[File]] = Field(
+        description="the images to use for the model",
+        default=None,
+    )
 
 class ReasoningCapabilityMixin(BaseModel):
     """Mixin for models that support reasoning."""
@@ -92,6 +107,10 @@ class ToolsCapabilityMixin(BaseModel):
         description="tool definitions for function calling",
         default=None
     )
+    tool_call_id: Optional[str] = Field(
+        description="the tool call id for tool role messages",
+        default=None
+    )
 
 # Example of how to use:
 class LLMInput(BaseLLMInput):
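The mixin-level tool_call_id mirrors the new ContextMessage field: an input turn can now identify which tool call it answers. For context, a sketch of the kind of tools payload such an input carries; the shape follows the OpenAI function-calling convention referenced in the comments below, and all values are illustrative:

# Illustrative tools payload for an input composed with ToolsCapabilityMixin.
tools = [{
    "name": "get_weather",
    "description": "Look up current weather for a city",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}]
# A follow-up turn reporting the tool's result would also set, e.g.:
# input_data.tool_call_id = "call_abc123"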
@@ -227,6 +246,13 @@ def build_messages(
             parts.append({"type": "image_url", "image_url": {"url": image_data_uri}})
         elif msg.image.uri:
             parts.append({"type": "image_url", "image_url": {"url": msg.image.uri}})
+        if msg.images:
+            for image in msg.images:
+                if image.path:
+                    image_data_uri = image_to_base64_data_uri(image.path)
+                    parts.append({"type": "image_url", "image_url": {"url": image_data_uri}})
+                elif image.uri:
+                    parts.append({"type": "image_url", "image_url": {"url": image.uri}})
         if allow_multipart:
             return parts
         if len(parts) == 1 and parts[0]["type"] == "text":
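With this loop, every file in msg.images contributes one image_url part, so a mixed message renders as a multipart content list. An illustrative sketch of the resulting shape; a base64 data URI replaces the URL whenever File.path is set:

# Illustrative `parts` value for a text message with two remote images.
parts = [
    {"type": "text", "text": "What is in these photos?"},
    {"type": "image_url", "image_url": {"url": "https://example.com/a.jpg"}},
    {"type": "image_url", "image_url": {"url": "https://example.com/b.jpg"}},
]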
@@ -263,7 +289,8 @@
     multipart = multipart or input_data.image is not None
 
     input_role = input_data.role if hasattr(input_data, "role") else ContextMessageRole.USER
-    user_msg = ContextMessage(role=input_role, text=user_input_text, image=user_input_image)
+    input_tool_call_id = input_data.tool_call_id if hasattr(input_data, "tool_call_id") else None
+    user_msg = ContextMessage(role=input_role, text=user_input_text, image=user_input_image, tool_call_id=input_tool_call_id)
 
     input_data.context.append(user_msg)
 
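Combined with the grouping pass in the next hunk, this propagation lets a tool-role input turn come out as an OpenAI-style tool message. A sketch of the expected output; the field values are illustrative:

# If the incoming turn has role "tool" and tool_call_id "call_abc123",
# build_messages should emit a message shaped like:
expected_message = {
    "role": "tool",
    "content": '{"temperature": 21}',
    "tool_call_id": "call_abc123",
}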
@@ -275,23 +302,104 @@
             current_messages.append(msg)
             current_role = msg.role
         else:
-            messages.append({
-                "role": current_role,
+            # Convert role enum to string for OpenAI API compatibility
+            role_str = current_role.value if hasattr(current_role, "value") else current_role
+            msg_dict = {
+                "role": role_str,
                 "content": render_message(merge_messages(current_messages), allow_multipart=multipart),
-                "tool_calls": merge_tool_calls(current_messages)
-            })
+            }
+
+            # Only add tool_calls if not empty
+            tool_calls = merge_tool_calls(current_messages)
+            if tool_calls:
+                # Ensure arguments are JSON strings (OpenAI API requirement)
+                for tc in tool_calls:
+                    if "function" in tc and "arguments" in tc["function"]:
+                        if isinstance(tc["function"]["arguments"], dict):
+                            tc["function"]["arguments"] = json.dumps(tc["function"]["arguments"])
+                msg_dict["tool_calls"] = tool_calls
+
+            # Add tool_call_id for tool role messages (required by OpenAI API)
+            if role_str == "tool":
+                if current_messages and current_messages[0].tool_call_id:
+                    msg_dict["tool_call_id"] = current_messages[0].tool_call_id
+                else:
+                    # If not provided, use empty string to satisfy schema
+                    msg_dict["tool_call_id"] = ""
+
+            messages.append(msg_dict)
             current_messages = [msg]
             current_role = msg.role
+
     if len(current_messages) > 0:
-        messages.append({
-            "role": current_role,
+        # Convert role enum to string for OpenAI API compatibility
+        role_str = current_role.value if hasattr(current_role, "value") else current_role
+        msg_dict = {
+            "role": role_str,
             "content": render_message(merge_messages(current_messages), allow_multipart=multipart),
-            "tool_calls": merge_tool_calls(current_messages)
-        })
+        }
+
+        # Only add tool_calls if not empty
+        tool_calls = merge_tool_calls(current_messages)
+        if tool_calls:
+            # Ensure arguments are JSON strings (OpenAI API requirement)
+            for tc in tool_calls:
+                if "function" in tc and "arguments" in tc["function"]:
+                    if isinstance(tc["function"]["arguments"], dict):
+                        tc["function"]["arguments"] = json.dumps(tc["function"]["arguments"])
+            msg_dict["tool_calls"] = tool_calls
+
+        # Add tool_call_id for tool role messages (required by OpenAI API)
+        if role_str == "tool":
+            if current_messages and current_messages[0].tool_call_id:
+                msg_dict["tool_call_id"] = current_messages[0].tool_call_id
+            else:
+                # If not provided, use empty string to satisfy schema
+                msg_dict["tool_call_id"] = ""
+
+        messages.append(msg_dict)
 
     return messages
 
 
+def build_tools(tools: Optional[List[Dict[str, Any]]]) -> Optional[List[Dict[str, Any]]]:
+    """Build tools in OpenAI API format.
+
+    Ensures tools are properly formatted:
+    - Wrapped in {"type": "function", "function": {...}}
+    - Parameters is never None (OpenAI API requirement)
+    """
+    if not tools:
+        return None
+
+    result = []
+    for tool in tools:
+        # Extract function definition
+        if "type" in tool and "function" in tool:
+            func_def = tool["function"].copy()
+        else:
+            func_def = tool.copy()
+
+        # Ensure parameters is not None (OpenAI API requirement)
+        if func_def.get("parameters") is None:
+            func_def["parameters"] = {"type": "object", "properties": {}}
+        # Also ensure properties within parameters is not None
+        elif func_def["parameters"].get("properties") is None:
+            func_def["parameters"]["properties"] = {}
+        else:
+            # Remove properties with null values (OpenAI API doesn't accept them)
+            properties = func_def["parameters"].get("properties", {})
+            if properties:
+                func_def["parameters"]["properties"] = {
+                    k: v for k, v in properties.items() if v is not None
+                }
+
+        # Wrap in OpenAI format
+        result.append({"type": "function", "function": func_def})
+
+    return result
+
+
 class StreamResponse:
     """Holds a single chunk of streamed response."""
     def __init__(self):
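The new module-level build_tools helper normalizes user-supplied tool definitions before they reach an OpenAI-compatible endpoint. A usage sketch against the logic above; the sample definitions are illustrative:

from inferencesh.models.llm import build_tools

# A bare definition gets wrapped, and a None parameters block is replaced
# with an empty object schema.
bare = [{"name": "ping", "description": "health check", "parameters": None}]
print(build_tools(bare))
# -> [{'type': 'function', 'function': {'name': 'ping', 'description': 'health check',
#      'parameters': {'type': 'object', 'properties': {}}}}]

# An already-wrapped definition passes through, minus null-valued properties.
wrapped = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}, "units": None},
        },
    },
}]
print(build_tools(wrapped))  # "units" is dropped; "city" survives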
{inferencesh-0.4.24 → inferencesh-0.4.26/src/inferencesh.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: inferencesh
-Version: 0.4.24
+Version: 0.4.26
 Summary: inference.sh Python SDK
 Author-email: "Inference Shell Inc." <hello@inference.sh>
 Project-URL: Homepage, https://github.com/inference-sh/sdk