inferencesh 0.4.13__py3-none-any.whl → 0.4.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of inferencesh might be problematic; see the advisory details on the registry page for more information.

@@ -270,8 +270,8 @@ class File(BaseModel):
270
270
  return {
271
271
  "$id": "/schemas/File",
272
272
  "oneOf": [
273
- {"type": "string"},
274
- json_schema
273
+ json_schema,
274
+ {"type": "string"}
275
275
  ]
276
276
  }
277
277
 
inferencesh/models/llm.py CHANGED
@@ -464,26 +464,27 @@ class ResponseTransformer:
464
464
  text: Cleaned text to process for reasoning
465
465
  """
466
466
  # Default implementation for <think> style reasoning
467
- if "<think>" in text and not self.state.state_changes["reasoning_started"]:
467
+ # Check for tags in the complete buffer
468
+ if "<think>" in self.state.buffer and not self.state.state_changes["reasoning_started"]:
468
469
  self.state.state_changes["reasoning_started"] = True
469
470
  if self.timing:
470
471
  self.timing.start_reasoning()
471
472
 
472
- if "</think>" in text and not self.state.state_changes["reasoning_ended"]:
473
- self.state.state_changes["reasoning_ended"] = True
474
- if self.timing:
475
- # Estimate token count from character count (rough approximation)
476
- token_count = len(self.state.buffer.split("<think>")[1].split("</think>")[0]) // 4
477
- self.timing.end_reasoning(token_count)
478
-
479
- if "<think>" in self.state.buffer:
480
- parts = self.state.buffer.split("</think>", 1)
481
- if len(parts) > 1:
482
- self.state.reasoning = parts[0].split("<think>", 1)[1].strip()
483
- self.state.response = parts[1].strip()
484
- else:
485
- self.state.reasoning = self.state.buffer.split("<think>", 1)[1].strip()
486
- self.state.response = ""
473
+ # Extract content and handle end of reasoning
474
+ parts = self.state.buffer.split("<think>", 1)
475
+ if len(parts) > 1:
476
+ reasoning_text = parts[1]
477
+ end_parts = reasoning_text.split("</think>", 1)
478
+ self.state.reasoning = end_parts[0].strip()
479
+ self.state.response = end_parts[1].strip() if len(end_parts) > 1 else ""
480
+
481
+ # Check for end tag in complete buffer
482
+ if "</think>" in self.state.buffer and not self.state.state_changes["reasoning_ended"]:
483
+ self.state.state_changes["reasoning_ended"] = True
484
+ if self.timing:
485
+ # Estimate token count from character count (rough approximation)
486
+ token_count = len(self.state.reasoning) // 4
487
+ self.timing.end_reasoning(token_count)
487
488
  else:
488
489
  self.state.response = self.state.buffer
489
490
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: inferencesh
3
- Version: 0.4.13
3
+ Version: 0.4.15
4
4
  Summary: inference.sh Python SDK
5
5
  Author-email: "Inference Shell Inc." <hello@inference.sh>
6
6
  Project-URL: Homepage, https://github.com/inference-sh/sdk
@@ -2,14 +2,14 @@ inferencesh/__init__.py,sha256=dY3l3yCkWoMtGX0gNXgxFnrprFRl6PPWjH8V7Qedx5g,772
2
2
  inferencesh/client.py,sha256=6wTCLqLq-QapvjCjMg8ZE3BQyg8iTL8hv8UU7t-oxmE,39360
3
3
  inferencesh/models/__init__.py,sha256=FDwcdtT6c4hbRitymjmN-hZMlQa8RbKSftkZZyjtUXA,536
4
4
  inferencesh/models/base.py,sha256=eTwRvXAjMGh6b8AUXWKSGpRyeScAkPs6bNYY8AXKSz8,5505
5
- inferencesh/models/file.py,sha256=Q1J9xgqDiBFKqeIczdBE5BpYiab7V8-k_BcHk2kq7tA,11506
6
- inferencesh/models/llm.py,sha256=FPkTqVkH4mdKxUEdDVNimaPEr_mkpGofX7qTK4G1IHo,27873
5
+ inferencesh/models/file.py,sha256=35bnHcBB4Whi7Pg7HcmKUSm2FhRO6RPbmgXfhT0Op6Y,11506
6
+ inferencesh/models/llm.py,sha256=N5eP549tWYh5qlbAwMbMTcHwx-bET7OCvxJ2SlSLfVk,27948
7
7
  inferencesh/utils/__init__.py,sha256=-xiD6uo2XzcrPAWFb_fUbaimmnW4KFKc-8IvBzaxNd4,148
8
8
  inferencesh/utils/download.py,sha256=DRGBudiPVa5bDS35KfR-DYeGRk7gO03WOelnisecwMo,1815
9
9
  inferencesh/utils/storage.py,sha256=E4J8emd4eFKdmdDgAqzz3TpaaDd3n0l8gYlMHuY8yIU,519
10
- inferencesh-0.4.13.dist-info/licenses/LICENSE,sha256=OsgqEWIh2el_QMj0y8O1A5Q5Dl-dxqqYbFE6fszuR4s,1086
11
- inferencesh-0.4.13.dist-info/METADATA,sha256=ocs7sJclodv5KopQutUoX0l2ULr6TAm7rOVkKb5hBLU,5406
12
- inferencesh-0.4.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
13
- inferencesh-0.4.13.dist-info/entry_points.txt,sha256=6IC-fyozAqW3ljsMLGCXxJ0_ui2Jb-2fLHtoH1RTnEE,45
14
- inferencesh-0.4.13.dist-info/top_level.txt,sha256=TSMHg3T1ThMl1HGAWmzBClwOYH1ump5neof9BfHIwaA,12
15
- inferencesh-0.4.13.dist-info/RECORD,,
10
+ inferencesh-0.4.15.dist-info/licenses/LICENSE,sha256=OsgqEWIh2el_QMj0y8O1A5Q5Dl-dxqqYbFE6fszuR4s,1086
11
+ inferencesh-0.4.15.dist-info/METADATA,sha256=DRHSLO6_-kedTgY0KBsZlPts1AfWL0bwie36dJkPKS0,5406
12
+ inferencesh-0.4.15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
13
+ inferencesh-0.4.15.dist-info/entry_points.txt,sha256=6IC-fyozAqW3ljsMLGCXxJ0_ui2Jb-2fLHtoH1RTnEE,45
14
+ inferencesh-0.4.15.dist-info/top_level.txt,sha256=TSMHg3T1ThMl1HGAWmzBClwOYH1ump5neof9BfHIwaA,12
15
+ inferencesh-0.4.15.dist-info/RECORD,,