wisent 0.5.8__py3-none-any.whl → 0.5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of wisent might be problematic. See the registry's advisory page for more details.

wisent/__init__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.5.8"
1
+ __version__ = "0.5.9"
@@ -258,6 +258,7 @@ class WisentModel:
258
258
  self,
259
259
  message: list[ChatMessage],
260
260
  add_generation_prompt: bool = True,
261
+ enable_thinking: bool = True,
261
262
  ) -> dict[str, torch.Tensor]:
262
263
  """
263
264
  Encode a single input in chat format.
@@ -267,6 +268,8 @@ class WisentModel:
267
268
  list of {'role': str, 'content': str} dicts (chat messages).
268
269
  add_generation_prompt:
269
270
  If True, append the model's generation prompt at the end.
271
+ enable_thinking:
272
+ If False, disable thinking/reasoning mode (prevents <think> tags for supported models like Qwen).
270
273
 
271
274
  returns:
272
275
  dict with 'input_ids' and 'attention_mask' tensors.
@@ -279,10 +282,10 @@ class WisentModel:
279
282
  >>> wm._encode_one(msgs, add_generation_prompt=True)
280
283
  {"input_ids": tensor([[...]]), "attention_mask": tensor([[...]])}
281
284
  """
282
-
285
+
283
286
  ids = self.tokenizer.apply_chat_template(
284
- message, tokenize=True, add_generation_prompt=add_generation_prompt, return_tensors="pt"
285
- )[0]
287
+ message, tokenize=True, add_generation_prompt=add_generation_prompt, enable_thinking=enable_thinking, return_tensors="pt"
288
+ )[0]
286
289
  return {
287
290
  "input_ids": ids,
288
291
  "attention_mask": torch.ones_like(ids),
@@ -292,6 +295,7 @@ class WisentModel:
292
295
  self,
293
296
  inputs: list[list[ChatMessage]],
294
297
  add_generation_prompt: bool = True,
298
+ enable_thinking: bool = True,
295
299
  ) -> dict[str, torch.Tensor]:
296
300
  """
297
301
  Batch-encode a list of chat messages.
@@ -301,7 +305,9 @@ class WisentModel:
301
305
  list of chat messages (each a list of {'role','content'} dicts).
302
306
  add_generation_prompt:
303
307
  If True, append the model's generation prompt at the end of each.
304
-
308
+ enable_thinking:
309
+ If False, disable thinking/reasoning mode (prevents <think> tags for supported models like Qwen).
310
+
305
311
  returns:
306
312
  dict with batched 'input_ids' and 'attention_mask' tensors.
307
313
 
@@ -316,10 +322,10 @@ class WisentModel:
316
322
  >>> wm._batch_encode([msgs1, msgs2], add_generation_prompt=True)
317
323
  {"input_ids": tensor([[...],[...]]), "attention_mask": tensor([[...],[...]])}
318
324
  """
319
-
325
+
320
326
  singles = []
321
327
  for item in inputs:
322
- singles.append(self._encode_one(item, add_generation_prompt=add_generation_prompt))
328
+ singles.append(self._encode_one(item, add_generation_prompt=add_generation_prompt, enable_thinking=enable_thinking))
323
329
 
324
330
  batch = self.tokenizer.pad(singles, padding=True, return_tensors="pt")
325
331
 
@@ -338,6 +344,7 @@ class WisentModel:
338
344
  num_return_sequences: int = 1,
339
345
  use_steering: bool = False,
340
346
  steering_plan: SteeringPlan | None = None,
347
+ enable_thinking: bool = True,
341
348
  **gen_kwargs: Any,
342
349
  ) -> list[str]:
343
350
  """
@@ -361,6 +368,8 @@ class WisentModel:
361
368
  steering_plan:
362
369
  optional SteeringPlan to use for this call only (overrides internal plan).
363
370
  If None, uses the internal plan.
371
+ enable_thinking:
372
+ If False, disable thinking/reasoning mode (prevents <think> tags for supported models like Qwen).
364
373
  **gen_kwargs:
365
374
  additional kwargs passed to 'model.generate()'.
366
375
 
@@ -439,7 +448,7 @@ class WisentModel:
439
448
  if use_steering:
440
449
  self.apply_steering(steering_plan)
441
450
 
442
- batch = self._batch_encode(inputs, add_generation_prompt=True)
451
+ batch = self._batch_encode(inputs, add_generation_prompt=True, enable_thinking=enable_thinking)
443
452
 
444
453
  gen_out = self.hf_model.generate(
445
454
  **batch,
@@ -472,6 +481,7 @@ class WisentModel:
472
481
  collect_topk: int = 5,
473
482
  use_steering: bool = False,
474
483
  steering_plan: SteeringPlan | None = None,
484
+ enable_thinking: bool = True,
475
485
  **gen_kwargs: Any,
476
486
  ) -> tuple[list[str], list[GenerationStats]]:
477
487
  """
@@ -486,7 +496,7 @@ class WisentModel:
486
496
  temperature:
487
497
  sampling temperature (0 = greedy, 1 = default sampling).
488
498
  top_p:
489
- nucleus sampling probability (0 = no filtering, 1 = full filtering).
499
+ nucleus sampling probability (0 = no filtering, 1 = full filtering).
490
500
  do_sample:
491
501
  if False, uses greedy decoding (top_k=1).
492
502
  num_return_sequences:
@@ -498,6 +508,8 @@ class WisentModel:
498
508
  steering_plan:
499
509
  optional SteeringPlan to use for this call only (overrides internal plan).
500
510
  If None, uses the internal plan.
511
+ enable_thinking:
512
+ If False, disable thinking/reasoning mode (prevents <think> tags for supported models like Qwen).
501
513
  **gen_kwargs:
502
514
  additional kwargs passed to 'model.generate()'.
503
515
 
@@ -537,7 +549,7 @@ class WisentModel:
537
549
  if use_steering:
538
550
  self.apply_steering(steering_plan)
539
551
 
540
- batch = self._batch_encode(inputs, add_generation_prompt=True)
552
+ batch = self._batch_encode(inputs, add_generation_prompt=True, enable_thinking=enable_thinking)
541
553
 
542
554
  out = self.hf_model.generate(
543
555
  **batch,
@@ -609,6 +621,7 @@ class WisentModel:
609
621
  steering_plan: SteeringPlan | None = None,
610
622
  skip_prompt: bool = True,
611
623
  skip_special_tokens: bool = True,
624
+ enable_thinking: bool = True,
612
625
  **gen_kwargs: Any,
613
626
  ) -> Iterable[str]:
614
627
  """
@@ -635,6 +648,8 @@ class WisentModel:
635
648
  if True, the yielded text excludes the input prompt.
636
649
  skip_special_tokens:
637
650
  if True, special tokens are removed from the yielded text.
651
+ enable_thinking:
652
+ If False, disable thinking/reasoning mode (prevents <think> tags for supported models like Qwen).
638
653
  **gen_kwargs:
639
654
  additional kwargs passed to 'model.generate()'.
640
655
 
@@ -649,7 +664,7 @@ class WisentModel:
649
664
  if use_steering:
650
665
  self.apply_steering(steering_plan)
651
666
 
652
- batch = self._batch_encode(inputs, add_generation_prompt=True)
667
+ batch = self._batch_encode(inputs, add_generation_prompt=True, enable_thinking=enable_thinking)
653
668
 
654
669
  streamer = TextIteratorStreamer(
655
670
  self.tokenizer,
@@ -165,7 +165,8 @@ class MultiSteering:
165
165
  prompt: str,
166
166
  max_new_tokens: int = 100,
167
167
  temperature: float = 0.7,
168
- top_p: float = 0.9
168
+ top_p: float = 0.9,
169
+ enable_thinking: bool = True
169
170
  ) -> Iterable[str]:
170
171
  """Apply the combined steering vector to generate text with streaming.
171
172
 
@@ -175,6 +176,7 @@ class MultiSteering:
175
176
  max_new_tokens: Maximum tokens to generate
176
177
  temperature: Sampling temperature
177
178
  top_p: Top-p sampling parameter
179
+ enable_thinking: If False, disable thinking/reasoning mode (prevents <think> tags for supported models like Qwen)
178
180
 
179
181
  Yields:
180
182
  Generated text chunks
@@ -213,7 +215,8 @@ class MultiSteering:
213
215
  use_steering=True,
214
216
  steering_plan=steering_plan,
215
217
  skip_prompt=True,
216
- skip_special_tokens=True
218
+ skip_special_tokens=True,
219
+ enable_thinking=enable_thinking
217
220
  )
218
221
 
219
222
  except Exception as e:
@@ -227,7 +230,8 @@ class MultiSteering:
227
230
  prompt: str,
228
231
  max_new_tokens: int = 100,
229
232
  temperature: float = 0.7,
230
- top_p: float = 0.9
233
+ top_p: float = 0.9,
234
+ enable_thinking: bool = True
231
235
  ) -> str:
232
236
  """Apply the combined steering vector to generate text (non-streaming).
233
237
 
@@ -237,6 +241,7 @@ class MultiSteering:
237
241
  max_new_tokens: Maximum tokens to generate
238
242
  temperature: Sampling temperature
239
243
  top_p: Top-p sampling parameter
244
+ enable_thinking: If False, disable thinking/reasoning mode (prevents <think> tags for supported models like Qwen)
240
245
 
241
246
  Returns:
242
247
  Generated text
@@ -273,7 +278,8 @@ class MultiSteering:
273
278
  temperature=temperature,
274
279
  top_p=top_p,
275
280
  use_steering=True,
276
- steering_plan=steering_plan
281
+ steering_plan=steering_plan,
282
+ enable_thinking=enable_thinking
277
283
  )
278
284
 
279
285
  return outputs[0] if outputs else ""
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: wisent
3
- Version: 0.5.8
3
+ Version: 0.5.9
4
4
  Summary: Monitor and guard against harmful content in language models
5
5
  Home-page: https://github.com/yourusername/wisent-activation-guardrails
6
6
  Author: Wisent Team
@@ -1,4 +1,4 @@
1
- wisent/__init__.py,sha256=bDuZ37zImJZsQ3a4pW87q4kg-zsIBrUFAv1aumIf_7k,22
1
+ wisent/__init__.py,sha256=JXLyhF5WmLgRZBfWGz9zWe2g5ISKSLpn2jp8yLaC-s4,22
2
2
  wisent/benchmarks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
3
  wisent/benchmarks/coding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  wisent/benchmarks/coding/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -64,7 +64,7 @@ wisent/core/managed_cached_benchmarks.py,sha256=JbvpZ1fgSuQQhyQVKEvqrQZRHGqfnjo9
64
64
  wisent/core/mixed_benchmark_sampler.py,sha256=tKQCHUXVuYeCyx4VZt8O1hGyB-TOY_SQ_SYi8cyApII,13585
65
65
  wisent/core/model_config_manager.py,sha256=rQAdSmk3GFlZXyHp3fSV1bORxiZWhmzIz1uo3H4JtkA,12009
66
66
  wisent/core/model_persistence.py,sha256=6_vc1Ndujd4v0O68giINSTvYhmb7-AiacWwAbqLOrls,10636
67
- wisent/core/multi_steering.py,sha256=o4YzkEMeOk8rWXC4JPa3gwVZi4_CjjwyLGXlkWxpPOw,11869
67
+ wisent/core/multi_steering.py,sha256=EMaKn4dZPlAsFupEUQZlxTZGJ0-ofpLcTCKQk8HaZL8,12295
68
68
  wisent/core/parser.py,sha256=_YDeSuQMx0zNknz9rX3Ls1YPT1x5eohoY8rfjeoqxV8,69091
69
69
  wisent/core/representation.py,sha256=hBl_N9qbr5Gsa7GCQ0nMWRm82RqYEfhd9cyf0PPH5LY,195
70
70
  wisent/core/sample_size_optimizer.py,sha256=6wegGXZpdGpiR4R0YJ1D2JqLr6yinMndEx2gB5FL80s,23666
@@ -134,7 +134,7 @@ wisent/core/evaluators/oracles/interactive.py,sha256=f3v2_N17fKzGyeOxONRJbrbn8i5
134
134
  wisent/core/evaluators/oracles/nlp_evaluator.py,sha256=KxbnF-I2IFbBQpoYyjQKGbYh4NErsEuhTCRYX_Tob8o,18220
135
135
  wisent/core/evaluators/oracles/user_specified.py,sha256=V1dKrNj3Oq7UC_I7DT0WGnktP7R_DSW6UAwDdrA8SnE,2360
136
136
  wisent/core/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
137
- wisent/core/models/wisent_model.py,sha256=yJBcz3GjR7O-ySTV2vvOsOrL9xDvXsG0W9Gr0HR_0sc,28729
137
+ wisent/core/models/wisent_model.py,sha256=_NDi4oHZnwtUbusqPw8vw1_YYifbsRnD_g25M2uCf08,29772
138
138
  wisent/core/models/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
139
139
  wisent/core/models/core/atoms.py,sha256=_Bpz0Sfiq6_VswThIltUwNGj_ukl5MhAg8RrgMKwEBM,15756
140
140
  wisent/core/optuna/__init__.py,sha256=sTfwRnrRyKrCNVsF_qCjBDFEZC0ZmUZ7m6IE0iHfTVs,1914
@@ -213,8 +213,8 @@ wisent/synthetic/generators/diversities/core/__init__.py,sha256=47DEQpj8HBSa-_TI
213
213
  wisent/synthetic/generators/diversities/core/core.py,sha256=TjSj5T7NE5kRH-ABcFqb1Hz_j3Z6F_TcV-95uHD5Xw8,2201
214
214
  wisent/synthetic/generators/diversities/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
215
215
  wisent/synthetic/generators/diversities/methods/fast_diversity.py,sha256=Z2UzTbzyJFM_ToxCoXM_LQQQ1Jc6BZknrbpikTG1MRw,8522
216
- wisent-0.5.8.dist-info/licenses/LICENSE,sha256=wy0iaw8b2tyqZAfKHib3lP3PJ9o88FDCg92oUHh3sDQ,1073
217
- wisent-0.5.8.dist-info/METADATA,sha256=u406l73QL6jE0Jw5GZW3O4RKZkgy-NTs_DOgLIAoGDM,2424
218
- wisent-0.5.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
219
- wisent-0.5.8.dist-info/top_level.txt,sha256=2Ts9Iyldnb3auIN2HBBaHPknRy7nSRDm2f6RGzYgr8A,7
220
- wisent-0.5.8.dist-info/RECORD,,
216
+ wisent-0.5.9.dist-info/licenses/LICENSE,sha256=wy0iaw8b2tyqZAfKHib3lP3PJ9o88FDCg92oUHh3sDQ,1073
217
+ wisent-0.5.9.dist-info/METADATA,sha256=lAomuCOIdAio3ai9_IunQG9hytR1WWJ3UjtiScFw9kc,2424
218
+ wisent-0.5.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
219
+ wisent-0.5.9.dist-info/top_level.txt,sha256=2Ts9Iyldnb3auIN2HBBaHPknRy7nSRDm2f6RGzYgr8A,7
220
+ wisent-0.5.9.dist-info/RECORD,,
File without changes