speedy-utils 1.1.23__py3-none-any.whl → 1.1.24__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
llm_utils/__init__.py CHANGED
@@ -1,19 +1,22 @@
 from llm_utils.lm.openai_memoize import MOpenAI
-from llm_utils.lm import LLMTask, AsyncLM, AsyncLLMTask, LLMJudgeBase, ChainOfThought, TranslationEvaluatorJudge, Signature, InputField, OutputField, Input, Output
+from llm_utils.lm import LLM, AsyncLM, AsyncLLMTask, LLMSignature, Signature, InputField, OutputField, Input, Output
 from llm_utils.vector_cache import VectorCache
 from llm_utils.lm.lm_base import get_model_name
 from llm_utils.lm.base_prompt_builder import BasePromptBuilder
 
-LLM = LLMTask
+LLM_TASK = LLM
+
 
 # Convenience functions for killing VLLM servers
 def kill_all_vllm() -> int:
     """Kill all tracked VLLM server processes. Returns number of processes killed."""
-    return LLMTask.kill_all_vllm()
+    return LLM.kill_all_vllm()
+
 
 def kill_vllm_on_port(port: int) -> bool:
     """Kill VLLM server on specific port. Returns True if server was killed."""
-    return LLMTask.kill_vllm_on_port(port)
+    return LLM.kill_vllm_on_port(port)
+
 
 from llm_utils.chat_format import (
     build_chatml_input,
@@ -21,6 +24,7 @@ from llm_utils.chat_format import (
     display_conversations,
     format_msgs,
     get_conversation_one_turn,
+    show_chat_v2,
     show_chat,
     show_string_diff,
     transform_messages,
@@ -37,9 +41,10 @@ __all__ = [
     "build_chatml_input",
     "format_msgs",
     "display_chat_messages_as_html",
+    "show_chat_v2",
     "AsyncLM",
     "AsyncLLMTask",
-    "LLMTask",
+    "LLM",
     "MOpenAI",
     "get_model_name",
     "VectorCache",
@@ -47,12 +52,11 @@ __all__ = [
     "LLM",
     "kill_all_vllm",
     "kill_vllm_on_port",
-    "LLMJudgeBase",
-    "ChainOfThought",
-    "TranslationEvaluatorJudge",
+    "LLMSignature",
     "Signature",
     "InputField",
     "OutputField",
     "Input",
     "Output",
+    "LLM_TASK",  # Alias for LLM class
 ]
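
Taken together, these changes rename the package's primary class from `LLMTask` to `LLM` and flip the alias direction: 1.1.23 exported `LLMTask` with `LLM = LLMTask`, while 1.1.24 exports `LLM` with `LLM_TASK = LLM`. The old `LLMTask` name is gone from `__all__`, so imports of it will break. A minimal sketch of the new top-level surface, assuming the wheel is installed; nothing beyond the names shown in this diff is guaranteed:

```python
# Minimal sketch of the 1.1.24 top-level surface; the names come from the
# diff above, everything else (runtime behavior, server tracking) is assumed.
from llm_utils import LLM, LLM_TASK, kill_all_vllm

# The alias now points the other way: LLM is the class, LLM_TASK the alias.
assert LLM_TASK is LLM

# Note: `from llm_utils import LLMTask` would now raise ImportError.

# The module-level helpers still delegate to classmethods on the class.
killed = kill_all_vllm()
print(f"Stopped {killed} tracked VLLM server processes")
```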
@@ -11,6 +11,7 @@ from .display import (
     show_string_diff,
     display_conversations,
     display_chat_messages_as_html,
+    show_chat_v2,
 )
 from .utils import (
     build_chatml_input,
@@ -31,4 +32,5 @@ __all__ = [
     "show_string_diff",
     "display_conversations",
     "display_chat_messages_as_html",
+    "show_chat_v2",
 ]
@@ -25,49 +25,49 @@ def _preprocess_as_markdown(content: str) -> str:
     Preprocess content as markdown with proper formatting.
     """
     # Basic markdown preprocessing - convert common patterns
-    lines = content.split('\n')
+    lines = content.split("\n")
     processed_lines = []
-
+
     for line in lines:
         # Convert **bold** to span with bold styling
-        while '**' in line:
-            first_pos = line.find('**')
+        while "**" in line:
+            first_pos = line.find("**")
             if first_pos != -1:
-                second_pos = line.find('**', first_pos + 2)
+                second_pos = line.find("**", first_pos + 2)
                 if second_pos != -1:
                     before = line[:first_pos]
-                    bold_text = line[first_pos + 2:second_pos]
-                    after = line[second_pos + 2:]
+                    bold_text = line[first_pos + 2 : second_pos]
+                    after = line[second_pos + 2 :]
                     line = f'{before}<span style="font-weight: bold;">{bold_text}</span>{after}'
                 else:
                     break
             else:
                 break
-
+
         # Convert *italic* to span with italic styling
-        while '*' in line and line.count('*') >= 2:
-            first_pos = line.find('*')
+        while "*" in line and line.count("*") >= 2:
+            first_pos = line.find("*")
             if first_pos != -1:
-                second_pos = line.find('*', first_pos + 1)
+                second_pos = line.find("*", first_pos + 1)
                 if second_pos != -1:
                     before = line[:first_pos]
-                    italic_text = line[first_pos + 1:second_pos]
-                    after = line[second_pos + 1:]
+                    italic_text = line[first_pos + 1 : second_pos]
+                    after = line[second_pos + 1 :]
                     line = f'{before}<span style="font-style: italic;">{italic_text}</span>{after}'
                 else:
                     break
             else:
                 break
-
+
         # Convert # headers to bold headers
-        if line.strip().startswith('#'):
-            level = len(line) - len(line.lstrip('#'))
-            header_text = line.lstrip('# ').strip()
+        if line.strip().startswith("#"):
+            level = len(line) - len(line.lstrip("#"))
+            header_text = line.lstrip("# ").strip()
             line = f'<span style="font-weight: bold; font-size: 1.{min(4, level)}em;">{header_text}</span>'
-
+
         processed_lines.append(line)
-
-    return '\n'.join(processed_lines)
+
+    return "\n".join(processed_lines)
 
 
 def show_chat(
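
This hunk is pure formatter churn (single to double quotes, PEP 8 slice spacing); the helper's logic is unchanged. For orientation, a worked example of what `_preprocess_as_markdown` produces. The file header for these hunks is missing from the diff, so the import path below is an assumption, and the function is private:

```python
# Assumed module path; the diff omits the file header for these hunks.
from llm_utils.chat_format.display import _preprocess_as_markdown

print(_preprocess_as_markdown("## Title\nThis is **bold** and *italic*."))
# Expected, following the logic above:
# <span style="font-weight: bold; font-size: 1.2em;">Title</span>
# This is <span style="font-weight: bold;">bold</span> and <span style="font-style: italic;">italic</span>.
```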
@@ -80,7 +80,7 @@ def show_chat(
 ) -> Optional[str]:
     """
     Display chat messages as HTML.
-
+
     Args:
         msgs: Chat messages in various formats
         return_html: If True, return HTML string instead of displaying
@@ -148,13 +148,13 @@ def show_chat(
                 name = tool_call["name"]
                 args = tool_call["arguments"]
                 content += f"Tool: {name}\nArguments: {args}"
-
+
         # Preprocess content based on format options
         if as_json:
             content = _preprocess_as_json(content)
         elif as_markdown:
             content = _preprocess_as_markdown(content)
-
+
         # Handle HTML escaping differently for markdown vs regular content
         if as_markdown:
             # For markdown, preserve HTML tags but escape other characters carefully
@@ -168,10 +168,7 @@ def show_chat(
             content = content.replace("\t", "&nbsp;&nbsp;&nbsp;&nbsp;")
             content = content.replace(" ", "&nbsp;&nbsp;")
             content = (
-                content.replace("<br>", "TEMP_BR")
-                .replace("<", "&lt;")
-                .replace(">", "&gt;")
-                .replace("TEMP_BR", "<br>")
+                content.replace("<br>", "TEMP_BR").replace("<", "&lt;").replace(">", "&gt;").replace("TEMP_BR", "<br>")
             )
         if role in color_scheme:
             background_color = color_scheme[role]["background"]
@@ -179,11 +176,11 @@
         else:
             background_color = color_scheme["default"]["background"]
             text_color = color_scheme["default"]["text"]
-
+
         # Choose container based on whether we have markdown formatting
         content_container = "div" if as_markdown else "pre"
         container_style = 'style="white-space: pre-wrap;"' if as_markdown else ""
-
+
         if role == "system":
             conversation_html += (
                 f'<div style="background-color: {background_color}; color: {text_color}; padding: 10px; margin-bottom: 10px;">'
@@ -264,9 +261,7 @@ def get_conversation_one_turn(
     if assistant_msg is not None:
         messages.append({"role": "assistant", "content": assistant_msg})
     if assistant_prefix is not None:
-        assert return_format != "chatml", (
-            'Change return_format to "text" if you want to use assistant_prefix'
-        )
+        assert return_format != "chatml", 'Change return_format to "text" if you want to use assistant_prefix'
         assert messages[-1]["role"] == "user"
         from .transform import transform_messages
 
@@ -291,21 +286,13 @@ def highlight_diff_chars(text1: str, text2: str) -> str:
             html.append(text1[i1:i2])
         elif tag == "replace":
             if i1 != i2:
-                html.append(
-                    f'<span style="background-color:#ffd6d6; color:#b20000;">{text1[i1:i2]}</span>'
-                )
+                html.append(f'<span style="background-color:#ffd6d6; color:#b20000;">{text1[i1:i2]}</span>')
             if j1 != j2:
-                html.append(
-                    f'<span style="background-color:#d6ffd6; color:#006600;">{text2[j1:j2]}</span>'
-                )
+                html.append(f'<span style="background-color:#d6ffd6; color:#006600;">{text2[j1:j2]}</span>')
         elif tag == "delete":
-            html.append(
-                f'<span style="background-color:#ffd6d6; color:#b20000;">{text1[i1:i2]}</span>'
-            )
+            html.append(f'<span style="background-color:#ffd6d6; color:#b20000;">{text1[i1:i2]}</span>')
         elif tag == "insert":
-            html.append(
-                f'<span style="background-color:#d6ffd6; color:#006600;">{text2[j1:j2]}</span>'
-            )
+            html.append(f'<span style="background-color:#d6ffd6; color:#006600;">{text2[j1:j2]}</span>')
     return "".join(html)
 
 
@@ -317,6 +304,90 @@ def show_string_diff(old: str, new: str) -> None:
     display(HTML(html1))
 
 
+def show_chat_v2(messages: list[dict[str, str]]):
+    """
+    Print only content of messages in different colors:
+    system -> red, user -> orange, assistant -> green.
+    Automatically detects notebook environment and uses appropriate display.
+    """
+    # Detect if running in a notebook environment
+    try:
+        from IPython.core.getipython import get_ipython
+
+        ipython = get_ipython()
+        is_notebook = ipython is not None and "IPKernelApp" in ipython.config
+    except (ImportError, AttributeError):
+        is_notebook = False
+
+    if is_notebook:
+        # Use HTML display in notebook
+        from IPython.display import display, HTML
+
+        role_colors = {
+            "system": "red",
+            "user": "darkorange",
+            "assistant": "green",
+        }
+
+        role_labels = {
+            "system": "System Instruction:",
+            "user": "User:",
+            "assistant": "Assistant:",
+        }
+
+        html = "<div style='font-family:monospace; line-height:1.6em; white-space:pre-wrap;'>"
+        for i, msg in enumerate(messages):
+            role = msg.get("role", "unknown").lower()
+            content = msg.get("content", "")
+            # Escape HTML characters
+            content = (
+                content.replace("&", "&amp;")
+                .replace("<", "&lt;")
+                .replace(">", "&gt;")
+                .replace("\n", "<br>")
+                .replace("\t", "&nbsp;&nbsp;&nbsp;&nbsp;")
+                .replace(" ", "&nbsp;&nbsp;")
+            )
+            color = role_colors.get(role, "black")
+            label = role_labels.get(role, f"{role.capitalize()}:")
+            html += f"<div style='color:{color}'><strong>{label}</strong><br>{content}</div>"
+            # Add separator except after last message
+            if i < len(messages) - 1:
+                html += (
+                    "<div style='color:#888; margin:0.5em 0;'>───────────────────────────────────────────────────</div>"
+                )
+        html += "</div>"
+
+        display(HTML(html))
+    else:
+        # Use normal terminal printing with ANSI colors
+        role_colors = {
+            "system": "\033[91m",  # Red
+            "user": "\033[38;5;208m",  # Orange
+            "assistant": "\033[92m",  # Green
+        }
+        reset = "\033[0m"
+        separator_color = "\033[90m"  # Gray
+        bold = "\033[1m"
+
+        role_labels = {
+            "system": "System Instruction:",
+            "user": "User:",
+            "assistant": "Assistant:",
+        }
+
+        for i, msg in enumerate(messages):
+            role = msg.get("role", "unknown").lower()
+            content = msg.get("content", "")
+            color = role_colors.get(role, "")
+            label = role_labels.get(role, f"{role.capitalize()}:")
+            print(f"{color}{bold}{label}{reset}")
+            print(f"{color}{content}{reset}")
+            # Add separator except after last message
+            if i < len(messages) - 1:
+                print(f"{separator_color}─────────────────────────────────────────────────────────{reset}")
+
+
 def display_conversations(data1: Any, data2: Any, theme: str = "light") -> None:
     """
     Display two conversations side by side.
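
The only substantive change in these hunks is the new `show_chat_v2`, which prints just the message contents and picks its output mode by checking for an IPython kernel (`"IPKernelApp" in ipython.config`). A usage sketch, assuming the package is installed:

```python
from llm_utils import show_chat_v2  # also re-exported from llm_utils.chat_format

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "2 + 2 = 4."},
]

# In a Jupyter kernel this renders colored HTML; in a plain terminal it
# falls back to ANSI colors (system=red, user=orange, assistant=green),
# with a gray rule between consecutive messages.
show_chat_v2(messages)
```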
llm_utils/lm/__init__.py CHANGED
@@ -1,23 +1,31 @@
 from .async_lm.async_lm import AsyncLM
 from .async_lm.async_llm_task import AsyncLLMTask
 from .lm_base import LMBase, get_model_name
-from .llm_task import LLMTask
+from .llm import LLM
 from .base_prompt_builder import BasePromptBuilder
-from .llm_as_a_judge import LLMJudgeBase, ChainOfThought, TranslationEvaluatorJudge
+from .llm_signature import LLMSignature
 from .signature import Signature, InputField, OutputField, Input, Output
+from .mixins import (
+    TemperatureRangeMixin,
+    TwoStepPydanticMixin,
+    VLLMMixin,
+    ModelUtilsMixin,
+)
 
 __all__ = [
     "LMBase",
-    "LLMTask",
+    "LLM",
     "AsyncLM",
     "AsyncLLMTask",
     "BasePromptBuilder",
-    "LLMJudgeBase",
-    "ChainOfThought",
-    "TranslationEvaluatorJudge",
+    "LLMSignature",
     "Signature",
     "InputField",
     "OutputField",
     "Input",
     "Output",
+    "TemperatureRangeMixin",
+    "TwoStepPydanticMixin",
+    "VLLMMixin",
+    "ModelUtilsMixin",
 ]
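
The `lm` subpackage mirrors the top-level rename (`llm_task.py` becomes `llm.py`), replaces the three judge classes with a single `LLMSignature`, and newly exports four mixins. A sketch of the resulting import surface; the diff shows only the names, so anything about the mixins' behavior is an assumption:

```python
from llm_utils.lm import (
    LLM,                    # was LLMTask (now imported from the renamed .llm module)
    LLMSignature,           # replaces LLMJudgeBase, ChainOfThought, TranslationEvaluatorJudge
    Signature,              # field-style signature API, unchanged in this diff
    InputField,
    OutputField,
    TemperatureRangeMixin,  # newly exported; semantics not shown in this diff
    TwoStepPydanticMixin,
    VLLMMixin,
    ModelUtilsMixin,
)
```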