speedy-utils 1.1.21__py3-none-any.whl → 1.1.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_utils/__init__.py +22 -3
- llm_utils/lm/__init__.py +10 -0
- llm_utils/lm/llm_as_a_judge.py +390 -0
- llm_utils/lm/llm_task.py +172 -251
- llm_utils/lm/signature.py +282 -0
- llm_utils/lm/utils.py +332 -110
- speedy_utils/multi_worker/process.py +125 -25
- speedy_utils/multi_worker/thread.py +341 -226
- {speedy_utils-1.1.21.dist-info → speedy_utils-1.1.23.dist-info}/METADATA +1 -1
- {speedy_utils-1.1.21.dist-info → speedy_utils-1.1.23.dist-info}/RECORD +12 -11
- llm_utils/lm/lm.py +0 -207
- {speedy_utils-1.1.21.dist-info → speedy_utils-1.1.23.dist-info}/WHEEL +0 -0
- {speedy_utils-1.1.21.dist-info → speedy_utils-1.1.23.dist-info}/entry_points.txt +0 -0
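A wheel is an ordinary zip archive, and the per-file summary above comes straight from comparing the `*.dist-info/RECORD` listings of the two versions. A minimal sketch of that comparison, assuming both wheels have already been downloaded (the local filenames below are placeholders):

import zipfile

def read_record(wheel_path: str) -> dict[str, str]:
    """Map each packaged file to its 'sha256=...,size' entry from RECORD."""
    entries: dict[str, str] = {}
    with zipfile.ZipFile(wheel_path) as whl:
        record_name = next(n for n in whl.namelist() if n.endswith(".dist-info/RECORD"))
        for line in whl.read(record_name).decode().splitlines():
            if not line.strip():
                continue
            path, _, rest = line.partition(",")
            entries[path] = rest
    return entries

# Hypothetical local copies of the two published wheels.
old = read_record("speedy_utils-1.1.21-py3-none-any.whl")
new = read_record("speedy_utils-1.1.23-py3-none-any.whl")

for path in sorted(old.keys() | new.keys()):
    if old.get(path) != new.get(path):
        status = "added" if path not in old else "removed" if path not in new else "changed"
        print(f"{status:8} {path}")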
{speedy_utils-1.1.21.dist-info → speedy_utils-1.1.23.dist-info}/RECORD

@@ -1,16 +1,17 @@
-llm_utils/__init__.py,sha256=
+llm_utils/__init__.py,sha256=gUmdXk6DF7dTKgYr23LdnkXLNT6x8bUZoacKQJ9pi8I,1625
 llm_utils/group_messages.py,sha256=Oe2tlhg-zRodG1-hodYebddrR77j9UdE05LzJw0EvYI,3622
 llm_utils/chat_format/__init__.py,sha256=8dBIUqFJvkgQYedxBtcyxt-4tt8JxAKVap2JlTXmgaM,737
 llm_utils/chat_format/display.py,sha256=3jKDm4OTrvytK1qBhSOjRLltUIObHsYFdBLgm8SVDE8,14159
 llm_utils/chat_format/transform.py,sha256=eU0c3PdAHCNLuGP1UqPwln0B34Lv3bt_uV9v9BrlCN4,5402
 llm_utils/chat_format/utils.py,sha256=xTxN4HrLHcRO2PfCTR43nH1M5zCa7v0kTTdzAcGkZg0,1229
-llm_utils/lm/__init__.py,sha256=
+llm_utils/lm/__init__.py,sha256=znjUTzke2tmCRWkR46sbOQPcRNe5oEbLo5zqg6Vxud0,632
 llm_utils/lm/base_prompt_builder.py,sha256=OLqyxbA8QeYIVFzB9EqxUiE_P2p4_MD_Lq4WSwxFtKU,12136
-llm_utils/lm/
-llm_utils/lm/
+llm_utils/lm/llm_as_a_judge.py,sha256=LwqzlIMSBbpv6A2Qq8-fhVO2CGO7_BtU6j0PXoIWFOA,14022
+llm_utils/lm/llm_task.py,sha256=gawOtoP-LOH-8iaUI_2-TXvhFAVuj2fWGxeQZ1xytAo,25288
 llm_utils/lm/lm_base.py,sha256=pqbHZOdR7yUMpvwt8uBG1dZnt76SY_Wk8BkXQQ-mpWs,9557
 llm_utils/lm/openai_memoize.py,sha256=KToCcB_rhyrULxolnwMfQgl5GNrAeykePxuLS4hBjtc,3442
-llm_utils/lm/
+llm_utils/lm/signature.py,sha256=s3Zjxjs6AU97jbz1LZ2BTGw-F9aFCF3G346gSMtyEpE,10370
+llm_utils/lm/utils.py,sha256=25oOznZhbBWfen1-X1PXQfO09kQZgP5V9CDuqLrf_ZU,12440
 llm_utils/lm/async_lm/__init__.py,sha256=PUBbCuf5u6-0GBUu-2PI6YAguzsyXj-LPkU6vccqT6E,121
 llm_utils/lm/async_lm/_utils.py,sha256=P1-pUDf_0pDmo8WTIi43t5ARlyGA1RIJfpAhz-gfA5g,6105
 llm_utils/lm/async_lm/async_llm_task.py,sha256=-BVOk18ZD8eC2obTLgiPq39f2PP3cji17Ku-Gb7c7Xo,18683
@@ -39,12 +40,12 @@ speedy_utils/common/utils_io.py,sha256=-RkQjYGa3zVqpgVInsdp8dbS5oLwdJdUsRz1XIUSJ
 speedy_utils/common/utils_misc.py,sha256=cdEuBBpiB1xpuzj0UBDHDuTIerqsMIw37ENq6EXliOw,1795
 speedy_utils/common/utils_print.py,sha256=syRrnSFtguxrV-elx6DDVcSGu4Qy7D_xVNZhPwbUY4A,4864
 speedy_utils/multi_worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-speedy_utils/multi_worker/process.py,sha256=
-speedy_utils/multi_worker/thread.py,sha256=
+speedy_utils/multi_worker/process.py,sha256=RGGGnbZXCbEbdmxFVmnNfyccClAlflzRPE0d1C3CeeE,11385
+speedy_utils/multi_worker/thread.py,sha256=UniMl8mw-Xw1y3aU9bKGMtBSlQj05QhFouWb5aVkATI,21679
 speedy_utils/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 speedy_utils/scripts/mpython.py,sha256=IvywP7Y0_V6tWfMP-4MjPvN5_KfxWF21xaLJsCIayCk,3821
 speedy_utils/scripts/openapi_client_codegen.py,sha256=f2125S_q0PILgH5dyzoKRz7pIvNEjCkzpi4Q4pPFRZE,9683
-speedy_utils-1.1.
-speedy_utils-1.1.
-speedy_utils-1.1.
-speedy_utils-1.1.
+speedy_utils-1.1.23.dist-info/METADATA,sha256=xmQbqlIBS8fHw9ZZFG2kLftUiZX37cCt0hMmFc7putk,8028
+speedy_utils-1.1.23.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+speedy_utils-1.1.23.dist-info/entry_points.txt,sha256=1rrFMfqvaMUE9hvwGiD6vnVh98kmgy0TARBj-v0Lfhs,244
+speedy_utils-1.1.23.dist-info/RECORD,,
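Each RECORD entry has the form `path,sha256=<digest>,<size in bytes>`, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with the trailing `=` padding stripped. A sketch of how one of the new hashes above could be re-checked against an unpacked file (the local path is hypothetical):

import base64
import hashlib

def record_hash(path: str) -> str:
    """Return the RECORD-style digest: urlsafe base64 of SHA-256, no '=' padding."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# Hypothetical path to a file unpacked from the 1.1.23 wheel.
print("sha256=" + record_hash("llm_utils/lm/signature.py"))
# Expected per the RECORD above: sha256=s3Zjxjs6AU97jbz1LZ2BTGw-F9aFCF3G346gSMtyEpE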
llm_utils/lm/lm.py DELETED

@@ -1,207 +0,0 @@
-# # from ._utils import *
-# from typing import (
-#     Any,
-#     List,
-#     Literal,
-#     Optional,
-#     Type,
-#     Union,
-#     cast,
-# )
-
-# from loguru import logger
-# from openai import AuthenticationError, BadRequestError, OpenAI, RateLimitError
-# from pydantic import BaseModel
-# from speedy_utils import jloads
-
-# # from llm_utils.lm.async_lm.async_llm_task import OutputModelType
-# from llm_utils.lm.lm_base import LMBase
-
-# from .async_lm._utils import (
-#     LegacyMsgs,
-#     Messages,
-#     OutputModelType,
-#     ParsedOutput,
-#     RawMsgs,
-# )
-
-
-# class LM(LMBase):
-#     """Unified **sync** language-model wrapper with optional JSON parsing."""
-
-#     def __init__(
-#         self,
-#         *,
-#         model: Optional[str] = None,
-#         response_model: Optional[type[BaseModel]] = None,
-#         temperature: float = 0.0,
-#         max_tokens: int = 2_000,
-#         base_url: Optional[str] = None,
-#         api_key: Optional[str] = None,
-#         cache: bool = True,
-#         ports: Optional[List[int]] = None,
-#         top_p: float = 1.0,
-#         presence_penalty: float = 0.0,
-#         top_k: int = 1,
-#         repetition_penalty: float = 1.0,
-#         frequency_penalty: Optional[float] = None,
-#     ) -> None:
-
-#         if model is None:
-#             if base_url is None:
-#                 raise ValueError("Either model or base_url must be provided")
-#             models = OpenAI(base_url=base_url, api_key=api_key or 'abc').models.list().data
-#             assert len(models) == 1, f"Found {len(models)} models, please specify one."
-#             model = models[0].id
-#             print(f"Using model: {model}")
-
-#         super().__init__(
-#             ports=ports,
-#             base_url=base_url,
-#             cache=cache,
-#             api_key=api_key,
-#         )
-
-#         # Model behavior options
-#         self.response_model = response_model
-
-#         # Store all model-related parameters in model_kwargs
-#         self.model_kwargs = dict(
-#             model=model,
-#             temperature=temperature,
-#             max_tokens=max_tokens,
-#             top_p=top_p,
-#             presence_penalty=presence_penalty,
-#         )
-#         self.extra_body = dict(
-#             top_k=top_k,
-#             repetition_penalty=repetition_penalty,
-#             frequency_penalty=frequency_penalty,
-#         )
-
-#     def _unified_client_call(
-#         self,
-#         messages: RawMsgs,
-#         extra_body: Optional[dict] = None,
-#         max_tokens: Optional[int] = None,
-#     ) -> dict:
-#         """Unified method for all client interactions (caching handled by MOpenAI)."""
-#         converted_messages: Messages = (
-#             self._convert_messages(cast(LegacyMsgs, messages))
-#             if messages and isinstance(messages[0], dict)
-#             else cast(Messages, messages)
-#         )
-#         if max_tokens is not None:
-#             self.model_kwargs["max_tokens"] = max_tokens
-
-#         try:
-#             # Get completion from API (caching handled by MOpenAI)
-#             call_kwargs = {
-#                 "messages": converted_messages,
-#                 **self.model_kwargs,
-#             }
-#             if extra_body:
-#                 call_kwargs["extra_body"] = extra_body
-
-#             completion = self.client.chat.completions.create(**call_kwargs)
-
-#             if hasattr(completion, "model_dump"):
-#                 completion = completion.model_dump()
-
-#         except (AuthenticationError, RateLimitError, BadRequestError) as exc:
-#             error_msg = f"OpenAI API error ({type(exc).__name__}): {exc}"
-#             logger.error(error_msg)
-#             raise
-
-#         return completion
-
-#     def __call__(
-#         self,
-#         prompt: Optional[str] = None,
-#         messages: Optional[RawMsgs] = None,
-#         max_tokens: Optional[int] = None,
-#     ):  # -> tuple[Any | dict[Any, Any], list[ChatCompletionMessagePar...:
-#         """Unified sync call for language model, returns (assistant_message.model_dump(), messages)."""
-#         if (prompt is None) == (messages is None):
-#             raise ValueError("Provide *either* `prompt` or `messages` (but not both).")
-
-#         if prompt is not None:
-#             messages = [{"role": "user", "content": prompt}]
-
-#         assert messages is not None
-
-#         openai_msgs: Messages = (
-#             self._convert_messages(cast(LegacyMsgs, messages))
-#             if isinstance(messages[0], dict)
-#             else cast(Messages, messages)
-#         )
-
-#         assert self.model_kwargs["model"] is not None, (
-#             "Model must be set before making a call."
-#         )
-
-#         # Use unified client call
-#         raw_response = self._unified_client_call(
-#             list(openai_msgs), max_tokens=max_tokens
-#         )
-
-#         if hasattr(raw_response, "model_dump"):
-#             raw_response = raw_response.model_dump()  # type: ignore
-
-#         # Extract the assistant's message
-#         assistant_msg = raw_response["choices"][0]["message"]
-#         # Build the full messages list (input + assistant reply)
-#         full_messages = list(messages) + [
-#             {"role": assistant_msg["role"], "content": assistant_msg["content"]}
-#         ]
-#         # Return the OpenAI message as model_dump (if available) and the messages list
-#         if hasattr(assistant_msg, "model_dump"):
-#             msg_dump = assistant_msg.model_dump()
-#         else:
-#             msg_dump = dict(assistant_msg)
-#         return msg_dump, full_messages
-
-#     def parse(
-#         self,
-#         messages: Messages,
-#         response_model: Optional[type[BaseModel]] = None,
-#     ) -> ParsedOutput[BaseModel]:
-#         """Parse response using OpenAI's native parse API."""
-#         # Use provided response_model or fall back to instance default
-#         model_to_use = response_model or self.response_model
-#         assert model_to_use is not None, "response_model must be provided or set at init."
-
-#         # Use OpenAI's native parse API directly
-#         response = self.client.chat.completions.parse(
-#             model=self.model_kwargs["model"],
-#             messages=messages,
-#             response_format=model_to_use,
-#             **{k: v for k, v in self.model_kwargs.items() if k != "model"}
-#         )
-
-#         parsed = response.choices[0].message.parsed
-#         completion = response.model_dump() if hasattr(response, "model_dump") else {}
-#         full_messages = list(messages) + [
-#             {"role": "assistant", "content": parsed}
-#         ]
-
-#         return ParsedOutput(
-#             messages=full_messages,
-#             parsed=cast(BaseModel, parsed),
-#             completion=completion,
-#             model_kwargs=self.model_kwargs,
-#         )
-
-
-
-#     def __enter__(self):
-#         return self
-
-#     def __exit__(self, exc_type, exc_val, exc_tb):
-#         if hasattr(self, "_last_client"):
-#             last_client = self._last_client  # type: ignore
-#             if hasattr(last_client, "close"):
-#                 last_client.close()
-#             else:
-#                 logger.warning("No last client to close")
-LM = None
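The deleted module was a commented-out synchronous wrapper that funnelled chat calls through `client.chat.completions.create` and structured output through the SDK's parse helper with a Pydantic `response_model`. For reference, that parse pattern looks roughly like the sketch below; the model name, prompt, and `Judgement` class are placeholders, and `client.chat.completions.parse` is assumed to exist in the installed openai version (older releases expose it as `client.beta.chat.completions.parse`):

from openai import OpenAI
from pydantic import BaseModel

class Judgement(BaseModel):
    score: int
    reason: str

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Structured-output call: the SDK validates the reply against the Pydantic model.
response = client.chat.completions.parse(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "Rate this answer from 1-5 and explain."}],
    response_format=Judgement,
)
print(response.choices[0].message.parsed)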
{speedy_utils-1.1.21.dist-info → speedy_utils-1.1.23.dist-info}/WHEEL
File without changes

{speedy_utils-1.1.21.dist-info → speedy_utils-1.1.23.dist-info}/entry_points.txt
File without changes