sutro 0.1.33__py3-none-any.whl → 0.1.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sutro/templates/classification.py ADDED
@@ -0,0 +1,117 @@
+ import json
+ from typing import Union, List
+ import polars as pl
+ import pandas as pd
+ from pydantic import BaseModel
+
+ from ..common import ModelOptions
+ from ..interfaces import BaseSutroClient
+
+
+ class ClassificationTemplates(BaseSutroClient):
+     def classify(
+         self,
+         data: Union[List, pd.DataFrame, pl.DataFrame, str],
+         classes: Union[dict[str, str], list[str]],
+         model: ModelOptions = "gemma-3-12b-it",
+         job_priority: int = 0,
+         name: Union[str, List[str]] = None,
+         description: Union[str, List[str]] = None,
+         output_column: str = "inference_result",
+         column: Union[str, List[str]] = None,
+         truncate_rows: bool = True,
+         include_scratchpad: bool = False,
+     ):
+         """
+         A simple template-style function to perform classification on the provided data with Sutro. The intention is that the implemented code should be easy to extend further, while showing a basic structure for large-scale classification with Sutro.
+
+         It uses structured outputs with a scratchpad field, enabling the model to reason step by step before providing the final classification.
+         The method supports various input formats, including lists, DataFrames (Polars or Pandas), file paths, and datasets.
+         The method will wait for the classification job to complete before returning the results.
+
+         Args:
+             data (Union[List, pd.DataFrame, pl.DataFrame, str]): The data to classify. Each row should contain some text to classify that fits one of the provided labels.
+             classes (Union[dict[str, str], list[str]]): The classification classes. Can be either:
+                 - A list of class names, e.g. ["Positive", "Negative", "Neutral"]
+                 - A dict mapping class labels to descriptions, e.g. {"Positive": "Expresses satisfaction...", ...}
+                 Providing descriptions can improve classification accuracy, especially for ambiguous or domain-specific categories.
+             model (ModelOptions, optional): The LLM to use. Defaults to "gemma-3-12b-it", a model chosen for its balance of performance and efficiency that also retains competency across a broad range of domains.
+             job_priority (int, optional): The priority of the job. Defaults to 0.
+             name (Union[str, List[str]], optional): A job name for experiment/metadata tracking purposes. Defaults to None.
+             description (Union[str, List[str]], optional): A job description for experiment/metadata tracking purposes. Defaults to None.
+             output_column (str, optional): The column name to store the classification results in if the input is a DataFrame. Defaults to "inference_result".
+             column (Union[str, List[str]], optional): The column name to use for classification. Required if data is a DataFrame, file path, or dataset. If a list is supplied, the listed columns are concatenated into a single column, accepting separator strings.
+             truncate_rows (bool, optional): If True, any rows whose token count exceeds the context window length of the selected model will be truncated to the maximum length that fits within the context window. Defaults to True.
+             include_scratchpad (bool, optional): If True, includes the model's thinking scratchpad in the output. If False, only returns the final classification. Defaults to False.
+
+         Returns:
+             The completed classification results for the provided data. If include_scratchpad is True, returns both the scratchpad and classification fields in a JSON object. If False, returns only the classification as a string.
+
+         """
+         if isinstance(classes, dict):
+             formatted_classes = "\n".join(
+                 [f"- {name}: {desc}" for name, desc in classes.items()]
+             )
+         else:
+             formatted_classes = "\n".join([f"- {c}" for c in classes])
+
+         system_prompt = f"""You are an expert classifier. Your task is to accurately categorize the input into one of the provided classes.
+
+ ## Classes
+
+ {formatted_classes}
+
+ ## Instructions
+
+ 1. **Analyze the input carefully**: Read and understand the full context - identify key elements, themes, and characteristics
+
+ 2. **Consider each class**: For each possible class, evaluate how similar the input is to its typical characteristics
+
+ 3. **Provide your reasoning in the scratchpad**: Think through which class fits best and why
+
+ 4. **Provide output**: Give your final classification
+
+ If needed, use the scratchpad field to work through steps 1-3, then provide your final answer in the classification field.
+
+ ## Guidelines
+
+ - Select exactly ONE class, even if multiple seem applicable (choose the best match)
+ - If the input is ambiguous, choose the closest fit and explain your reasoning
+ - Base your decision on the actual content, not assumptions or implications
+ - Similar inputs should receive the same classification
+
+ Respond using the structured format with scratchpad and classification fields."""
+
+         class ClassificationOutput(BaseModel):
+             # Since we're using structured outputs, we want to give the model some
+             # space to reason and think as needed
+             scratchpad: str
+             classification: str
+
+         job_id = self.infer(
+             data,
+             model,
+             name,
+             description,
+             system_prompt=system_prompt,
+             output_schema=ClassificationOutput,
+             column=column,
+             output_column=output_column,
+             job_priority=job_priority,
+             truncate_rows=truncate_rows,
+             stay_attached=False,
+         )
+
+         results = self.await_job_completion(job_id)
+
+         # Drop the scratchpad unless the caller asked for it
+         if not include_scratchpad:
+             results = results.with_columns(
+                 pl.col(output_column)
+                 .map_elements(
+                     lambda x: json.loads(x)["classification"], return_dtype=pl.Utf8
+                 )
+                 .alias(output_column)
+             )
+
+         return results
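For orientation, a minimal usage sketch of the new template. The `Sutro` client class name and its no-argument constructor are assumptions for illustration (client construction is not part of this diff); classify() and its parameters come from the code above.

# Hypothetical sketch: `Sutro` as the client class is an assumption;
# the classify() signature is taken from the diff above.
from sutro import Sutro

client = Sutro()
reviews = ["Great product, works perfectly!", "Broke after two days."]
results = client.classify(
    data=reviews,
    classes={
        "Positive": "Expresses satisfaction with the product",
        "Negative": "Expresses dissatisfaction or a defect",
    },
)
print(results)  # DataFrame with an "inference_result" column of class labels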
sutro/templates/embed.py ADDED
@@ -0,0 +1,53 @@
+ from typing import Union, List
+ import polars as pl
+ import pandas as pd
+ from ..common import EmbeddingModelOptions
+ from ..interfaces import BaseSutroClient
+
+
+ class EmbeddingTemplates(BaseSutroClient):
+     def embed(
+         self,
+         data: Union[List, pd.DataFrame, pl.DataFrame, str],
+         model: EmbeddingModelOptions = "qwen-3-embedding-0.6b",
+         job_priority: int = 0,
+         name: Union[str, List[str]] = None,
+         description: Union[str, List[str]] = None,
+         output_column: str = "inference_result",
+         column: Union[str, List[str]] = None,
+         truncate_rows: bool = True,
+     ):
+         """
+         A simple template-style function to generate embeddings for the provided data with Sutro. The intention is that the implemented code should be easy to extend further, while showing a basic structure for large-scale embedding generation with Sutro.
+
+         This method allows you to generate vector embeddings for the provided data using Sutro.
+         It supports various options for inputting data, such as lists, DataFrames (Polars or Pandas), file paths, and datasets.
+         The method will wait for the embedding job to complete before returning the results.
+
+         Args:
+             data (Union[List, pd.DataFrame, pl.DataFrame, str]): The data to generate embeddings for.
+             model (EmbeddingModelOptions, optional): The embedding model to use. Defaults to "qwen-3-embedding-0.6b", a model chosen because it is small and fast, yet performs well on a variety of tasks.
+             job_priority (int, optional): The priority of the job. Defaults to 0.
+             name (Union[str, List[str]], optional): A job name for experiment/metadata tracking purposes. Defaults to None.
+             description (Union[str, List[str]], optional): A job description for experiment/metadata tracking purposes. Defaults to None.
+             output_column (str, optional): The column name to store the embedding results in if the input is a DataFrame. Defaults to "inference_result".
+             column (Union[str, List[str]], optional): The column name to use for embedding generation. Required if data is a DataFrame, file path, or dataset. If a list is supplied, the listed columns are concatenated into a single column, accepting separator strings.
+             truncate_rows (bool, optional): If True, any rows whose token count exceeds the context window length of the selected model will be truncated to the maximum length that fits within the context window. Defaults to True.
+
+         Returns:
+             The completed embedding results for the provided data.
+
+         """
+         job_id = self.infer(
+             data,
+             model,
+             name,
+             description,
+             column,
+             output_column,
+             job_priority,
+             truncate_rows=truncate_rows,
+             stay_attached=False,
+         )
+
+         return self.await_job_completion(job_id)
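Likewise, a hedged sketch for the embedding template (same `Sutro` client assumption), embedding a text column from a Polars DataFrame:

# Hypothetical sketch; `Sutro` is an assumed client class name.
import polars as pl
from sutro import Sutro

client = Sutro()
docs = pl.DataFrame({"text": ["first document", "second document"]})
embeddings = client.embed(data=docs, column="text")  # blocks until the job completes
print(embeddings)  # vectors stored in the "inference_result" column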
sutro/templates/evals.py ADDED
@@ -0,0 +1,341 @@
+ from typing import Union, List, Tuple
+ import polars as pl
+ import pandas as pd
+ import numpy as np
+ import math
+ from ..common import ModelOptions
+ from ..interfaces import BaseSutroClient
+ from collections import defaultdict
+ from itertools import combinations
+
+
+ class Score(BaseSutroClient):
+     def score(
+         self,
+         data: Union[List, pd.DataFrame, pl.DataFrame, str],
+         model: ModelOptions = "gemma-3-12b-it",
+         job_priority: int = 0,
+         name: Union[str, List[str]] = None,
+         description: Union[str, List[str]] = None,
+         column: Union[str, List[str]] = None,
+         # function-specific parameters
+         criteria: Union[str, List[str]] = None,
+         score_column_name: str = "score",
+         range: Tuple[int, int] = (0, 10),
+     ):
+         """
+         A simple invocation of an LLM-as-a-judge numerical scoring function, with a default 0-10 range.
+
+         Accepts a normal Sutro input data type, as well as a string or list of strings to use as criteria,
+         a column name to use for the scoring, and a range to use for the scoring (default 0-10).
+
+         Returns a pandas or polars dataframe with the scores as a column, corresponding to the column name provided.
+         """
+
+         if isinstance(criteria, str):
+             criteria = [criteria]
+
+         system_prompt = f"""You are a judge. Your job is to score the data presented to you according to the following criteria:
+ {", ".join(criteria)}
+ Return a score between {range[0]} and {range[1]}, and nothing else."""
+
+         json_schema = {
+             "type": "object",
+             "properties": {
+                 f"{score_column_name}": {
+                     "type": "integer",
+                     "minimum": range[0],
+                     "maximum": range[1],
+                 },
+             },
+             "required": [score_column_name],
+         }
+
+         job_id = self.infer(
+             data=data,
+             model=model,
+             name=name,
+             description=description,
+             column=column,
+             system_prompt=system_prompt,
+             output_schema=json_schema,
+             job_priority=job_priority,
+             stay_attached=False,
+         )
+
+         res = self.await_job_completion(job_id)
+         if isinstance(data, pl.DataFrame):
+             return data.with_columns(
+                 pl.Series(score_column_name, res[score_column_name])
+             )
+         elif isinstance(data, pd.DataFrame):
+             return data.assign(**{score_column_name: res[score_column_name]})
+         else:
+             return res
+
+
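A hedged sketch of the scoring template in isolation, before the ranking half of this file (the `Sutro` client class is an assumption; score() and its parameters come from the code above):

# Hypothetical sketch; `Sutro` is an assumed client class name.
import polars as pl
from sutro import Sutro

client = Sutro()
answers = pl.DataFrame({"answer": ["Paris is the capital of France.", "The moon is made of cheese."]})
scored = client.score(data=answers, column="answer", criteria="factual accuracy")
print(scored)  # the original frame plus an integer "score" column in the 0-10 range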
+ class Rank(BaseSutroClient):
+     def rank(
+         self,
+         model: ModelOptions = "gemma-3-12b-it",
+         job_priority: int = 0,
+         name: Union[str, List[str]] = None,
+         description: Union[str, List[str]] = None,
+         # function-specific parameters
+         data: Union[
+             List[List], pd.DataFrame, pl.DataFrame, str
+         ] = None,  # data is always required, but this method also accepts a list of lists
+         option_labels: List[str] = None,
+         criteria: Union[str, List[str]] = None,
+         ranking_column_name: str = "ranking",
+         run_elo: bool = True,
+     ):
+         """
+         A simple invocation of an LLM-as-a-judge ranking (pairwise comparison) function, accepting multiple options to rank.
+         Accepts a list of lists or a pandas or polars dataframe, as well as option labels to use for the ranking.
+
+         If using a list of lists, the option labels should correspond to the labels you would like to use for the ranking, in the same order as the lists.
+         If using a pandas or polars dataframe, the option labels should correspond to the column names of the dataframe to use for the ranking.
+
+         Returns rankings ordered from best to worst, corresponding to the option labels.
+         If using a pandas or polars dataframe, the rankings will be returned as a column in the original dataframe.
+         """
+
+         if isinstance(criteria, str):
+             criteria = [criteria]
+
+         system_prompt = f"""You are a judge. Your job is to rank the options presented to you according to the following criteria:
+ {", ".join(criteria)}
+ The option labels are: {", ".join(option_labels)}
+ Return a ranking of the options as an ordered list of the labels from best to worst, and nothing else."""
+
+         json_schema = {
+             "type": "object",
+             "properties": {
+                 f"{ranking_column_name}": {
+                     "type": "array",
+                     "items": {"type": "string"},
+                 },
+             },
+             "required": [ranking_column_name],
+         }
+
+         original_data = data  # keep the caller's input so results can be attached to it
+         if isinstance(data, list):
+             # create a polars dataframe from the list of lists
+             data = pl.DataFrame(data, schema=option_labels)
+         elif isinstance(data, pd.DataFrame):
+             # convert to a polars dataframe
+             data = pl.from_pandas(data)
+
+         exprs = []  # because the option labels are the same as the column names, we don't use the built-in column concatenation helper function
+         for label in option_labels:
+             exprs.append(pl.lit(label + ":"))
+             exprs.append(pl.col(label))
+
+         data = data.select(
+             pl.concat_str(exprs, separator=" ", ignore_nulls=False).alias(
+                 "options_with_labels"
+             )
+         )
+
+         job_id = self.infer(
+             data=data,
+             column="options_with_labels",
+             model=model,
+             name=name,
+             description=description,
+             system_prompt=system_prompt,
+             output_schema=json_schema,
+             job_priority=job_priority,
+             stay_attached=False,
+         )
+
+         res = self.await_job_completion(job_id, output_column=ranking_column_name)
+
+         # This doesn't work when done as a single step, for some reason
+         res = (
+             res.with_columns(
+                 pl.col(ranking_column_name).str.json_decode().alias("_decoded")
+             )
+             .with_columns(
+                 pl.col("_decoded")
+                 .struct.field(ranking_column_name)
+                 .alias(ranking_column_name)
+             )
+             .drop("_decoded")
+         )
+
+         if run_elo:
+             elo_ratings = self.elo(data=res, column=ranking_column_name)
+             print(elo_ratings[["elo", "wins", "losses", "matches"]].to_markdown())
+
+         if isinstance(original_data, pl.DataFrame):
+             return original_data.with_columns(
+                 pl.Series(ranking_column_name, res[ranking_column_name])
+             )
+         elif isinstance(original_data, pd.DataFrame):
+             return original_data.assign(**{ranking_column_name: res[ranking_column_name]})
+         else:
+             return res
+
+     @staticmethod
+     def elo(
+         data: Union[List, pd.DataFrame, pl.DataFrame] = None,
+         column: Union[str, List[str]] = None,
+         laplace: float = 0.5,
+         max_iter: int = 1000,
+         tol: float = 1e-8,
+         elo_mean: float = 1500.0,
+     ):
+         """
+         Accepts ordered ranking outputs produced by the rank method, and produces an Elo rating for each label option.
+         """
+
+         if isinstance(data, pl.DataFrame):
+             if column is None:
+                 raise ValueError("column is required when using a polars dataframe")
+             rankings = data.select(pl.col(column)).to_series().to_list()
+         elif isinstance(data, pd.DataFrame):
+             if column is None:
+                 raise ValueError("column is required when using a pandas dataframe")
+             rankings = data[column].tolist()
+         else:
+             rankings = data
+
+         """
+         Convert ballots of ordered rankings into pairwise counts, then run Bradley–Terry MM.
+
+         rankings:
+           - Strict order: ["B", "A", "C"] means B>A, B>C, A>C
+           - With ties: ["B", ("A","C"), "D"] means B > A=C > D
+             (A and C tie once on that ballot)
+
+         Other params are passed to the underlying BT solver.
+         """
+         # --- build (winner, loser) counts and tie counts from rankings ---
+         pair_counts = defaultdict(float)
+         tie_counts = defaultdict(float)  # unordered keys (min(name), max(name))
+
+         def as_group(x):
+             # allow tuple/list/set to denote a tie group; strings remain atomic
+             if isinstance(x, (list, tuple, set)) and not isinstance(x, (str, bytes)):
+                 return list(x)
+             return [x]
+
+         for ballot in rankings:
+             # normalize the ballot into a list of groups, each group a list of items tied at that rank
+             groups = [as_group(g) for g in ballot if g is not None]
+
+             # wins across groups: every item in an earlier (better) group beats every item in any later group
+             for gi in range(len(groups)):
+                 for gj in range(gi + 1, len(groups)):
+                     for w in groups[gi]:
+                         for l in groups[gj]:
+                             if w != l:
+                                 pair_counts[(str(w), str(l))] += 1.0
+
+             # ties within a group: count one tie per unordered pair inside that group
+             for g in groups:
+                 if len(g) >= 2:
+                     for a, b in combinations(g, 2):
+                         a, b = str(a), str(b)
+                         key = (a, b) if a < b else (b, a)
+                         if a != b:
+                             tie_counts[key] += 1.0
+
+         pair_counts = dict(pair_counts)
+         ties = dict(tie_counts) if tie_counts else None
+
+         """
+         pair_counts: { (winner, loser): wins } for all observed directed pairs.
+         ties: optional { (a, b): tie_count } counted once per unordered pair (a, b).
+           If provided, each tie contributes 0.5 win to both directions.
+         laplace: additive smoothing applied to each *directed* count (prevents zeros).
+         """
+
+         # ---- Build model list ----
+         models = sorted(set([k[0] for k in pair_counts] + [k[1] for k in pair_counts]))
+         m = len(models)
+         idx = {name: i for i, name in enumerate(models)}
+
+         # ---- Build directed wins matrix W[i,j] = times i beat j ----
+         W = np.zeros((m, m), dtype=float)
+         for (w, l), c in pair_counts.items():
+             if w == l:
+                 continue
+             W[idx[w], idx[l]] += float(c)
+
+         # ---- Optional ties: add 0.5 to both directions for each tie ----
+         if ties:
+             for (a, b), t in ties.items():
+                 if a == b:
+                     continue
+                 i, j = idx[a], idx[b]
+                 W[i, j] += 0.5 * t
+                 W[j, i] += 0.5 * t
+
+         # ---- Laplace smoothing on directed edges (excluding the diagonal) ----
+         if laplace and laplace > 0:
+             W += laplace
+             np.fill_diagonal(W, 0.0)
+
+         # Unordered totals N_ij = W_ij + W_ji
+         N = W + W.T
+         np.fill_diagonal(N, 0.0)
+
+         # Guard: drop models with zero matches
+         active = N.sum(axis=1) > 0
+         if not np.all(active):
+             keep = np.where(active)[0]
+             models = [models[i] for i in keep]
+             idx = {name: i for i, name in enumerate(models)}
+             W = W[np.ix_(keep, keep)]
+             N = N[np.ix_(keep, keep)]
+             m = len(models)
+
+         # ---- Bradley–Terry via MM updates (Hunter 2004) ----
+         s = np.ones(m, dtype=float)  # abilities (positive)
+         for _ in range(max_iter):
+             s_old = s.copy()
+             w_i = W.sum(axis=1)  # total (smoothed) wins per model
+             # denom_i = sum_j N_ij / (s_i + s_j)
+             denom = (N / (s.reshape(-1, 1) + s.reshape(1, -1) + 1e-12)).sum(axis=1)
+             upd = denom > 0
+             s[upd] = w_i[upd] / denom[upd]
+             # normalize to keep the scale stable (geometric mean = 1)
+             s /= np.prod(s) ** (1.0 / m)
+             if np.max(np.abs(np.log(s + 1e-12) - np.log(s_old + 1e-12))) < tol:
+                 break
+
+         # ---- Convert to beta and Elo-like ratings ----
+         beta = np.log(s + 1e-12)
+         elo = (400.0 / math.log(10.0)) * beta
+         elo = elo - np.mean(elo) + elo_mean  # center
+
+         # ---- Summaries and expected probabilities ----
+         wins = W.sum(axis=1)
+         losses = W.sum(axis=0)
+         matches = N.sum(axis=1)  # unordered total vs. all opponents
+
+         ratings = pd.DataFrame(
+             {
+                 "ability": s,
+                 "beta": beta,
+                 "elo": elo,
+                 "wins": wins,
+                 "losses": losses,
+                 "matches": matches,
+             },
+             index=models,
+         ).sort_values("elo", ascending=False)
+
+         P = s.reshape(-1, 1) / (s.reshape(-1, 1) + s.reshape(1, -1))
+         np.fill_diagonal(P, np.nan)
+         p_matrix = pd.DataFrame(P, index=models, columns=models)  # not returned; kept for extension
+
+         return ratings
+
+
+ class EvalTemplates(Score, Rank):
+     pass
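And a sketch of the ranking template, again assuming a `Sutro` client class. When run_elo=True, rank feeds its ballots to elo(): each ordered ranking is expanded into pairwise wins, Bradley–Terry abilities s_i are fit with the MM update s_i = w_i / sum_j [ N_ij / (s_i + s_j) ], and the log-abilities are rescaled by 400/ln(10) and centered at 1500 so they read like Elo ratings.

# Hypothetical sketch; `Sutro` is an assumed client class name.
import polars as pl
from sutro import Sutro

client = Sutro()
candidates = pl.DataFrame({
    "model_a": ["Answer from model A", "Another answer from A"],
    "model_b": ["Answer from model B", "Another answer from B"],
})
ranked = client.rank(
    data=candidates,
    option_labels=["model_a", "model_b"],
    criteria="helpfulness and accuracy",
    run_elo=True,  # also prints an Elo table with wins/losses/matches per label
)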
sutro/validation.py ADDED
@@ -0,0 +1,60 @@
+ import importlib.metadata
+ import json
+ import os
+
+ import requests
+
+ from sutro.common import to_colored_text
+
+
+ def check_version(package_name: str):
+     try:
+         # Local version
+         local_version = importlib.metadata.version(package_name)
+     except importlib.metadata.PackageNotFoundError:
+         print(f"{package_name} is not installed.")
+         return
+
+     try:
+         # Latest release from PyPI
+         resp = requests.get(f"https://pypi.org/pypi/{package_name}/json", timeout=2)
+         resp.raise_for_status()
+         latest_version = resp.json()["info"]["version"]
+
+         if local_version != latest_version:
+             msg = (
+                 f"⚠️ You are using {package_name} {local_version}, "
+                 f"but the latest release is {latest_version}. "
+                 f"Run `[uv] pip install -U {package_name}` to upgrade."
+             )
+             print(to_colored_text(msg, state="callout"))
+     except Exception:
+         # Fail silently; a version check shouldn't block usage
+         pass
+
+
+ def check_for_api_key():
+     """
+     Check for an API key in the user's home directory.
+
+     This method looks for a configuration file named 'config.json' in the
+     '.sutro' directory within the user's home directory.
+     If the file exists, it attempts to read the API key from it.
+
+     Returns:
+         str or None: The API key if found in the configuration file, or None if not found.
+
+     Note:
+         The expected structure of the config.json file is:
+         {
+             "api_key": "your_api_key_here"
+         }
+     """
+     CONFIG_DIR = os.path.expanduser("~/.sutro")
+     CONFIG_FILE = os.path.join(CONFIG_DIR, "config.json")
+     if os.path.exists(CONFIG_FILE):
+         with open(CONFIG_FILE, "r") as f:
+             config = json.load(f)
+             return config.get("api_key")
+     else:
+         return None
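These helpers can be called directly; a minimal sketch (the functions exist as shown above, and the sutro.validation module path is confirmed by the RECORD below, but calling them at import time is an assumption about how the SDK wires them in):

from sutro.validation import check_version, check_for_api_key

check_version("sutro")         # warns if a newer release exists on PyPI
api_key = check_for_api_key()  # reads ~/.sutro/config.json, or returns None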
sutro-0.1.33.dist-info/METADATA → sutro-0.1.40.dist-info/METADATA
@@ -1,24 +1,22 @@
  Metadata-Version: 2.4
  Name: sutro
- Version: 0.1.33
+ Version: 0.1.40
  Summary: Sutro Python SDK
- Project-URL: Homepage, https://sutro.sh
- Project-URL: Documentation, https://docs.sutro.sh
  License-Expression: Apache-2.0
- License-File: LICENSE
+ Requires-Dist: numpy>=2.1.1,<3.0.0
+ Requires-Dist: requests>=2.32.3,<3.0.0
+ Requires-Dist: pandas>=2.2.3,<3.0.0
+ Requires-Dist: polars>=1.33.0,<=1.34.0
+ Requires-Dist: click>=8.1.7,<9.0.0
+ Requires-Dist: colorama>=0.4.4,<1.0.0
+ Requires-Dist: yaspin>=3.2.0,<4.0.0
+ Requires-Dist: tqdm>=4.67.1,<5.0.0
+ Requires-Dist: pydantic>=2.11.4,<3.0.0
+ Requires-Dist: pyarrow>=21.0.0,<22.0.0
+ Requires-Dist: tabulate>=0.9.0,<1.0.0
  Requires-Python: >=3.10
- Requires-Dist: click==8.1.7
- Requires-Dist: colorama==0.4.4
- Requires-Dist: numpy==2.1.1
- Requires-Dist: pandas==2.2.3
- Requires-Dist: polars==1.8.2
- Requires-Dist: pyarrow==21.0.0
- Requires-Dist: pydantic==2.11.4
- Requires-Dist: requests==2.32.3
- Requires-Dist: tqdm==4.67.1
- Requires-Dist: yaspin==3.1.0
- Provides-Extra: dev
- Requires-Dist: ruff==0.13.1; extra == 'dev'
+ Project-URL: Documentation, https://docs.sutro.sh
+ Project-URL: Homepage, https://sutro.sh
  Description-Content-Type: text/markdown
 
  ![Sutro Logo](./assets/sutro-logo-dark.png)
sutro-0.1.40.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+ sutro/__init__.py,sha256=jf7vtLNyWoY6IISSpfrDRR_anBt3UyeRh1XTgitcd6U,584
+ sutro/cli.py,sha256=0NuqRInXA-_7TRw-T0OxP8otmUHUarMtY7kuLbWDous,13751
+ sutro/common.py,sha256=FuTYTzy82Ul56r9SVH0XMOqcBBspDAFvrtHM93ZbT_8,6945
+ sutro/interfaces.py,sha256=nYdb2jj1-0MGCPqq_33E-dBChJs8XuzQ51AV75F58J0,3021
+ sutro/sdk.py,sha256=KKmk10AiXG7CgH_LaF22QNVmk_Noqs9ilLpLeopihS8,58446
+ sutro/templates/classification.py,sha256=iNFiyuR8bZc9Xe-NdimklpQUveqg3p_eJOlEAHaj7Is,6080
+ sutro/templates/embed.py,sha256=csvLA0hw5Qaro_yZvALRRp9_SbfWABFN0iQXrf8E8_I,2941
+ sutro/templates/evals.py,sha256=YRmA2yrvfZ0cBvhCdXgp5SXnEvGzxJ7t85hrHlz7vhE,12910
+ sutro/validation.py,sha256=FlFH5e5PAPIPpCrzU7mwfZKDDvrmkHt2yYsFm0Ahfmg,1849
+ sutro-0.1.40.dist-info/WHEEL,sha256=X16MKk8bp2DRsAuyteHJ-9qOjzmnY0x1aj0P1ftqqWA,78
+ sutro-0.1.40.dist-info/entry_points.txt,sha256=s-dtPZ0AScjvR8S_ykhzXxtVcUjrRlxVxyJymI81A3E,41
+ sutro-0.1.40.dist-info/METADATA,sha256=d36n5krdsupu_rQC2-Maq6NMO2Ymk5VHtCFG93tZhAI,6232
+ sutro-0.1.40.dist-info/RECORD,,
sutro-0.1.40.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: uv 0.9.2
+ Root-Is-Purelib: true
+ Tag: py3-none-any
sutro-0.1.33.dist-info/entry_points.txt → sutro-0.1.40.dist-info/entry_points.txt
@@ -1,2 +1,3 @@
  [console_scripts]
  sutro = sutro.cli:cli
+
sutro-0.1.33.dist-info/RECORD REMOVED
@@ -1,8 +0,0 @@
- sutro/__init__.py,sha256=yUiVwcZ8QamSqDdRHgzoANyTZ-x3cPzlt2Fs5OllR_w,402
- sutro/cli.py,sha256=_FU8PwP4dMzXXg5ldxCXP3kaZvQtOKdA8Kzjc34xmQ0,13727
- sutro/sdk.py,sha256=DOvAQuL_5RiXwTfQC3FnNiE3x9DZBugEhDJ8r494NKo,56980
- sutro-0.1.33.dist-info/METADATA,sha256=rvf-ZGByzYtE8ArzXDvx49TCUCtJxXAEVpJpBjtJfYk,6206
- sutro-0.1.33.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- sutro-0.1.33.dist-info/entry_points.txt,sha256=eXvr4dvMV4UmZgR0zmrY8KOmNpo64cJkhNDywiadRFM,40
- sutro-0.1.33.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- sutro-0.1.33.dist-info/RECORD,,
sutro-0.1.33.dist-info/WHEEL REMOVED
@@ -1,4 +0,0 @@
- Wheel-Version: 1.0
- Generator: hatchling 1.27.0
- Root-Is-Purelib: true
- Tag: py3-none-any