docent-python 0.1.21a0__py3-none-any.whl → 0.1.23a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of docent-python might be problematic.

docent/sdk/client.py CHANGED
@@ -8,7 +8,8 @@ from tqdm import tqdm
 
 from docent._log_util.logger import get_logger
 from docent.data_models.agent_run import AgentRun
-from docent.data_models.judge import JudgeRunLabel
+from docent.data_models.judge import Label
+from docent.judges.util.meta_schema import validate_judge_result_schema
 from docent.loaders import load_inspect
 
 logger = get_logger(__name__)
@@ -50,9 +51,15 @@ class Docent:
         self._login(api_key)
 
     def _handle_response_errors(self, response: requests.Response):
-        """Handle API response and raise informative errors.
-        TODO: make this more informative."""
-        response.raise_for_status()
+        """Handle API response and raise informative errors."""
+        if response.status_code >= 400:
+            try:
+                error_data = response.json()
+                detail = error_data.get("detail", response.text)
+            except Exception:
+                detail = response.text
+
+            raise requests.HTTPError(f"HTTP {response.status_code}: {detail}", response=response)
 
     def _login(self, api_key: str):
         """Login with email/password to establish session."""
@@ -253,30 +260,59 @@ class Docent:
         clustering_state = self.get_clustering_state(collection_id, rubric_id)
         return clustering_state.get("assignments", {})
 
+    def create_label_set(
+        self,
+        collection_id: str,
+        name: str,
+        label_schema: dict[str, Any],
+        description: str | None = None,
+    ) -> str:
+        """Create a new label set with a JSON schema.
+
+        Args:
+            collection_id: ID of the collection.
+            name: Name of the label set.
+            label_schema: JSON schema for validating labels in this set.
+            description: Optional description of the label set.
+
+        Returns:
+            str: The ID of the created label set.
+
+        Raises:
+            ValueError: If the response is missing the label_set_id.
+            jsonschema.ValidationError: If the label schema is invalid.
+            requests.exceptions.HTTPError: If the API request fails.
+        """
+        validate_judge_result_schema(label_schema)
+
+        url = f"{self._server_url}/label/{collection_id}/label_set"
+        payload = {
+            "name": name,
+            "label_schema": label_schema,
+            "description": description,
+        }
+        response = self._session.post(url, json=payload)
+        self._handle_response_errors(response)
+        return response.json()["label_set_id"]
+
     def add_label(
         self,
         collection_id: str,
-        rubric_id: str,
-        label: JudgeRunLabel,
-    ) -> dict[str, Any]:
-        """Attach a manual label to an agent run for a rubric.
+        label: Label,
+    ) -> dict[str, str]:
+        """Create a label in a label set.
 
         Args:
-            collection_id: ID of the Collection that owns the rubric.
-            rubric_id: ID of the rubric the label applies to.
-            label: A `JudgeRunLabel` that must comply with the rubric's output schema.
+            collection_id: ID of the Collection.
+            label: A `Label` object that must comply with the label set's schema.
 
         Returns:
-            dict: API response containing a status message.
+            dict: API response containing the label_id.
 
         Raises:
-            ValueError: If the label does not target the rubric specified in the path.
             requests.exceptions.HTTPError: If the API request fails or validation errors occur.
         """
-        if label.rubric_id != rubric_id:
-            raise ValueError("Label rubric_id must match the rubric_id argument")
-
-        url = f"{self._server_url}/rubric/{collection_id}/rubric/{rubric_id}/label"
+        url = f"{self._server_url}/label/{collection_id}/label"
         payload = {"label": label.model_dump(mode="json")}
         response = self._session.post(url, json=payload)
         self._handle_response_errors(response)
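
Taken together with the import changes above, the new label-set flow looks roughly like the sketch below. The method names and URL routes come from this diff; the `Docent` constructor arguments, the example JSON schema, and the `Label(...)` field names are assumptions, since the `Label` model itself is not shown here:

```python
from docent.data_models.judge import Label
from docent.sdk.client import Docent

client = Docent(api_key="dk-...")  # hypothetical constructor arguments
collection_id = "my-collection-id"  # illustrative ID

# The schema is checked client-side by validate_judge_result_schema() before the POST,
# so an invalid schema raises jsonschema.ValidationError without a network call.
label_schema = {
    "type": "object",
    "properties": {"severity": {"type": "integer", "minimum": 0, "maximum": 3}},
    "required": ["severity"],
}

label_set_id = client.create_label_set(
    collection_id,
    name="manual-severity",
    label_schema=label_schema,
    description="Hand-assigned severity ratings",
)

# Hypothetical Label fields; the real pydantic model may use different names.
label = Label(agent_run_id="run-id", label_set_id=label_set_id, label={"severity": 2})
created = client.add_label(collection_id, label)
print(created["label_id"])
```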
@@ -285,55 +321,50 @@ class Docent:
     def add_labels(
         self,
         collection_id: str,
-        rubric_id: str,
-        labels: list[JudgeRunLabel],
+        labels: list[Label],
     ) -> dict[str, Any]:
-        """Attach multiple manual labels to a rubric.
+        """Create multiple labels.
 
         Args:
-            collection_id: ID of the Collection that owns the rubric.
-            rubric_id: ID of the rubric the labels apply to.
-            labels: List of `JudgeRunLabel` objects.
+            collection_id: ID of the Collection.
+            labels: List of `Label` objects.
 
         Returns:
-            dict: API response containing status information.
+            dict: API response containing label_ids list and optional errors list.
 
         Raises:
             ValueError: If no labels are provided.
-            ValueError: If any label targets a different rubric.
             requests.exceptions.HTTPError: If the API request fails.
         """
         if not labels:
             raise ValueError("labels must contain at least one entry")
 
-        rubric_ids = {label.rubric_id for label in labels}
-        if rubric_ids != {rubric_id}:
-            raise ValueError(
-                "All labels must specify the same rubric_id that is provided to add_labels"
-            )
-
-        payload = {"labels": [l.model_dump(mode="json") for l in labels]}
-
-        url = f"{self._server_url}/rubric/{collection_id}/rubric/{rubric_id}/labels"
+        url = f"{self._server_url}/label/{collection_id}/labels"
+        payload = {"labels": [label.model_dump(mode="json") for label in labels]}
         response = self._session.post(url, json=payload)
         self._handle_response_errors(response)
         return response.json()
 
-    def get_labels(self, collection_id: str, rubric_id: str) -> list[dict[str, Any]]:
-        """Retrieve all manual labels for a rubric.
+    def get_labels(
+        self, collection_id: str, label_set_id: str, filter_valid_labels: bool = False
+    ) -> list[dict[str, Any]]:
+        """Retrieve all labels in a label set.
 
         Args:
-            collection_id: ID of the Collection that owns the rubric.
-            rubric_id: ID of the rubric to fetch labels for.
+            collection_id: ID of the Collection.
+            label_set_id: ID of the label set to fetch labels for.
+            filter_valid_labels: If True, only return labels that match the label set schema
+                INCLUDING requirements. Default is False (returns all labels).
 
         Returns:
-            list: List of label dictionaries. Each includes agent_run_id and label content.
+            list: List of label dictionaries.
 
         Raises:
             requests.exceptions.HTTPError: If the API request fails.
         """
-        url = f"{self._server_url}/rubric/{collection_id}/rubric/{rubric_id}/labels"
-        response = self._session.get(url)
+        url = f"{self._server_url}/label/{collection_id}/label_set/{label_set_id}/labels"
+        params = {"filter_valid_labels": filter_valid_labels}
+        response = self._session.get(url, params=params)
         self._handle_response_errors(response)
         return response.json()
 
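
Continuing the sketch above for the bulk and read paths, and showing how the reworked `_handle_response_errors` now surfaces server errors (same caveats: IDs and `Label` fields are illustrative):

```python
import requests

# Bulk-create; per the docstring the response now carries a `label_ids` list and
# an optional `errors` list instead of a bare status message.
try:
    result = client.add_labels(collection_id, [label])
    print(result.get("label_ids"), result.get("errors"))
except requests.HTTPError as e:
    # 0.1.21a0 raised the generic message from raise_for_status(); 0.1.23a0 raises
    # "HTTP <status>: <detail>", preferring the JSON `detail` field when present.
    print(e)

# Read back, optionally dropping labels that do not validate against the label set
# schema; filter_valid_labels is sent as a query parameter.
labels = client.get_labels(collection_id, label_set_id, filter_valid_labels=True)
```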
docent/trace.py CHANGED
@@ -43,6 +43,7 @@ class Instruments(Enum):
     ANTHROPIC = "anthropic"
     BEDROCK = "bedrock"
     LANGCHAIN = "langchain"
+    GOOGLE_GENERATIVEAI = "google_generativeai"
 
 
 class DocentTracer:
@@ -392,6 +393,23 @@ class DocentTracer:
             except Exception as e:
                 logger.warning(f"Failed to instrument LangChain: {e}")
 
+        # Instrument Google Generative AI with our isolated tracer provider
+        if Instruments.GOOGLE_GENERATIVEAI in enabled_instruments:
+            try:
+                if is_package_installed("google-generativeai") or is_package_installed(
+                    "google-genai"
+                ):
+                    from opentelemetry.instrumentation.google_generativeai import (
+                        GoogleGenerativeAiInstrumentor,
+                    )
+
+                    GoogleGenerativeAiInstrumentor().instrument(
+                        tracer_provider=self._tracer_provider
+                    )
+                    logger.info("Instrumented Google Generative AI")
+            except Exception as e:
+                logger.warning(f"Failed to instrument Google Generative AI: {e}")
+
         # Register cleanup handlers
         self._register_cleanup()
 
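
The tracer now also instruments the Google Generative AI SDKs when the new enum member is enabled and either `google-generativeai` or `google-genai` is importable; failures are logged as warnings rather than raised, and the matching `opentelemetry-instrumentation-google-generativeai` requirement is added to METADATA below. A small sketch of the enum usage, assuming you already pass an `enabled_instruments` set when configuring `DocentTracer` (its constructor is not part of this diff):

```python
from docent.trace import Instruments

# New member added in 0.1.23a0 alongside ANTHROPIC, BEDROCK, and LANGCHAIN:
assert Instruments.GOOGLE_GENERATIVEAI.value == "google_generativeai"

# Hypothetical: include it wherever enabled_instruments is configured.
enabled_instruments = {
    Instruments.ANTHROPIC,
    Instruments.LANGCHAIN,
    Instruments.GOOGLE_GENERATIVEAI,
}
```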
docent_python-0.1.21a0.dist-info/METADATA → docent_python-0.1.23a0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: docent-python
-Version: 0.1.21a0
+Version: 0.1.23a0
 Summary: Docent SDK
 Project-URL: Homepage, https://github.com/TransluceAI/docent
 Project-URL: Issues, https://github.com/TransluceAI/docent/issues
@@ -20,6 +20,7 @@ Requires-Dist: opentelemetry-exporter-otlp-proto-grpc>=1.34.1
 Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.34.1
 Requires-Dist: opentelemetry-instrumentation-anthropic>=0.40.14
 Requires-Dist: opentelemetry-instrumentation-bedrock>=0.40.14
+Requires-Dist: opentelemetry-instrumentation-google-generativeai>=0.40.14
 Requires-Dist: opentelemetry-instrumentation-langchain>=0.40.14
 Requires-Dist: opentelemetry-instrumentation-openai>=0.40.14
 Requires-Dist: opentelemetry-instrumentation-threading>=0.55b1
docent_python-0.1.21a0.dist-info/RECORD → docent_python-0.1.23a0.dist-info/RECORD CHANGED
@@ -1,31 +1,29 @@
 docent/__init__.py,sha256=fuhETwJPcesiB76Zxa64HBJxeaaTyRalIH-fs77TWsU,112
 docent/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docent/trace.py,sha256=_JvDmtWVFARPYvXsNx8-RKRdev4mMxNK6iq9AARzoJE,66362
-docent/trace_2.py,sha256=-OxzXF2kOFkhto1UGXHWVM797EN_BT_uwDSbzgMme8o,67145
+docent/trace.py,sha256=u_1M_B1ncCR5a0Yy5ppQY_0k0AFYe4UhSIGeTuZCH0w,67271
 docent/trace_temp.py,sha256=Z0lAPwVzXjFvxpiU-CuvfWIslq9Q4alNkZMoQ77Xudk,40711
 docent/_llm_util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docent/_llm_util/llm_cache.py,sha256=p8pJ-B1vyJQlNn48ce1Pqv2gLocBVD6fZLPsd2VC5LA,6386
+docent/_llm_util/llm_cache.py,sha256=nGrvfFikFbEnfmzZRvWvZ60gfVSTvW1iC8-ciCXwbAk,6430
+docent/_llm_util/llm_svc.py,sha256=PQ-96UDJrnPa9csTKL_JDO8jzOrLzysVBqUHywuij0w,18046
 docent/_llm_util/model_registry.py,sha256=8Y4VwrA2f2EX78cG1VBIBHVvT_p4qqBTdu9a9zJpfTo,3382
-docent/_llm_util/prod_llms.py,sha256=HuGOg5Bhnpk_TijC3mOH8CTRIBy2C8w0_SebiEouNoE,16859
 docent/_llm_util/data_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 docent/_llm_util/data_models/exceptions.py,sha256=IW4BVMVp8r5TufNXyrhy3acgwJiQQQPQjB9VA4RVXw8,1489
-docent/_llm_util/data_models/llm_output.py,sha256=fuYPJ-SwxZjB4XGATA6XpLyc42Ix-kXHgBqFr_jPhK8,10123
-docent/_llm_util/data_models/simple_svc.py,sha256=0twuXP6aEU-jYY0obDSEgjT2lDSJCuZG_NgiqEzZIPM,2881
+docent/_llm_util/data_models/llm_output.py,sha256=ZAIIcgfxMZtTft8bXTPAhUcXEO48GLG3epkul_4gQNQ,10239
 docent/_llm_util/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 docent/_llm_util/providers/anthropic.py,sha256=-1oPd5FB4aFwKSmNvXzG8PVewjhgsogLRX1SCpnCxoA,18720
 docent/_llm_util/providers/common.py,sha256=dgcTuU4XkCKoAaM48UW8zMgRYUzj7TDBhvWqtnxBO7g,1166
 docent/_llm_util/providers/google.py,sha256=2D9mDgenZW0pt0_V7koX-aoZzpl8jo8xE5EWOLK7I0k,20314
 docent/_llm_util/providers/openai.py,sha256=4niQV9CNaJ-iiEwYG0BSFxCwcsCAWZz0JuUs4wBKu9M,25904
 docent/_llm_util/providers/openrouter.py,sha256=sT2onpeQ1gAwJLjkQbzD2RodJWTm013Q-siTXezca10,11958
-docent/_llm_util/providers/preference_types.py,sha256=z-TOxj_es1_cs5DzknZaganGyjMkmh5NgtiDYKMRI1I,3751
+docent/_llm_util/providers/preference_types.py,sha256=K1IH15YyU8E8F7Jk7LgLKiCJeDNk9uASTdnZGk5hSDc,3773
 docent/_llm_util/providers/provider_registry.py,sha256=EPYGQlegYPtg4ogEusCftm_5PZP-_XVKH1qg3xjPFTU,6337
 docent/_log_util/__init__.py,sha256=3HXXrxrSm8PxwG4llotrCnSnp7GuroK1FNHsdg6f7aE,73
 docent/_log_util/logger.py,sha256=kwM0yRW1IJd6-XTorjWn48B4l8qvD2ZM6VDjY5eskQI,4422
-docent/data_models/__init__.py,sha256=bE_Wy4Ql-9-0ZPcolMCPHhYvaE_Ug6h-jV7wOJ_DAi0,399
+docent/data_models/__init__.py,sha256=vEcFppE6wtKFp37KF_hUv00Ncn6fK_qUbVGZE5ltz-o,383
 docent/data_models/_tiktoken_util.py,sha256=hC0EDDWItv5-0cONBnHWgZtQOflDU7ZNEhXPFo4DvPc,3057
 docent/data_models/agent_run.py,sha256=7_37I9aS9rhDTkAvMPwoJGssQldvvKte8qVb93EnAiY,19329
 docent/data_models/citation.py,sha256=2_M1-_olVOJtjCGGFx1GIwGYWl0ILHxRsW8-EFDS9j0,7844
-docent/data_models/judge.py,sha256=zPbTqztn-yWu6tgD3R5JTyGnNiDhY6cWQ-gz3e_eM5k,340
+docent/data_models/judge.py,sha256=BOKAfZmNoLPclJNz_b7NvH8G8FzfR7kc6OpIv91GMDQ,336
 docent/data_models/metadata_util.py,sha256=E-EClAP5vVm9xbfTlPSz0tUyCalOfN9Jujd6JGoRnBg,487
 docent/data_models/regex.py,sha256=0ciIerkrNwb91bY5mTcyO5nDWH67xx2tZYObV52fmBo,1684
 docent/data_models/remove_invalid_citation_ranges.py,sha256=3RSMsOzFO2cSjkxI549TAo12qdvD-AGHd05Jxu0amvs,6282
@@ -36,14 +34,17 @@ docent/data_models/chat/__init__.py,sha256=ws77P3raDiOv6XesAMycUwu-uT75D5f9aNgjF
 docent/data_models/chat/content.py,sha256=Co-jO8frQa_DSP11wJuhPX0s-GpJk8yqtKqPeiAIZ_U,1672
 docent/data_models/chat/message.py,sha256=_72xeTdgv8ogQd4WLl1P3yXfIDkIEQrHlWgdvObeQxY,4291
 docent/data_models/chat/tool.py,sha256=MMglNHzkwHqUoK0xDWqs2FtelPsgHqwVpGpI1F8KZyw,3049
-docent/judges/__init__.py,sha256=Sob1uxJRgmr2S2sz4J6skHP8iqcVoiUq7Jlh8S5Sj9Y,462
-docent/judges/impl.py,sha256=qiItNKWPvB0KlB5b0rQoIfT-7m1xzyI028WtgvgvRhU,8864
-docent/judges/types.py,sha256=NlLv42iLDORbPAHppCz-YWZ6ksR4QYDWAweGw75izJ0,8439
+docent/judges/__init__.py,sha256=aTsQ2mIQnZt8HEMau02KrEA4m5w-lGC3U9Dirkj3to4,500
+docent/judges/analysis.py,sha256=bn7XIT7mj77LjFHMh1PqjALknq3nN-fRXqgg8cfJF8o,2486
+docent/judges/impl.py,sha256=JOq2tEBTqNbWIG2gRuI8OmEW2dHdx7nfnJnHeGwdyOk,24035
+docent/judges/runner.py,sha256=ANUVrrfgT61_zTV9pErLXoerMiD6x_RIJQGpwxWIIMg,1928
+docent/judges/stats.py,sha256=zejJle583xHG2G3gcYHiWcHoIOkeKwpSkl8lfeKQhFs,7805
+docent/judges/types.py,sha256=goNaKs3PF5wMHWLnFerYCEjUjPR0IVI9cVrxCK2TfjI,11539
 docent/judges/util/forgiving_json.py,sha256=zSh0LF3UVHdSjuMNvEiqUmSxpxPaqK1rSLiI6KCNihg,3549
-docent/judges/util/meta_schema.json,sha256=g3MUa_6e38I3GqZryy8b1w_Y9Krx2xSiWIuaG8Zpszc,2055
+docent/judges/util/meta_schema.json,sha256=7VHCGQUM0PbMIiwWDar15Sqaodi2y2Ty7yIW3PDL268,2105
 docent/judges/util/meta_schema.py,sha256=6IrIRHERJ6tkRcUtUShJ84I68yUJgkwfFeBjgt42qEA,930
-docent/judges/util/parse_output.py,sha256=qvqt7TEnrAqvzYHqip48boMQSUcoGa-1PA1gIGn-w4s,3381
-docent/judges/util/voting.py,sha256=cAty9b4w7M1OWeW-j8t6vxpZn7VXyE3aBL9Ex2ERKcU,3071
+docent/judges/util/parse_output.py,sha256=XMdU-hd3iozlGa72bpTrOADRpRze2sg68RSkQAw-yD4,2975
+docent/judges/util/voting.py,sha256=IRYXXYLsdc8MsgdzBBNVI1nnsx4kxnnWLPeQyLrDhwc,5152
 docent/loaders/load_inspect.py,sha256=VLrtpvcVZ44n2DIPMwUivXqbvOWjaooGw6moY8UQ0VE,6789
 docent/samples/__init__.py,sha256=roDFnU6515l9Q8v17Es_SpWyY9jbm5d6X9lV01V0MZo,143
 docent/samples/load.py,sha256=ZGE07r83GBNO4A0QBh5aQ18WAu3mTWA1vxUoHd90nrM,207
@@ -51,8 +52,8 @@ docent/samples/log.eval,sha256=orrW__9WBfANq7NwKsPSq9oTsQRcG6KohG5tMr_X_XY,39770
 docent/samples/tb_airline.json,sha256=eR2jFFRtOw06xqbEglh6-dPewjifOk-cuxJq67Dtu5I,47028
 docent/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 docent/sdk/agent_run_writer.py,sha256=0AWdxejoqZyuj9JSA39WlEwGcMSYTWNqnzIuluySY-M,11043
-docent/sdk/client.py,sha256=K1NVkj_CFj0q-2mSFvWfh8NTqXqosED--dv5aLD7yOE,18239
-docent_python-0.1.21a0.dist-info/METADATA,sha256=H0iEQ39cv90MW0lRZ94XhER6C_znvNU3DBPc6M72i9g,1277
-docent_python-0.1.21a0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-docent_python-0.1.21a0.dist-info/licenses/LICENSE.md,sha256=QIMv2UiT6MppRasso4ymaA0w7ltkqmlL0HCt8CLD7Rc,580
-docent_python-0.1.21a0.dist-info/RECORD,,
+docent/sdk/client.py,sha256=ZRJDz1RKcV7_gA0zHbr3bIdvUqFrSU99Zes__2kVnKo,19184
+docent_python-0.1.23a0.dist-info/METADATA,sha256=sxm-OrFzMKg2U2G_EmrHlGeceqhpY7cGzKa0H989FOo,1351
+docent_python-0.1.23a0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+docent_python-0.1.23a0.dist-info/licenses/LICENSE.md,sha256=QIMv2UiT6MppRasso4ymaA0w7ltkqmlL0HCt8CLD7Rc,580
+docent_python-0.1.23a0.dist-info/RECORD,,
docent/_llm_util/data_models/simple_svc.py DELETED
@@ -1,79 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Literal
-
-from docent._llm_util.data_models.llm_output import (
-    AsyncLLMOutputStreamingCallback,
-    LLMOutput,
-)
-from docent._llm_util.prod_llms import MessagesInput, get_llm_completions_async
-from docent._llm_util.providers.preference_types import ModelOption
-from docent.data_models.chat import ToolInfo
-
-__all__ = ["BaseLLMService"]
-
-
-class BaseLLMService(ABC):
-    """Common interface for LLM services."""
-
-    @abstractmethod
-    async def get_completions(
-        self,
-        *,
-        inputs: list[MessagesInput],
-        model_options: list[ModelOption],
-        tools: list[ToolInfo] | None = None,
-        tool_choice: Literal["auto", "required"] | None = None,
-        max_new_tokens: int = 1024,
-        temperature: float = 1.0,
-        logprobs: bool = False,
-        top_logprobs: int | None = None,
-        max_concurrency: int = 100,
-        timeout: float = 120.0,
-        streaming_callback: AsyncLLMOutputStreamingCallback | None = None,
-        validation_callback: AsyncLLMOutputStreamingCallback | None = None,
-        completion_callback: AsyncLLMOutputStreamingCallback | None = None,
-        use_cache: bool = False,
-    ) -> list[LLMOutput]:
-        """Request completions from a configured LLM provider."""
-
-
-class SimpleLLMService(BaseLLMService):
-    """Lightweight LLM service that simply forwards completion requests.
-    Does not support cost tracking, usage limits, global scheduling or rate limiting."""
-
-    async def get_completions(
-        self,
-        *,
-        inputs: list[MessagesInput],
-        model_options: list[ModelOption],
-        tools: list[ToolInfo] | None = None,
-        tool_choice: Literal["auto", "required"] | None = None,
-        max_new_tokens: int = 1024,
-        temperature: float = 1.0,
-        logprobs: bool = False,
-        top_logprobs: int | None = None,
-        max_concurrency: int = 100,
-        timeout: float = 120.0,
-        streaming_callback: AsyncLLMOutputStreamingCallback | None = None,
-        validation_callback: AsyncLLMOutputStreamingCallback | None = None,
-        completion_callback: AsyncLLMOutputStreamingCallback | None = None,
-        use_cache: bool = False,
-    ) -> list[LLMOutput]:
-        return await get_llm_completions_async(
-            inputs=inputs,
-            model_options=model_options,
-            tools=tools,
-            tool_choice=tool_choice,
-            max_new_tokens=max_new_tokens,
-            temperature=temperature,
-            logprobs=logprobs,
-            top_logprobs=top_logprobs,
-            max_concurrency=max_concurrency,
-            timeout=timeout,
-            streaming_callback=streaming_callback,
-            validation_callback=validation_callback,
-            completion_callback=completion_callback,
-            use_cache=use_cache,
-        )