docent-python 0.1.17a0__py3-none-any.whl → 0.1.19a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docent/data_models/__init__.py +2 -0
- docent/data_models/agent_run.py +5 -5
- docent/data_models/chat/__init__.py +6 -1
- docent/data_models/citation.py +103 -22
- docent/data_models/judge.py +16 -0
- docent/data_models/metadata_util.py +16 -0
- docent/data_models/remove_invalid_citation_ranges.py +23 -10
- docent/data_models/transcript.py +18 -16
- docent/sdk/agent_run_writer.py +62 -19
- docent/sdk/client.py +104 -20
- docent/trace.py +54 -49
- {docent_python-0.1.17a0.dist-info → docent_python-0.1.19a0.dist-info}/METADATA +1 -1
- {docent_python-0.1.17a0.dist-info → docent_python-0.1.19a0.dist-info}/RECORD +15 -15
- docent/data_models/metadata.py +0 -229
- docent/data_models/yaml_util.py +0 -12
- {docent_python-0.1.17a0.dist-info → docent_python-0.1.19a0.dist-info}/WHEEL +0 -0
- {docent_python-0.1.17a0.dist-info → docent_python-0.1.19a0.dist-info}/licenses/LICENSE.md +0 -0
docent/sdk/client.py
CHANGED
@@ -8,6 +8,7 @@ from tqdm import tqdm
 
 from docent._log_util.logger import get_logger
 from docent.data_models.agent_run import AgentRun
+from docent.data_models.judge import JudgeRunLabel
 from docent.loaders import load_inspect
 
 logger = get_logger(__name__)
@@ -48,13 +49,18 @@ class Docent:
 
         self._login(api_key)
 
+    def _handle_response_errors(self, response: requests.Response):
+        """Handle API response and raise informative errors.
+        TODO: make this more informative."""
+        response.raise_for_status()
+
     def _login(self, api_key: str):
         """Login with email/password to establish session."""
         self._session.headers.update({"Authorization": f"Bearer {api_key}"})
 
         url = f"{self._server_url}/api-keys/test"
         response = self._session.get(url)
-        response.raise_for_status()
+        self._handle_response_errors(response)
 
         logger.info("Logged in with API key")
         return
@@ -90,7 +96,7 @@ class Docent:
         }
 
         response = self._session.post(url, json=payload)
-        response.raise_for_status()
+        self._handle_response_errors(response)
 
         response_data = response.json()
         collection_id = response_data.get("collection_id")
@@ -134,13 +140,13 @@ class Docent:
             payload = {"agent_runs": [ar.model_dump(mode="json") for ar in batch]}
 
             response = self._session.post(url, json=payload)
-            response.raise_for_status()
+            self._handle_response_errors(response)
 
             pbar.update(len(batch))
 
         url = f"{self._server_url}/{collection_id}/compute_embeddings"
         response = self._session.post(url)
-        response.raise_for_status()
+        self._handle_response_errors(response)
 
         logger.info(f"Successfully added {total_runs} agent runs to Collection '{collection_id}'")
         return {"status": "success", "total_runs_added": total_runs}
@@ -156,7 +162,7 @@ class Docent:
         """
         url = f"{self._server_url}/collections"
         response = self._session.get(url)
-        response.raise_for_status()
+        self._handle_response_errors(response)
         return response.json()
 
     def list_rubrics(self, collection_id: str) -> list[dict[str, Any]]:
@@ -173,7 +179,7 @@ class Docent:
         """
         url = f"{self._server_url}/rubric/{collection_id}/rubrics"
         response = self._session.get(url)
-        response.raise_for_status()
+        self._handle_response_errors(response)
         return response.json()
 
     def get_rubric_run_state(self, collection_id: str, rubric_id: str) -> dict[str, Any]:
@@ -191,7 +197,7 @@ class Docent:
         """
         url = f"{self._server_url}/rubric/{collection_id}/{rubric_id}/rubric_run_state"
         response = self._session.get(url)
-        response.raise_for_status()
+        self._handle_response_errors(response)
         return response.json()
 
     def get_clustering_state(self, collection_id: str, rubric_id: str) -> dict[str, Any]:
@@ -209,7 +215,7 @@ class Docent:
         """
         url = f"{self._server_url}/rubric/{collection_id}/{rubric_id}/clustering_job"
         response = self._session.get(url)
-        response.raise_for_status()
+        self._handle_response_errors(response)
         return response.json()
 
     def get_cluster_centroids(self, collection_id: str, rubric_id: str) -> list[dict[str, Any]]:
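Every endpoint wrapper above now routes failures through the new _handle_response_errors helper, which for now simply calls response.raise_for_status(), so all API errors surface uniformly as requests.exceptions.HTTPError. A minimal caller-side sketch (the Docent constructor arguments are an assumption not shown in these hunks; list_rubrics and the helper's behavior are taken from the diff):

import requests

from docent.sdk.client import Docent

client = Docent(api_key="your-api-key")  # hypothetical constructor arguments

try:
    rubrics = client.list_rubrics("your-collection-id")
except requests.exceptions.HTTPError as e:
    # raise_for_status() attaches the failing response to the exception.
    print(f"Docent API error {e.response.status_code}: {e.response.text}")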
@@ -244,6 +250,90 @@ class Docent:
         clustering_state = self.get_clustering_state(collection_id, rubric_id)
         return clustering_state.get("assignments", {})
 
+    def add_label(
+        self,
+        collection_id: str,
+        rubric_id: str,
+        label: JudgeRunLabel,
+    ) -> dict[str, Any]:
+        """Attach a manual label to an agent run for a rubric.
+
+        Args:
+            collection_id: ID of the Collection that owns the rubric.
+            rubric_id: ID of the rubric the label applies to.
+            label: A `JudgeRunLabel` that must comply with the rubric's output schema.
+
+        Returns:
+            dict: API response containing a status message.
+
+        Raises:
+            ValueError: If the label does not target the rubric specified in the path.
+            requests.exceptions.HTTPError: If the API request fails or validation errors occur.
+        """
+        if label.rubric_id != rubric_id:
+            raise ValueError("Label rubric_id must match the rubric_id argument")
+
+        url = f"{self._server_url}/rubric/{collection_id}/rubric/{rubric_id}/label"
+        payload = {"label": label.model_dump(mode="json")}
+        response = self._session.post(url, json=payload)
+        self._handle_response_errors(response)
+        return response.json()
+
+    def add_labels(
+        self,
+        collection_id: str,
+        rubric_id: str,
+        labels: list[JudgeRunLabel],
+    ) -> dict[str, Any]:
+        """Attach multiple manual labels to a rubric.
+
+        Args:
+            collection_id: ID of the Collection that owns the rubric.
+            rubric_id: ID of the rubric the labels apply to.
+            labels: List of `JudgeRunLabel` objects.
+
+        Returns:
+            dict: API response containing status information.
+
+        Raises:
+            ValueError: If no labels are provided.
+            ValueError: If any label targets a different rubric.
+            requests.exceptions.HTTPError: If the API request fails.
+        """
+        if not labels:
+            raise ValueError("labels must contain at least one entry")
+
+        rubric_ids = {label.rubric_id for label in labels}
+        if rubric_ids != {rubric_id}:
+            raise ValueError(
+                "All labels must specify the same rubric_id that is provided to add_labels"
+            )
+
+        payload = {"labels": [l.model_dump(mode="json") for l in labels]}
+
+        url = f"{self._server_url}/rubric/{collection_id}/rubric/{rubric_id}/labels"
+        response = self._session.post(url, json=payload)
+        self._handle_response_errors(response)
+        return response.json()
+
+    def get_labels(self, collection_id: str, rubric_id: str) -> list[dict[str, Any]]:
+        """Retrieve all manual labels for a rubric.
+
+        Args:
+            collection_id: ID of the Collection that owns the rubric.
+            rubric_id: ID of the rubric to fetch labels for.
+
+        Returns:
+            list: List of label dictionaries. Each includes agent_run_id and label content.
+
+        Raises:
+            requests.exceptions.HTTPError: If the API request fails.
+        """
+        url = f"{self._server_url}/rubric/{collection_id}/rubric/{rubric_id}/labels"
+        response = self._session.get(url)
+        self._handle_response_errors(response)
+        return response.json()
+
     def get_agent_run(self, collection_id: str, agent_run_id: str) -> AgentRun | None:
         """Get a specific agent run by its ID.
 
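The add_label, add_labels, and get_labels methods added above round-trip manual judge labels through the new rubric label endpoints. A usage sketch, reusing the client from the earlier example; the JudgeRunLabel constructor fields other than rubric_id and agent_run_id are assumptions, and the label payload must satisfy the rubric's output schema:

from docent.data_models.judge import JudgeRunLabel

label = JudgeRunLabel(
    agent_run_id="agent-run-id",   # referenced by get_labels' docstring
    rubric_id="rubric-id",         # must match the rubric_id passed to add_label
    label={"score": 1},            # hypothetical field name and schema
)

client.add_label("collection-id", "rubric-id", label)       # single label
client.add_labels("collection-id", "rubric-id", [label])    # bulk upload
stored = client.get_labels("collection-id", "rubric-id")    # list of label dicts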
@@ -259,7 +349,7 @@ class Docent:
         """
         url = f"{self._server_url}/{collection_id}/agent_run"
         response = self._session.get(url, params={"agent_run_id": agent_run_id})
-        response.raise_for_status()
+        self._handle_response_errors(response)
         if response.json() is None:
             return None
         else:
@@ -281,7 +371,7 @@ class Docent:
         """
         url = f"{self._server_url}/{collection_id}/make_public"
         response = self._session.post(url)
-        response.raise_for_status()
+        self._handle_response_errors(response)
 
         logger.info(f"Successfully made Collection '{collection_id}' public")
         return response.json()
@@ -303,13 +393,7 @@ class Docent:
         payload = {"email": email}
         response = self._session.post(url, json=payload)
 
-        try:
-            response.raise_for_status()
-        except requests.exceptions.HTTPError:
-            if response.status_code == 404:
-                raise ValueError(f"The user you are trying to share with ({email}) does not exist.")
-            else:
-                raise  # Re-raise the original exception
+        self._handle_response_errors(response)
 
         logger.info(f"Successfully shared Collection '{collection_id}' with {email}")
         return response.json()
@@ -328,7 +412,7 @@ class Docent:
         """
         url = f"{self._server_url}/{collection_id}/agent_run_ids"
         response = self._session.get(url)
-        response.raise_for_status()
+        self._handle_response_errors(response)
         return response.json()
 
     def recursively_ingest_inspect_logs(self, collection_id: str, fpath: str):
@@ -393,7 +477,7 @@ class Docent:
                 payload = {"agent_runs": [ar.model_dump(mode="json") for ar in batch_list]}
 
                 response = self._session.post(url, json=payload)
-                response.raise_for_status()
+                self._handle_response_errors(response)
 
                 runs_from_file += len(batch_list)
                 file_pbar.update(len(batch_list))
@@ -406,7 +490,7 @@ class Docent:
         logger.info("Computing embeddings for added runs...")
         url = f"{self._server_url}/{collection_id}/compute_embeddings"
         response = self._session.post(url)
-        response.raise_for_status()
+        self._handle_response_errors(response)
 
         logger.info(
             f"Successfully ingested {total_runs_added} total agent runs from {len(eval_files)} files"
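One behavioral change worth noting in the @@ -303 hunk: the collection-sharing method (its name is not visible here) used to convert a 404 into ValueError("The user you are trying to share with (...) does not exist.") and now lets the HTTPError from _handle_response_errors propagate. Callers that depended on the old behavior can translate the error themselves; a sketch with a hypothetical method name:

import requests

try:
    client.share_collection("collection-id", "colleague@example.com")  # hypothetical name
except requests.exceptions.HTTPError as e:
    if e.response is not None and e.response.status_code == 404:
        raise ValueError("The user you are trying to share with does not exist.") from e
    raise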
docent/trace.py
CHANGED
@@ -21,7 +21,7 @@ from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExport
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as HTTPExporter
 from opentelemetry.instrumentation.threading import ThreadingInstrumentor
 from opentelemetry.sdk.resources import Resource
-from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor, TracerProvider
+from opentelemetry.sdk.trace import ReadableSpan, SpanLimits, SpanProcessor, TracerProvider
 from opentelemetry.sdk.trace.export import (
     BatchSpanProcessor,
     ConsoleSpanExporter,
@@ -29,20 +29,13 @@ from opentelemetry.sdk.trace.export import (
 )
 from opentelemetry.trace import Span
 
-# Configure logging
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.ERROR)
 
 # Default configuration
 DEFAULT_ENDPOINT = "https://api.docent.transluce.org/rest/telemetry"
 DEFAULT_COLLECTION_NAME = "default-collection-name"
 
 
-def _is_tracing_disabled() -> bool:
-    """Check if tracing is disabled via environment variable."""
-    return os.environ.get("DOCENT_DISABLE_TRACING", "").lower() == "true"
-
-
 class Instruments(Enum):
     """Enumeration of available instrument types."""
 
@@ -52,16 +45,10 @@ class Instruments(Enum):
     LANGCHAIN = "langchain"
 
 
-def _is_notebook() -> bool:
-    """Check if we're running in a Jupyter notebook."""
-    try:
-        return "ipykernel" in sys.modules
-    except Exception:
-        return False
-
-
 class DocentTracer:
-    """Manages Docent tracing setup and provides tracing utilities."""
+    """
+    Manages Docent tracing setup and provides tracing utilities.
+    """
 
     def __init__(
         self,
@@ -77,22 +64,6 @@ class DocentTracer:
         instruments: Optional[Set[Instruments]] = None,
         block_instruments: Optional[Set[Instruments]] = None,
     ):
-        """
-        Initialize Docent tracing manager.
-
-        Args:
-            collection_name: Name of the collection for resource attributes
-            collection_id: Optional collection ID (auto-generated if not provided)
-            agent_run_id: Optional agent_run_id to use for code outside of an agent run context (auto-generated if not provided)
-            endpoint: OTLP endpoint URL(s) - can be a single string or list of strings for multiple endpoints
-            headers: Optional headers for authentication
-            api_key: Optional API key for bearer token authentication (takes precedence over env var)
-            enable_console_export: Whether to export to console
-            enable_otlp_export: Whether to export to OTLP endpoint
-            disable_batch: Whether to disable batch processing (use SimpleSpanProcessor)
-            instruments: Set of instruments to enable (None = all instruments)
-            block_instruments: Set of instruments to explicitly disable
-        """
         self._initialized: bool = False
         # Check if tracing is disabled via environment variable
         if _is_tracing_disabled():
@@ -163,8 +134,12 @@ class DocentTracer:
         """
         Get the current agent run ID from context.
 
+        Retrieves the agent run ID that was set in the current execution context.
+        If no agent run context is active, returns the default agent run ID.
+
         Returns:
-            The current agent run ID if available,
+            The current agent run ID if available, or the default agent run ID
+            if no context is active.
         """
         try:
             return self._agent_run_id_var.get()
@@ -249,12 +224,23 @@ class DocentTracer:
             return
 
         try:
+
+            # Check for OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT environment variable
+            default_attribute_limit = 1024
+            env_value = os.environ.get("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "0")
+            env_limit = int(env_value) if env_value.isdigit() else 0
+            attribute_limit = max(env_limit, default_attribute_limit)
+
+            span_limits = SpanLimits(
+                max_attributes=attribute_limit,
+            )
+
             # Create our own isolated tracer provider
             self._tracer_provider = TracerProvider(
-                resource=Resource.create({"service.name": self.collection_name})
+                resource=Resource.create({"service.name": self.collection_name}),
+                span_limits=span_limits,
             )
 
-            # Add custom span processor for agent_run_id and transcript_id
             class ContextSpanProcessor(SpanProcessor):
                 def __init__(self, manager: "DocentTracer"):
                     self.manager: "DocentTracer" = manager
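The tracer provider now gets an explicit SpanLimits: the effective attribute cap is the larger of the built-in default (1024) and OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT, and non-numeric values fall back to 0 so they cannot lower the default. A standalone sketch of the same selection logic:

import os

default_attribute_limit = 1024
env_value = os.environ.get("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "0")
env_limit = int(env_value) if env_value.isdigit() else 0
attribute_limit = max(env_limit, default_attribute_limit)

# unset or "abc" -> 1024 (default)
# "256"          -> 1024 (the env var cannot lower the default)
# "4096"         -> 4096 (it can only raise it)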
@@ -312,11 +298,7 @@ class DocentTracer:
                     )
 
                 def on_end(self, span: ReadableSpan) -> None:
-
-                    span_attrs = span.attributes or {}
-                    logger.debug(
-                        f"Completed span: name='{span.name}', collection_id={span_attrs.get('collection_id')}, agent_run_id={span_attrs.get('agent_run_id')}, transcript_id={span_attrs.get('transcript_id')}, duration_ns={span.end_time - span.start_time if span.end_time and span.start_time else 'unknown'}"
-                    )
+                    pass
 
                 def shutdown(self) -> None:
                     pass
@@ -422,7 +404,17 @@ class DocentTracer:
             raise
 
     def cleanup(self):
-        """Clean up Docent tracing resources."""
+        """
+        Clean up Docent tracing resources.
+
+        Flushes all pending spans to exporters and shuts down the tracer provider.
+        This method is automatically called during application shutdown via atexit
+        handlers, but can also be called manually for explicit cleanup.
+
+        The cleanup process:
+        1. Flushes all span processors to ensure data is exported
+        2. Shuts down the tracer provider and releases resources
+        """
         if self._disabled:
             return
 
@@ -473,7 +465,7 @@ class DocentTracer:
         if disabled and self._initialized:
             self.cleanup()
 
-    def
+    def is_initialized(self) -> bool:
         """Verify if the manager is properly initialized."""
         return self._initialized
 
@@ -1063,8 +1055,9 @@ def initialize_tracing(
         collection_id: Optional collection ID (auto-generated if not provided)
         endpoint: OTLP endpoint URL(s) for span export - can be a single string or list of strings for multiple endpoints
         headers: Optional headers for authentication
-        api_key: Optional API key for bearer token authentication (takes precedence
-
+        api_key: Optional API key for bearer token authentication (takes precedence
+            over DOCENT_API_KEY environment variable)
+        enable_console_export: Whether to export spans to console for debugging
         enable_otlp_export: Whether to export spans to OTLP endpoint
         disable_batch: Whether to disable batch processing (use SimpleSpanProcessor)
         instruments: Set of instruments to enable (None = all instruments).
@@ -1074,7 +1067,6 @@ def initialize_tracing(
         The initialized Docent tracer
 
     Example:
-        # Basic setup
         initialize_tracing("my-collection")
     """
 
@@ -1137,17 +1129,17 @@ def close_tracing() -> None:
 def flush_tracing() -> None:
     """Force flush all spans to exporters."""
     if _global_tracer:
-        logger.debug("Flushing
+        logger.debug("Flushing Docent tracer")
         _global_tracer.flush()
     else:
         logger.debug("No global tracer available to flush")
 
 
-def
+def is_initialized() -> bool:
     """Verify if the global Docent tracer is properly initialized."""
     if _global_tracer is None:
         return False
-    return _global_tracer.
+    return _global_tracer.is_initialized()
 
 
 def is_disabled() -> bool:
@@ -1764,3 +1756,16 @@ def transcript_group_context(
     return TranscriptGroupContext(
         name, transcript_group_id, description, metadata, parent_transcript_group_id
     )
+
+
+def _is_tracing_disabled() -> bool:
+    """Check if tracing is disabled via environment variable."""
+    return os.environ.get("DOCENT_DISABLE_TRACING", "").lower() == "true"
+
+
+def _is_notebook() -> bool:
+    """Check if we're running in a Jupyter notebook."""
+    try:
+        return "ipykernel" in sys.modules
+    except Exception:
+        return False
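Taken together, the module-level helpers touched above cover the tracing lifecycle, and _is_tracing_disabled() shows that setting DOCENT_DISABLE_TRACING=true turns the whole setup into a no-op. A minimal sketch, assuming these functions are imported from docent.trace (the file shown here):

from docent.trace import close_tracing, flush_tracing, initialize_tracing, is_initialized

initialize_tracing("my-collection")  # basic setup, as in the docstring example above

if is_initialized():
    # ... run instrumented agent code ...
    flush_tracing()   # force-flush pending spans to the exporters
    close_tracing()   # explicit shutdown; cleanup() also runs via atexit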
{docent_python-0.1.17a0.dist-info → docent_python-0.1.19a0.dist-info}/RECORD
CHANGED
@@ -1,20 +1,20 @@
 docent/__init__.py,sha256=fuhETwJPcesiB76Zxa64HBJxeaaTyRalIH-fs77TWsU,112
 docent/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docent/trace.py,sha256=
+docent/trace.py,sha256=CEDT7StBE6DaEffWEiW-Lntx5OxNmv9vyXFGI9UlW28,66357
 docent/trace_temp.py,sha256=Z0lAPwVzXjFvxpiU-CuvfWIslq9Q4alNkZMoQ77Xudk,40711
 docent/_log_util/__init__.py,sha256=3HXXrxrSm8PxwG4llotrCnSnp7GuroK1FNHsdg6f7aE,73
 docent/_log_util/logger.py,sha256=kwM0yRW1IJd6-XTorjWn48B4l8qvD2ZM6VDjY5eskQI,4422
-docent/data_models/__init__.py,sha256=
+docent/data_models/__init__.py,sha256=bE_Wy4Ql-9-0ZPcolMCPHhYvaE_Ug6h-jV7wOJ_DAi0,399
 docent/data_models/_tiktoken_util.py,sha256=hC0EDDWItv5-0cONBnHWgZtQOflDU7ZNEhXPFo4DvPc,3057
-docent/data_models/agent_run.py,sha256=
-docent/data_models/citation.py,sha256=
-docent/data_models/
+docent/data_models/agent_run.py,sha256=7_37I9aS9rhDTkAvMPwoJGssQldvvKte8qVb93EnAiY,19329
+docent/data_models/citation.py,sha256=2_M1-_olVOJtjCGGFx1GIwGYWl0ILHxRsW8-EFDS9j0,7844
+docent/data_models/judge.py,sha256=zPbTqztn-yWu6tgD3R5JTyGnNiDhY6cWQ-gz3e_eM5k,340
+docent/data_models/metadata_util.py,sha256=E-EClAP5vVm9xbfTlPSz0tUyCalOfN9Jujd6JGoRnBg,487
 docent/data_models/regex.py,sha256=0ciIerkrNwb91bY5mTcyO5nDWH67xx2tZYObV52fmBo,1684
-docent/data_models/remove_invalid_citation_ranges.py,sha256=
+docent/data_models/remove_invalid_citation_ranges.py,sha256=3RSMsOzFO2cSjkxI549TAo12qdvD-AGHd05Jxu0amvs,6282
 docent/data_models/shared_types.py,sha256=jjm-Dh5S6v7UKInW7SEqoziOsx6Z7Uu4e3VzgCbTWvc,225
-docent/data_models/transcript.py,sha256=
-docent/data_models/
-docent/data_models/chat/__init__.py,sha256=GleyRzYqKRkwwSRm_tQJw5BudCbgu9WRSa71Fntz0L0,610
+docent/data_models/transcript.py,sha256=7cdj2KAO_e2k3rj7OPzJzmzrkxPHIW7fbHygKTr7EZg,19940
+docent/data_models/chat/__init__.py,sha256=ws77P3raDiOv6XesAMycUwu-uT75D5f9aNgjFeJbUH8,631
 docent/data_models/chat/content.py,sha256=Co-jO8frQa_DSP11wJuhPX0s-GpJk8yqtKqPeiAIZ_U,1672
 docent/data_models/chat/message.py,sha256=_72xeTdgv8ogQd4WLl1P3yXfIDkIEQrHlWgdvObeQxY,4291
 docent/data_models/chat/tool.py,sha256=MMglNHzkwHqUoK0xDWqs2FtelPsgHqwVpGpI1F8KZyw,3049
@@ -24,9 +24,9 @@ docent/samples/load.py,sha256=ZGE07r83GBNO4A0QBh5aQ18WAu3mTWA1vxUoHd90nrM,207
 docent/samples/log.eval,sha256=orrW__9WBfANq7NwKsPSq9oTsQRcG6KohG5tMr_X_XY,397708
 docent/samples/tb_airline.json,sha256=eR2jFFRtOw06xqbEglh6-dPewjifOk-cuxJq67Dtu5I,47028
 docent/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docent/sdk/agent_run_writer.py,sha256=
-docent/sdk/client.py,sha256=
-docent_python-0.1.
-docent_python-0.1.
-docent_python-0.1.
-docent_python-0.1.
+docent/sdk/agent_run_writer.py,sha256=0AWdxejoqZyuj9JSA39WlEwGcMSYTWNqnzIuluySY-M,11043
+docent/sdk/client.py,sha256=Y7vhb_auT4TJLy884QVD2SML7NplxqmUTaQfWpYzk-Y,18062
+docent_python-0.1.19a0.dist-info/METADATA,sha256=52oo-zJRM9keALjZ7eTF68xyVRF9RnzeKJb3xBtOPy0,1110
+docent_python-0.1.19a0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+docent_python-0.1.19a0.dist-info/licenses/LICENSE.md,sha256=QIMv2UiT6MppRasso4ymaA0w7ltkqmlL0HCt8CLD7Rc,580
+docent_python-0.1.19a0.dist-info/RECORD,,