docent-python 0.1.4a0__tar.gz → 0.1.6a0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (34)
  1. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/.gitignore +1 -0
  2. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/PKG-INFO +1 -2
  3. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/agent_run.py +3 -0
  4. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/transcript.py +7 -0
  5. docent_python-0.1.6a0/docent/loaders/load_inspect.py +210 -0
  6. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/sdk/client.py +33 -23
  7. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/trace.py +92 -30
  8. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/pyproject.toml +1 -2
  9. docent_python-0.1.6a0/uv.lock +954 -0
  10. docent_python-0.1.4a0/docent/loaders/load_inspect.py +0 -88
  11. docent_python-0.1.4a0/docent/trace_alt.py +0 -513
  12. docent_python-0.1.4a0/uv.lock +0 -2030
  13. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/LICENSE.md +0 -0
  14. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/README.md +0 -0
  15. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/__init__.py +0 -0
  16. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/_log_util/__init__.py +0 -0
  17. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/_log_util/logger.py +0 -0
  18. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/__init__.py +0 -0
  19. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/_tiktoken_util.py +0 -0
  20. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/chat/__init__.py +0 -0
  21. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/chat/content.py +0 -0
  22. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/chat/message.py +0 -0
  23. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/chat/tool.py +0 -0
  24. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/citation.py +0 -0
  25. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/metadata.py +0 -0
  26. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/regex.py +0 -0
  27. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/data_models/shared_types.py +0 -0
  28. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/py.typed +0 -0
  29. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/samples/__init__.py +0 -0
  30. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/samples/load.py +0 -0
  31. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/samples/log.eval +0 -0
  32. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/samples/tb_airline.json +0 -0
  33. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/sdk/__init__.py +0 -0
  34. {docent_python-0.1.4a0 → docent_python-0.1.6a0}/docent/trace_temp.py +0 -0

.gitignore
@@ -6,6 +6,7 @@
 .DS_Store
 # *.sql (neil: disabled for ursid)
 *.gz
+*.tgz
 
 *.tfstate
 *.tfstate.backup

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: docent-python
-Version: 0.1.4a0
+Version: 0.1.6a0
 Summary: Docent SDK
 Project-URL: Homepage, https://github.com/TransluceAI/docent
 Project-URL: Issues, https://github.com/TransluceAI/docent/issues
@@ -22,4 +22,3 @@ Requires-Dist: pydantic>=2.11.7
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: tiktoken>=0.7.0
 Requires-Dist: tqdm>=4.67.1
-Requires-Dist: traceloop-sdk>=0.44.1

docent/data_models/agent_run.py
@@ -15,6 +15,7 @@ from pydantic import (
 from docent.data_models._tiktoken_util import get_token_count, group_messages_into_ranges
 from docent.data_models.transcript import (
     Transcript,
+    TranscriptGroup,
     TranscriptWithoutMetadataValidator,
     fake_model_dump,
 )
@@ -36,6 +37,7 @@ class AgentRun(BaseModel):
         name: Optional human-readable name for the agent run.
         description: Optional description of the agent run.
         transcripts: Dict mapping transcript IDs to Transcript objects.
+        transcript_groups: Dict mapping transcript group IDs to TranscriptGroup objects.
         metadata: Additional structured metadata about the agent run as a JSON-serializable dictionary.
     """
 
@@ -44,6 +46,7 @@ class AgentRun(BaseModel):
     description: str | None = None
 
     transcripts: dict[str, Transcript]
+    transcript_groups: dict[str, TranscriptGroup] = Field(default_factory=dict)
     metadata: dict[str, Any] = Field(default_factory=dict)
 
     @field_serializer("metadata")

docent/data_models/transcript.py
@@ -1,4 +1,5 @@
 import sys
+from datetime import datetime
 from typing import Any
 from uuid import uuid4
 
@@ -73,6 +74,8 @@ class TranscriptGroup(BaseModel):
         id: Unique identifier for the transcript group, auto-generated by default.
         name: Optional human-readable name for the transcript group.
         description: Optional description of the transcript group.
+        collection_id: ID of the collection this transcript group belongs to.
+        agent_run_id: ID of the agent run this transcript group belongs to.
         parent_transcript_group_id: Optional ID of the parent transcript group.
         metadata: Additional structured metadata about the transcript group.
     """
@@ -80,7 +83,10 @@ class TranscriptGroup(BaseModel):
     id: str = Field(default_factory=lambda: str(uuid4()))
     name: str | None = None
     description: str | None = None
+    collection_id: str
+    agent_run_id: str
     parent_transcript_group_id: str | None = None
+    created_at: datetime | None = None
     metadata: dict[str, Any] = Field(default_factory=dict)
 
     @field_serializer("metadata")
@@ -129,6 +135,7 @@ class Transcript(BaseModel):
     name: str | None = None
     description: str | None = None
     transcript_group_id: str | None = None
+    created_at: datetime | None = None
 
     messages: list[ChatMessage]
     metadata: dict[str, Any] = Field(default_factory=dict)
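
Taken together with the agent_run.py change above, transcript groups are now first-class on AgentRun. A minimal sketch of the new fields (IDs are placeholders; validators not shown in this diff may impose further constraints, e.g. on empty message lists):

from docent.data_models import AgentRun, Transcript
from docent.data_models.transcript import TranscriptGroup

group = TranscriptGroup(
    name="planning",
    collection_id="col-123",  # newly required field
    agent_run_id="run-456",   # newly required field
)
run = AgentRun(
    transcripts={"main": Transcript(messages=[], metadata={})},  # empty messages for brevity
    transcript_groups={group.id: group},  # new mapping, defaults to {}
    metadata={},
)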

docent/loaders/load_inspect.py (new file)
@@ -0,0 +1,210 @@
+import json
+from pathlib import Path
+from typing import Any, BinaryIO, Generator, Tuple
+from zipfile import ZipFile
+
+from inspect_ai.log import EvalLog
+from inspect_ai.scorer import CORRECT, INCORRECT, NOANSWER, PARTIAL, Score
+
+from docent.data_models import AgentRun, Transcript
+from docent.data_models.chat import parse_chat_message
+
+
+def _normalize_inspect_score(score: Score | dict[str, Any]) -> Any:
+    """
+    Normalize an inspect score to a float. Logic mirrors inspect_ai.scorer._metric.value_to_float.
+
+    Args:
+        score: The inspect score to normalize.
+
+    Returns:
+        The normalized score as a float, or None if the score is not a valid value.
+    """
+
+    def _leaf_normalize(value: Any) -> Any:
+        if value is None:
+            return None
+        if isinstance(value, int | float | bool):
+            return float(value)
+        if value == CORRECT:
+            return 1.0
+        if value == PARTIAL:
+            return 0.5
+        if value in [INCORRECT, NOANSWER]:
+            return 0
+        value = str(value).lower()
+        if value in ["yes", "true"]:
+            return 1.0
+        if value in ["no", "false"]:
+            return 0.0
+        if value.replace(".", "").isnumeric():
+            return float(value)
+        return value
+
+    if isinstance(score, dict):
+        value = score["value"]
+    else:
+        value = score.value
+
+    if isinstance(value, int | float | bool | str):
+        return _leaf_normalize(value)
+    if isinstance(value, list):
+        return [_leaf_normalize(v) for v in value]  # type: ignore
+    assert isinstance(value, dict), "Inspect score must be leaf value, list, or dict"
+    return {k: _leaf_normalize(v) for k, v in value.items()}  # type: ignore
+
+
+def load_inspect_log(log: EvalLog) -> list[AgentRun]:
+    if log.samples is None:
+        return []
+
+    # TODO(vincent): fix this
+    agent_runs: list[AgentRun] = []
+
+    for s in log.samples:
+        sample_id = s.id
+        epoch_id = s.epoch
+
+        if s.scores is None:
+            sample_scores = {}
+        else:
+            sample_scores = {k: _normalize_inspect_score(v) for k, v in s.scores.items()}
+
+        metadata = {
+            "task_id": log.eval.task,
+            "sample_id": str(sample_id),
+            "epoch_id": epoch_id,
+            "model": log.eval.model,
+            "additional_metadata": s.metadata,
+            "scores": sample_scores,
+            # Scores could have answers, explanations, and other metadata besides the values we extract
+            "scoring_metadata": s.scores,
+        }
+
+        agent_runs.append(
+            AgentRun(
+                transcripts={
+                    "main": Transcript(
+                        messages=[parse_chat_message(m.model_dump()) for m in s.messages],
+                        metadata={},
+                    )
+                },
+                metadata=metadata,
+            )
+        )
+
+    return agent_runs
+
+
+def _read_sample_as_run(data: dict[str, Any], header_metadata: dict[str, Any] = {}) -> AgentRun:
+    if "scores" in data:
+        normalized_scores = {k: _normalize_inspect_score(v) for k, v in data["scores"].items()}
+    else:
+        normalized_scores = {}
+
+    if "metadata" in data:
+        sample_metadata = data["metadata"]
+    else:
+        sample_metadata = {}
+
+    run_metadata: dict[str, Any] = {
+        "sample_id": data.get("id"),
+        "epoch": data.get("epoch"),
+        "target": data.get("target"),
+        # Scores could have answers, explanations, and other metadata besides the values we extract
+        "scoring_metadata": data.get("scores"),
+        "scores": normalized_scores,
+        # If a key exists in header and sample, sample takes precedence
+        **header_metadata,
+        **sample_metadata,
+    }
+
+    run = AgentRun(
+        transcripts={
+            "main": Transcript(
+                messages=[parse_chat_message(m) for m in data["messages"]], metadata={}
+            ),
+        },
+        metadata=run_metadata,
+    )
+    return run
+
+
+def _run_metadata_from_header(header: dict[str, Any]) -> dict[str, Any]:
+    """
+    Inspect logs often have a lot of metadata.
+    This function tries to get the most important stuff without adding clutter.
+    """
+    m: dict[str, Any] = {}
+    if e := header.get("eval"):
+        m["task"] = e["task"]
+        m["model"] = e["model"]
+    return m
+
+
+def get_total_samples(file_path: Path, format: str = "json") -> int:
+    """Return the total number of samples in the provided file."""
+    with open(file_path, "rb") as f:
+        if format == "json":
+            data = json.load(f)
+            return len(data.get("samples", []))
+        elif format == "eval":
+            z = ZipFile(f, mode="r")
+            try:
+                return sum(
+                    1
+                    for name in z.namelist()
+                    if name.startswith("samples/") and name.endswith(".json")
+                )
+            finally:
+                z.close()
+        else:
+            raise ValueError(f"Format must be 'json' or 'eval': {format}")
+
+
+def _runs_from_eval_file(
+    file: BinaryIO,
+) -> Tuple[dict[str, Any], Generator[AgentRun, None, None]]:
+    zip = ZipFile(file, mode="r")
+    header: dict[str, Any] = json.load(zip.open("header.json", "r"))
+    header_metadata = _run_metadata_from_header(header)
+
+    def _iter_runs() -> Generator[AgentRun, None, None]:
+        try:
+            for sample_file in zip.namelist():
+                if not (sample_file.startswith("samples/") and sample_file.endswith(".json")):
+                    continue
+                with zip.open(sample_file, "r") as f:
+                    data = json.load(f)
+                run: AgentRun = _read_sample_as_run(data, header_metadata)
+                yield run
+        finally:
+            zip.close()
+
+    return header_metadata, _iter_runs()
+
+
+def _runs_from_json_file(
+    file: BinaryIO,
+) -> Tuple[dict[str, Any], Generator[AgentRun, None, None]]:
+    data = json.load(file)
+    header_metadata = _run_metadata_from_header(data)
+
+    def _iter_runs() -> Generator[AgentRun, None, None]:
+        for sample in data["samples"]:
+            run: AgentRun = _read_sample_as_run(sample, header_metadata)
+            yield run
+
+    return header_metadata, _iter_runs()
+
+
+def runs_from_file(
+    file: BinaryIO, format: str = "json"
+) -> Tuple[dict[str, Any], Generator[AgentRun, None, None]]:
+    if format == "json":
+        result = _runs_from_json_file(file)
+    elif format == "eval":
+        result = _runs_from_eval_file(file)
+    else:
+        raise ValueError(f"Format must be 'json' or 'eval': {format}")
+    return result
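
For reference, a minimal sketch of driving the new loader's public entry point, runs_from_file (the log path is a hypothetical placeholder):

from docent.loaders.load_inspect import runs_from_file

with open("logs/example.eval", "rb") as f:  # hypothetical path
    header_metadata, runs = runs_from_file(f, format="eval")
    print(header_metadata)  # e.g. {"task": ..., "model": ...}
    for run in runs:  # AgentRun objects yielded lazily, one per sample
        print(run.metadata["scores"])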

docent/sdk/client.py
@@ -197,75 +197,85 @@ class Docent:
         return response.json()
 
     def list_searches(self, collection_id: str) -> list[dict[str, Any]]:
-        """List all searches for a given collection.
+        """List all rubrics for a given collection.
 
         Args:
             collection_id: ID of the Collection.
 
         Returns:
-            list: List of dictionaries containing search query information.
+            list: List of dictionaries containing rubric information.
 
         Raises:
             requests.exceptions.HTTPError: If the API request fails.
         """
-        url = f"{self._server_url}/{collection_id}/list_search_queries"
+        url = f"{self._server_url}/rubric/{collection_id}/rubrics"
         response = self._session.get(url)
         response.raise_for_status()
         return response.json()
 
-    def get_search_results(self, collection_id: str, search_query: str) -> list[dict[str, Any]]:
-        """Get search results for a given collection and search query.
-        Pass in either search_query or query_id.
+    def get_search_results(
+        self, collection_id: str, rubric_id: str, rubric_version: int
+    ) -> list[dict[str, Any]]:
+        """Get rubric results for a given collection, rubric and version.
 
         Args:
             collection_id: ID of the Collection.
-            search_query: The search query to get results for.
+            rubric_id: The ID of the rubric to get results for.
+            rubric_version: The version of the rubric to get results for.
 
         Returns:
-            list: List of dictionaries containing search result information.
+            list: List of dictionaries containing rubric result information.
 
         Raises:
             requests.exceptions.HTTPError: If the API request fails.
         """
-        url = f"{self._server_url}/{collection_id}/get_search_results"
-        response = self._session.post(url, json={"search_query": search_query})
+        url = f"{self._server_url}/rubric/{collection_id}/{rubric_id}/results"
+        response = self._session.get(url, params={"rubric_version": rubric_version})
         response.raise_for_status()
         return response.json()
 
-    def list_search_clusters(self, collection_id: str, search_query: str) -> list[dict[str, Any]]:
-        """List all search clusters for a given collection.
-        Pass in either search_query or query_id.
+    def list_search_clusters(
+        self, collection_id: str, rubric_id: str, rubric_version: int | None = None
+    ) -> list[dict[str, Any]]:
+        """List all centroids for a given collection and rubric.
 
         Args:
             collection_id: ID of the Collection.
-            search_query: The search query to get clusters for.
+            rubric_id: The ID of the rubric to get centroids for.
+            rubric_version: Optional version of the rubric. If not provided, uses latest.
 
         Returns:
-            list: List of dictionaries containing search cluster information.
+            list: List of dictionaries containing centroid information.
 
         Raises:
             requests.exceptions.HTTPError: If the API request fails.
         """
-        url = f"{self._server_url}/{collection_id}/list_search_clusters"
-        response = self._session.post(url, json={"search_query": search_query})
+        url = f"{self._server_url}/rubric/{collection_id}/{rubric_id}/centroids"
+        params: dict[str, int] = {}
+        if rubric_version is not None:
+            params["rubric_version"] = rubric_version
+        response = self._session.get(url, params=params)
         response.raise_for_status()
         return response.json()
 
-    def get_cluster_matches(self, collection_id: str, centroid: str) -> list[dict[str, Any]]:
-        """Get the matches for a given cluster.
+    def get_cluster_matches(
+        self, collection_id: str, rubric_id: str, rubric_version: int
+    ) -> list[dict[str, Any]]:
+        """Get centroid assignments for a given rubric.
 
         Args:
             collection_id: ID of the Collection.
-            cluster_id: The ID of the cluster to get matches for.
+            rubric_id: The ID of the rubric to get assignments for.
+            rubric_version: The version of the rubric to get assignments for.
 
         Returns:
-            list: List of dictionaries containing the search results that match the cluster.
+            list: List of dictionaries containing centroid assignment information.
 
         Raises:
             requests.exceptions.HTTPError: If the API request fails.
         """
-        url = f"{self._server_url}/{collection_id}/get_cluster_matches"
-        response = self._session.post(url, json={"centroid": centroid})
+        url = f"{self._server_url}/rubric/{collection_id}/{rubric_id}/assignments"
+        response = self._session.get(url, params={"rubric_version": rubric_version})
         response.raise_for_status()
         return response.json()
 
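
A hedged sketch of calling the reworked rubric endpoints, assuming `client` is an already-configured docent.sdk.client.Docent instance (IDs and version numbers are placeholders):

rubrics = client.list_searches(collection_id="col-123")
results = client.get_search_results("col-123", rubric_id="rub-456", rubric_version=2)
centroids = client.list_search_clusters("col-123", "rub-456")  # omit rubric_version to use latest
assignments = client.get_cluster_matches("col-123", "rub-456", rubric_version=2)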

docent/trace.py
@@ -11,17 +11,15 @@ from collections import defaultdict
 from contextlib import asynccontextmanager, contextmanager
 from contextvars import ContextVar, Token
 from datetime import datetime, timezone
-from typing import Any, AsyncIterator, Callable, Dict, Iterator, List, Optional, Union
+from enum import Enum
+from importlib.metadata import Distribution, distributions
+from typing import Any, AsyncIterator, Callable, Dict, Iterator, List, Optional, Set, Union
 
 import requests
 from opentelemetry import trace
 from opentelemetry.context import Context
 from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter as GRPCExporter
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as HTTPExporter
-from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
-from opentelemetry.instrumentation.bedrock import BedrockInstrumentor
-from opentelemetry.instrumentation.langchain import LangchainInstrumentor
-from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.instrumentation.threading import ThreadingInstrumentor
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor, TracerProvider
@@ -33,15 +31,23 @@ from opentelemetry.sdk.trace.export import (
 from opentelemetry.trace import Span
 
 # Configure logging
-logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
-logger.disabled = True
+logger.setLevel(logging.ERROR)
 
 # Default configuration
 DEFAULT_ENDPOINT = "https://api.docent.transluce.org/rest/telemetry"
 DEFAULT_COLLECTION_NAME = "default-collection-name"
 
 
+class Instruments(Enum):
+    """Enumeration of available instrument types."""
+
+    OPENAI = "openai"
+    ANTHROPIC = "anthropic"
+    BEDROCK = "bedrock"
+    LANGCHAIN = "langchain"
+
+
 def _is_notebook() -> bool:
     """Check if we're running in a Jupyter notebook."""
     try:
@@ -64,6 +70,8 @@ class DocentTracer:
         enable_console_export: bool = False,
         enable_otlp_export: bool = True,
         disable_batch: bool = False,
+        instruments: Optional[Set[Instruments]] = None,
+        block_instruments: Optional[Set[Instruments]] = None,
     ):
         """
         Initialize Docent tracing manager.
@@ -78,6 +86,8 @@ class DocentTracer:
             enable_console_export: Whether to export to console
             enable_otlp_export: Whether to export to OTLP endpoint
             disable_batch: Whether to disable batch processing (use SimpleSpanProcessor)
+            instruments: Set of instruments to enable (None = all instruments)
+            block_instruments: Set of instruments to explicitly disable
         """
         self.collection_name: str = collection_name
         self.collection_id: str = collection_id if collection_id else str(uuid.uuid4())
@@ -105,6 +115,9 @@ class DocentTracer:
         self.enable_console_export = enable_console_export
         self.enable_otlp_export = enable_otlp_export
         self.disable_batch = disable_batch
+        self.disabled_instruments: Set[Instruments] = {Instruments.LANGCHAIN}
+        self.instruments = instruments or (set(Instruments) - self.disabled_instruments)
+        self.block_instruments = block_instruments or set()
 
         # Use separate tracer provider to avoid interfering with existing OTEL setup
         self._tracer_provider: Optional[TracerProvider] = None
@@ -206,7 +219,7 @@ class DocentTracer:
                 exporters.append(exporter)
                 logger.info(f"Initialized exporter for endpoint: {endpoint}")
             else:
-                logger.warning(f"Failed to initialize exporter for endpoint: {endpoint}")
+                logger.critical(f"Failed to initialize exporter for endpoint: {endpoint}")
 
         return exporters
 
@@ -309,8 +322,6 @@ class DocentTracer:
                 logger.info(
                     f"Added {len(otlp_exporters)} OTLP exporters for {len(self.endpoints)} endpoints"
                 )
-            else:
-                logger.warning("Failed to initialize OTLP exporter")
 
         if self.enable_console_export:
             console_exporter: ConsoleSpanExporter = ConsoleSpanExporter()
@@ -333,33 +344,51 @@ class DocentTracer:
         except Exception as e:
             logger.warning(f"Failed to instrument threading: {e}")
 
+        enabled_instruments = self.instruments - self.block_instruments
+
         # Instrument OpenAI with our isolated tracer provider
-        try:
-            OpenAIInstrumentor().instrument(tracer_provider=self._tracer_provider)
-            logger.info("Instrumented OpenAI")
-        except Exception as e:
-            logger.warning(f"Failed to instrument OpenAI: {e}")
+        if Instruments.OPENAI in enabled_instruments:
+            try:
+                if is_package_installed("openai"):
+                    from opentelemetry.instrumentation.openai import OpenAIInstrumentor
+
+                    OpenAIInstrumentor().instrument(tracer_provider=self._tracer_provider)
+                    logger.info("Instrumented OpenAI")
+            except Exception as e:
+                logger.warning(f"Failed to instrument OpenAI: {e}")
 
         # Instrument Anthropic with our isolated tracer provider
-        try:
-            AnthropicInstrumentor().instrument(tracer_provider=self._tracer_provider)
-            logger.info("Instrumented Anthropic")
-        except Exception as e:
-            logger.warning(f"Failed to instrument Anthropic: {e}")
+        if Instruments.ANTHROPIC in enabled_instruments:
+            try:
+                if is_package_installed("anthropic"):
+                    from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
+
+                    AnthropicInstrumentor().instrument(tracer_provider=self._tracer_provider)
+                    logger.info("Instrumented Anthropic")
+            except Exception as e:
+                logger.warning(f"Failed to instrument Anthropic: {e}")
 
         # Instrument Bedrock with our isolated tracer provider
-        try:
-            BedrockInstrumentor().instrument(tracer_provider=self._tracer_provider)
-            logger.info("Instrumented Bedrock")
-        except Exception as e:
-            logger.warning(f"Failed to instrument Bedrock: {e}")
+        if Instruments.BEDROCK in enabled_instruments:
+            try:
+                if is_package_installed("boto3"):
+                    from opentelemetry.instrumentation.bedrock import BedrockInstrumentor
+
+                    BedrockInstrumentor().instrument(tracer_provider=self._tracer_provider)
+                    logger.info("Instrumented Bedrock")
+            except Exception as e:
+                logger.warning(f"Failed to instrument Bedrock: {e}")
 
         # Instrument LangChain with our isolated tracer provider
-        try:
-            LangchainInstrumentor().instrument(tracer_provider=self._tracer_provider)
-            logger.info("Instrumented LangChain")
-        except Exception as e:
-            logger.warning(f"Failed to instrument LangChain: {e}")
+        if Instruments.LANGCHAIN in enabled_instruments:
+            try:
+                if is_package_installed("langchain") or is_package_installed("langgraph"):
+                    from opentelemetry.instrumentation.langchain import LangchainInstrumentor
+
+                    LangchainInstrumentor().instrument(tracer_provider=self._tracer_provider)
+                    logger.info("Instrumented LangChain")
+            except Exception as e:
+                logger.warning(f"Failed to instrument LangChain: {e}")
 
         # Register cleanup handlers
         self._register_cleanup()
@@ -789,9 +818,19 @@ class DocentTracer:
             metadata: Optional metadata to send
         """
         collection_id = self.collection_id
+
+        # Get agent_run_id from current context
+        agent_run_id = self.get_current_agent_run_id()
+        if not agent_run_id:
+            logger.error(
+                f"Cannot send transcript group metadata for {transcript_group_id} - no agent_run_id in context"
+            )
+            return
+
         payload: Dict[str, Any] = {
             "collection_id": collection_id,
             "transcript_group_id": transcript_group_id,
+            "agent_run_id": agent_run_id,
             "timestamp": datetime.now(timezone.utc).isoformat(),
         }
 
@@ -942,6 +981,8 @@ def initialize_tracing(
     enable_console_export: bool = False,
     enable_otlp_export: bool = True,
     disable_batch: bool = False,
+    instruments: Optional[Set[Instruments]] = None,
+    block_instruments: Optional[Set[Instruments]] = None,
 ) -> DocentTracer:
     """
     Initialize the global Docent tracer.
@@ -958,6 +999,8 @@ def initialize_tracing(
         enable_console_export: Whether to export spans to console
         enable_otlp_export: Whether to export spans to OTLP endpoint
         disable_batch: Whether to disable batch processing (use SimpleSpanProcessor)
+        instruments: Set of instruments to enable (None = all instruments).
+        block_instruments: Set of instruments to explicitly disable.
 
     Returns:
         The initialized Docent tracer
@@ -966,6 +1009,7 @@ def initialize_tracing(
         # Basic setup
         initialize_tracing("my-collection")
     """
+
     global _global_tracer
 
     # Check for API key in environment variable if not provided as parameter
@@ -983,12 +1027,30 @@
         enable_console_export=enable_console_export,
         enable_otlp_export=enable_otlp_export,
         disable_batch=disable_batch,
+        instruments=instruments,
+        block_instruments=block_instruments,
     )
     _global_tracer.initialize()
 
     return _global_tracer
 
 
+def _get_package_name(dist: Distribution) -> str | None:
+    try:
+        return dist.name.lower()
+    except (KeyError, AttributeError):
+        return None
+
+
+installed_packages = {
+    name for dist in distributions() if (name := _get_package_name(dist)) is not None
+}
+
+
+def is_package_installed(package_name: str) -> bool:
+    return package_name.lower() in installed_packages
+
+
 def get_tracer() -> DocentTracer:
     """Get the global Docent tracer."""
     if _global_tracer is None:
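
The new Instruments enum makes instrumentation opt-in and opt-out per provider; a minimal sketch (the collection name is a placeholder, and note LangChain is excluded by default unless explicitly requested):

from docent.trace import Instruments, initialize_tracing

initialize_tracing(
    "my-collection",
    instruments={Instruments.OPENAI, Instruments.ANTHROPIC},  # enable only these
    block_instruments={Instruments.BEDROCK},  # explicitly block, even if boto3 is installed
)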

pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "docent-python"
 description = "Docent SDK"
-version = "0.1.4-alpha"
+version = "0.1.6-alpha"
 authors = [
     { name="Transluce", email="info@transluce.org" },
 ]
@@ -25,7 +25,6 @@ dependencies = [
     "opentelemetry-instrumentation-langchain>=0.44.1",
     "opentelemetry-instrumentation-openai>=0.44.1",
     "opentelemetry-instrumentation-threading>=0.55b1",
-    "traceloop-sdk>=0.44.1",
 ]
 
 [build-system]