docent-python 0.1.19a0__py3-none-any.whl → 0.1.21a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of docent-python might be problematic.

Files changed (34)
  1. docent/_llm_util/__init__.py +0 -0
  2. docent/_llm_util/data_models/__init__.py +0 -0
  3. docent/_llm_util/data_models/exceptions.py +48 -0
  4. docent/_llm_util/data_models/llm_output.py +320 -0
  5. docent/_llm_util/data_models/simple_svc.py +79 -0
  6. docent/_llm_util/llm_cache.py +193 -0
  7. docent/_llm_util/model_registry.py +126 -0
  8. docent/_llm_util/prod_llms.py +454 -0
  9. docent/_llm_util/providers/__init__.py +0 -0
  10. docent/_llm_util/providers/anthropic.py +537 -0
  11. docent/_llm_util/providers/common.py +41 -0
  12. docent/_llm_util/providers/google.py +530 -0
  13. docent/_llm_util/providers/openai.py +745 -0
  14. docent/_llm_util/providers/openrouter.py +375 -0
  15. docent/_llm_util/providers/preference_types.py +104 -0
  16. docent/_llm_util/providers/provider_registry.py +164 -0
  17. docent/data_models/transcript.py +2 -0
  18. docent/data_models/util.py +170 -0
  19. docent/judges/__init__.py +21 -0
  20. docent/judges/impl.py +222 -0
  21. docent/judges/types.py +240 -0
  22. docent/judges/util/forgiving_json.py +108 -0
  23. docent/judges/util/meta_schema.json +84 -0
  24. docent/judges/util/meta_schema.py +29 -0
  25. docent/judges/util/parse_output.py +95 -0
  26. docent/judges/util/voting.py +84 -0
  27. docent/sdk/client.py +5 -2
  28. docent/trace.py +1 -1
  29. docent/trace_2.py +1842 -0
  30. {docent_python-0.1.19a0.dist-info → docent_python-0.1.21a0.dist-info}/METADATA +10 -5
  31. docent_python-0.1.21a0.dist-info/RECORD +58 -0
  32. docent_python-0.1.19a0.dist-info/RECORD +0 -32
  33. {docent_python-0.1.19a0.dist-info → docent_python-0.1.21a0.dist-info}/WHEEL +0 -0
  34. {docent_python-0.1.19a0.dist-info → docent_python-0.1.21a0.dist-info}/licenses/LICENSE.md +0 -0
docent/judges/util/forgiving_json.py ADDED
@@ -0,0 +1,108 @@
+ import json
+ from typing import Any
+
+
+ def _repair_json(text: str) -> str:
+     """Strip leading/trailing text and fix unescaped quotes/newlines."""
+
+     json_start = None
+     for i, char in enumerate(text):
+         remaining = text[i:]
+         if (
+             char in '[{"'
+             or char.isdigit()
+             or char == "-"
+             or remaining.startswith("null")
+             or remaining.startswith("true")
+             or remaining.startswith("false")
+         ):
+             json_start = i
+             break
+     if json_start is None:
+         raise ValueError("No valid JSON start found")
+
+     result: list[str] = []
+     in_string = False
+     escape_next = False
+     depth = 0
+     started_with_container = text[json_start] in "[{"
+
+     for i in range(json_start, len(text)):
+         char = text[i]
+
+         if escape_next:
+             if in_string:
+                 # Check if this is a valid escape sequence
+                 is_valid_escape = char in '\\/bfnrt"' or (
+                     char == "u"
+                     and i + 4 < len(text)
+                     and all(c in "0123456789abcdefABCDEF" for c in text[i + 1 : i + 5])
+                 )
+                 if not is_valid_escape:
+                     # Invalid escape sequence - add another backslash to escape it
+                     result.append("\\")
+             result.append(char)
+             escape_next = False
+             continue
+
+         if char == "\\":
+             result.append(char)
+             escape_next = True
+             continue
+
+         if char == '"':
+             if in_string:
+                 # Check if quote should be escaped by looking at what follows
+                 remaining = text[i + 1 :].lstrip()
+                 if remaining and remaining[0] not in ':,}]"':
+                     result.append('\\"')
+                     continue
+                 in_string = False
+                 result.append(char)
+                 # If we're at depth 0 and closed a top-level string, we're done
+                 if depth == 0 and not started_with_container:
+                     return "".join(result)
+             else:
+                 in_string = True
+                 result.append(char)
+         elif in_string and char == "\n":
+             result.append("\\n")
+         else:
+             result.append(char)
+
+         if not in_string:
+             if char in "[{":
+                 depth += 1
+             elif char in "]}":
+                 depth -= 1
+                 if depth == 0:
+                     return "".join(result)
+             # For primitives at top level (depth 0), stop at whitespace if we've consumed content
+             elif depth == 0 and not started_with_container and result and char in " \t\n\r":
+                 # Check if this is trailing whitespace after a complete primitive
+                 current = "".join(result).strip()
+                 if current:
+                     try:
+                         json.loads(current)
+                         return current
+                     except (json.JSONDecodeError, ValueError):
+                         pass
+
+     return "".join(result)
+
+
+ def forgiving_json_loads(text: str) -> Any:
+     """
+     Parse JSON from text, applying heuristics to fix common LLM mistakes.
+
+     Repairs applied:
+     - Strip leading/trailing non-JSON text
+     - Escape unescaped quotes and newlines inside strings
+     - Fix invalid escape sequences inside strings
+     """
+     if not text or not text.strip():
+         raise ValueError("Empty or whitespace-only input")
+
+     text = _repair_json(text)
+
+     return json.loads(text)
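`forgiving_json_loads` is what lets the judge pipeline tolerate imperfect model output. A minimal usage sketch; the input string is invented for illustration:

from docent.judges.util.forgiving_json import forgiving_json_loads

# Leading prose is stripped, the unescaped inner quotes are escaped,
# and the raw newline inside the string becomes \n before json.loads runs.
raw = 'Sure! Here is the JSON:\n{"verdict": "the agent said "done"\nand stopped"}'
print(forgiving_json_loads(raw))
# -> {'verdict': 'the agent said "done"\nand stopped'}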
docent/judges/util/meta_schema.json ADDED
@@ -0,0 +1,84 @@
+ {
+   "$schema": "https://json-schema.org/draft/2020-12/schema",
+   "$id": "https://example.com/meta/mini-schema",
+   "title": "Meta-schema for Docent judge outputs. Makes some restrictions to 2020-12.",
+   "type": "object",
+   "additionalProperties": false,
+   "properties": {
+     "type": { "const": "object" },
+     "additionalProperties": { "const": false },
+     "required": {
+       "type": "array",
+       "items": { "type": "string" }
+     },
+
+     "properties": {
+       "type": "object",
+       "propertyNames": { "type": "string" },
+       "additionalProperties": {
+         "type": "object",
+         "additionalProperties": false,
+         "required": ["type"],
+
+         "properties": {
+           "type": {
+             "type": "string",
+             "enum": ["string", "integer", "number", "boolean"]
+           },
+           "description": {
+             "type": "string"
+           },
+           "citations": {
+             "type": "boolean"
+           },
+           "enum": {
+             "type": "array",
+             "items": { "type": "string" }
+           },
+           "format": {
+             "type": "string",
+             "enum": [
+               "date-time",
+               "date",
+               "time",
+               "email",
+               "hostname",
+               "ipv4",
+               "ipv6",
+               "uri",
+               "uuid"
+             ]
+           },
+           "minLength": {
+             "type": "integer",
+             "minimum": 0
+           },
+           "maxLength": {
+             "type": "integer",
+             "minimum": 0
+           },
+           "pattern": {
+             "type": "string"
+           },
+           "minimum": {
+             "type": "number"
+           },
+           "maximum": {
+             "type": "number"
+           },
+           "exclusiveMinimum": {
+             "type": "number"
+           },
+           "exclusiveMaximum": {
+             "type": "number"
+           },
+           "multipleOf": {
+             "type": "number",
+             "exclusiveMinimum": 0
+           }
+         }
+       }
+     }
+   },
+   "required": ["type", "properties"]
+ }
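To make the restrictions concrete, here is a hypothetical judge-output schema (as a Python dict, the form the validator below consumes) that stays within this subset: a flat object with additionalProperties disabled, where every field is string, integer, number, or boolean. Nested objects and arrays are not expressible.

example_schema = {
    "type": "object",
    "additionalProperties": False,
    "required": ["passed"],
    "properties": {
        "passed": {"type": "boolean", "description": "Did the agent succeed?"},
        "severity": {"type": "string", "enum": ["low", "medium", "high"]},
        "explanation": {"type": "string", "citations": True},
    },
}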
docent/judges/util/meta_schema.py ADDED
@@ -0,0 +1,29 @@
+ import json
+ from pathlib import Path
+ from typing import Any
+
+ import jsonschema
+
+
+ def _load_meta_schema() -> dict[str, Any]:
+     """Load the rubric meta-schema from the adjacent JSON file."""
+     meta_schema_path = Path(__file__).with_suffix(".json")
+     with meta_schema_path.open("r", encoding="utf-8") as f:
+         return json.load(f)
+
+
+ _META_VALIDATOR = jsonschema.Draft202012Validator(_load_meta_schema())
+
+
+ def validate_judge_result_schema(schema: dict[str, Any]):
+     """Validate a proposed schema against the rubric meta-schema.
+
+     Raises:
+         jsonschema.ValidationError: If the schema is invalid
+         jsonschema.SchemaError: If the schema is not a valid 2020-12 schema
+     """
+     # First check that this is a valid 2020-12 schema
+     jsonschema.Draft202012Validator.check_schema(schema)
+
+     # Then check that it conforms to our subset of the 2020-12 schema
+     _META_VALIDATOR.validate(schema)  # type: ignore
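A quick sketch of how the validator behaves, reusing the hypothetical `example_schema` from above:

import jsonschema

from docent.judges.util.meta_schema import validate_judge_result_schema

validate_judge_result_schema(example_schema)  # passes silently

try:
    # Nested objects fall outside the allowed subset, so this is rejected
    validate_judge_result_schema(
        {"type": "object", "properties": {"inner": {"type": "object"}}}
    )
except jsonschema.ValidationError as e:
    print(f"rejected: {e.message}")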
docent/judges/util/parse_output.py ADDED
@@ -0,0 +1,95 @@
+ import json
+ from typing import Any, cast
+
+ import jsonschema
+
+ from docent._llm_util.data_models.exceptions import ValidationFailedException
+ from docent._llm_util.data_models.llm_output import LLMOutput
+ from docent._log_util import get_logger
+ from docent.data_models.agent_run import AgentRun
+ from docent.data_models.remove_invalid_citation_ranges import remove_invalid_citation_ranges
+ from docent.judges.types import traverse_schema_and_transform
+ from docent.judges.util.forgiving_json import forgiving_json_loads
+
+ logger = get_logger(__name__)
+
+
+ def _validate_rubric_output(
+     output: dict[str, Any], output_schema: dict[str, Any], agent_run: AgentRun
+ ) -> dict[str, Any]:
+     """Validate and filter citation text ranges in rubric results.
+     Also check that the output conforms to the output schema.
+
+     Args:
+         output: Raw results from LLM judge
+         agent_run: Agent run containing transcript data for validation
+
+     Returns:
+         Validated result dict with invalid citations removed
+
+     Raises:
+         ValidationFailedException: If validation fails
+     """
+
+     def _validate_citation_string(text: str) -> str:
+         validated_text = remove_invalid_citation_ranges(text, agent_run)
+         if validated_text != text:
+             logger.warning(
+                 f"Citation validation removed invalid text range from citation in judge result. "
+                 f"Agent run ID: {agent_run.id}, "
+                 f"Original text: {text}, "
+                 f"Validated text: {validated_text}, "
+             )
+         return validated_text
+
+     try:
+         jsonschema.validate(output, output_schema)
+     except jsonschema.ValidationError as e:
+         raise ValidationFailedException(f"Schema validation failed: {e}", failed_output=str(output))
+
+     try:
+         return traverse_schema_and_transform(output, output_schema, _validate_citation_string)
+     except Exception as e:
+         raise ValidationFailedException(
+             f"Citation validation failed: {e}", failed_output=str(output)
+         )
+
+
+ def parse_and_validate_llm_output(
+     llm_output: LLMOutput,
+     output_schema: dict[str, Any],
+     agent_run: AgentRun,
+ ) -> dict[str, Any]:
+     """Parse and validate LLM output for rubric evaluation.
+
+     Args:
+         llm_output: The LLM output to parse
+         output_schema: The schema to validate against
+         agent_run: Agent run for citation validation
+
+     Returns:
+         Validated output dict
+
+     Raises:
+         ValidationFailedException: If parsing or validation fails
+     """
+     if llm_output.first_text is None:
+         raise ValidationFailedException("LLM output has no text", failed_output=None)
+
+     try:
+         output = forgiving_json_loads(llm_output.first_text)
+     except json.JSONDecodeError as e:
+         raise ValidationFailedException(
+             f"Failed to parse JSON: {e}. Raw text: `{llm_output.first_text}`",
+             failed_output=llm_output.first_text,
+         )
+
+     if not isinstance(output, dict):
+         logger.error(f"Expected dict output, got {type(output)}")
+         logger.error(f"LLM output: {llm_output.first_text}")
+         raise ValidationFailedException(
+             f"Expected dict output, got {type(output)}. Raw text: {llm_output.first_text}",
+             failed_output=llm_output.first_text,
+         )
+
+     return _validate_rubric_output(cast(dict[str, Any], output), output_schema, agent_run)
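`parse_and_validate_llm_output` needs real `LLMOutput` and `AgentRun` objects, but the core parse-then-validate flow can be sketched standalone (citation filtering and the `ValidationFailedException` wrapping are omitted; the schema and raw text are invented):

import jsonschema

from docent.judges.util.forgiving_json import forgiving_json_loads

schema = {"type": "object", "properties": {"score": {"type": "integer"}}, "required": ["score"]}
raw = 'Final answer:\n{"score": 4}'

output = forgiving_json_loads(raw)   # tolerant parse of the raw LLM text
jsonschema.validate(output, schema)  # then strict schema validation
print(output["score"])               # -> 4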
docent/judges/util/voting.py ADDED
@@ -0,0 +1,84 @@
+ from collections import Counter
+ from typing import Any, cast
+
+
+ def get_agreement_keys(schema: dict[str, Any]) -> list[str]:
+     """Get list of top-level keys in schema that we want to measure agreement on.
+
+     This includes enum, bool, and int fields. We skip float and strings.
+
+     Args:
+         schema: JSON schema dict
+
+     Returns:
+         List of field names (keys) that should be used for measuring agreement
+     """
+     agreement_keys: list[str] = []
+
+     properties = schema.get("properties", {})
+     assert isinstance(properties, dict)
+     properties = cast(dict[str, Any], properties)
+
+     for key, field_schema in properties.items():
+         assert isinstance(field_schema, dict)
+         field_schema = cast(dict[str, Any], field_schema)
+
+         field_type = field_schema.get("type")
+         assert isinstance(field_type, str)
+
+         # Include boolean fields
+         if field_type == "boolean":
+             agreement_keys.append(key)
+         # Include integer fields
+         elif field_type == "integer":
+             agreement_keys.append(key)
+         # Include enum fields (even strings)
+         elif "enum" in field_schema:
+             agreement_keys.append(key)
+
+     return agreement_keys
+
+
+ def find_modal_result(indep_results: list[dict[str, Any]], agreement_keys: list[str]):
+     """Find the result that best matches modal values across agreement keys.
+
+     Args:
+         indep_results: List of independent results to analyze
+         agreement_keys: Keys to measure agreement on
+
+     Returns:
+         Tuple of (max_idx, agt_key_modes_and_counts) where:
+         - max_idx is the index of the result that best matches modal values
+         - agt_key_modes_and_counts maps each key to (modal_value, count) or None if no values exist for that key
+
+     Raises:
+         ValueError: If no results are provided
+     """
+     if not indep_results:
+         raise ValueError("No results to score")
+
+     # For each agreement key, compute the mode and count (or None, if no values exist for that key)
+     agt_key_modes_and_counts: dict[str, tuple[str | bool | int, int] | None] = {}
+     for key in agreement_keys:
+         key_modes = Counter(v for r in indep_results if (v := r.get(key)) is not None)
+         if most_common_one := key_modes.most_common(1):
+             agt_key_modes_and_counts[key] = most_common_one[0]
+         else:
+             agt_key_modes_and_counts[key] = None
+
+     # Score each rollout based on how many agreement keys they match
+     # If there is no mode for a key, or if a certain result doesn't have that key, it doesn't count.
+     # TODO(mengk): This may bias towards results that have more keys.
+     indep_result_scores: list[int] = []
+     for r in indep_results:
+         score = 0
+         for key in agreement_keys:
+             mode_and_count = agt_key_modes_and_counts[key]
+             if mode_and_count and r.get(key) == mode_and_count[0]:
+                 score += 1
+         indep_result_scores.append(score)
+
+     # Argmax
+     max_idx = indep_result_scores.index(max(indep_result_scores))
+
+     return max_idx, agt_key_modes_and_counts
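Putting the two functions together on a worked example (the schema and results are invented for illustration). The free-form `notes` string is excluded from agreement scoring:

from docent.judges.util.voting import find_modal_result, get_agreement_keys

schema = {
    "type": "object",
    "properties": {
        "passed": {"type": "boolean"},
        "severity": {"type": "string", "enum": ["low", "high"]},
        "notes": {"type": "string"},
    },
}
keys = get_agreement_keys(schema)  # -> ['passed', 'severity']

results = [
    {"passed": True, "severity": "high"},
    {"passed": True, "severity": "low"},
    {"passed": True, "severity": "high"},
]
idx, modes = find_modal_result(results, keys)
# idx == 0 (first result matching both modal values)
# modes == {'passed': (True, 3), 'severity': ('high', 2)}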
docent/sdk/client.py CHANGED
@@ -182,12 +182,15 @@ class Docent:
         self._handle_response_errors(response)
         return response.json()
 
-    def get_rubric_run_state(self, collection_id: str, rubric_id: str) -> dict[str, Any]:
+    def get_rubric_run_state(
+        self, collection_id: str, rubric_id: str, version: int | None = None
+    ) -> dict[str, Any]:
         """Get rubric run state for a given collection and rubric.
 
         Args:
             collection_id: ID of the Collection.
             rubric_id: The ID of the rubric to get run state for.
+            version: The version of the rubric to get run state for. If None, the latest version is used.
 
         Returns:
             dict: Dictionary containing rubric run state with results, job_id, and total_agent_runs.
@@ -196,7 +199,7 @@ class Docent:
             requests.exceptions.HTTPError: If the API request fails.
         """
         url = f"{self._server_url}/rubric/{collection_id}/{rubric_id}/rubric_run_state"
-        response = self._session.get(url)
+        response = self._session.get(url, params={"version": version})
         self._handle_response_errors(response)
         return response.json()
 
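A usage sketch for the new parameter, assuming `client` is an already-configured `Docent` instance (the IDs below are hypothetical). Since the version is sent as a query parameter and requests drops parameters whose value is None, omitting it preserves the old latest-version behavior:

state = client.get_rubric_run_state(
    collection_id="col-123",
    rubric_id="rub-456",
    version=2,  # new in this release; None (the default) fetches the latest version
)
print(state["total_agent_runs"])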
docent/trace.py CHANGED
@@ -226,7 +226,7 @@ class DocentTracer:
         try:
 
             # Check for OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT environment variable
-            default_attribute_limit = 1024
+            default_attribute_limit = 1024 * 16
             env_value = os.environ.get("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "0")
             env_limit = int(env_value) if env_value.isdigit() else 0
             attribute_limit = max(env_limit, default_attribute_limit)
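The effective limit is the maximum of the environment variable and the default, so this change raises the floor from 1024 to 16384 span attributes; the env var can still raise the limit further but can no longer lower it below the new default. A minimal sketch of how the values resolve:

import os

def resolve_attribute_limit() -> int:
    # Mirrors the updated logic in DocentTracer
    default_attribute_limit = 1024 * 16
    env_value = os.environ.get("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "0")
    env_limit = int(env_value) if env_value.isdigit() else 0
    return max(env_limit, default_attribute_limit)

# unset or "1024" -> 16384; "32768" -> 32768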