braintrust 0.3.15__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. braintrust/_generated_types.py +737 -672
  2. braintrust/audit.py +2 -2
  3. braintrust/cli/eval.py +6 -7
  4. braintrust/cli/push.py +11 -11
  5. braintrust/context.py +12 -17
  6. braintrust/contrib/temporal/__init__.py +16 -27
  7. braintrust/contrib/temporal/test_temporal.py +8 -3
  8. braintrust/devserver/auth.py +8 -8
  9. braintrust/devserver/cache.py +3 -4
  10. braintrust/devserver/cors.py +8 -7
  11. braintrust/devserver/dataset.py +3 -5
  12. braintrust/devserver/eval_hooks.py +7 -6
  13. braintrust/devserver/schemas.py +22 -19
  14. braintrust/devserver/server.py +19 -12
  15. braintrust/devserver/test_cached_login.py +4 -4
  16. braintrust/framework.py +128 -140
  17. braintrust/framework2.py +88 -87
  18. braintrust/functions/invoke.py +66 -59
  19. braintrust/functions/stream.py +3 -2
  20. braintrust/generated_types.py +3 -1
  21. braintrust/git_fields.py +11 -11
  22. braintrust/gitutil.py +2 -3
  23. braintrust/graph_util.py +10 -10
  24. braintrust/id_gen.py +2 -2
  25. braintrust/logger.py +346 -357
  26. braintrust/merge_row_batch.py +10 -9
  27. braintrust/oai.py +21 -20
  28. braintrust/otel/__init__.py +49 -49
  29. braintrust/otel/context.py +16 -30
  30. braintrust/otel/test_distributed_tracing.py +14 -11
  31. braintrust/otel/test_otel_bt_integration.py +32 -31
  32. braintrust/parameters.py +8 -8
  33. braintrust/prompt.py +14 -14
  34. braintrust/prompt_cache/disk_cache.py +5 -4
  35. braintrust/prompt_cache/lru_cache.py +3 -2
  36. braintrust/prompt_cache/prompt_cache.py +13 -14
  37. braintrust/queue.py +4 -4
  38. braintrust/score.py +4 -4
  39. braintrust/serializable_data_class.py +4 -4
  40. braintrust/span_identifier_v1.py +1 -2
  41. braintrust/span_identifier_v2.py +3 -4
  42. braintrust/span_identifier_v3.py +23 -20
  43. braintrust/span_identifier_v4.py +34 -25
  44. braintrust/test_framework.py +16 -6
  45. braintrust/test_helpers.py +5 -5
  46. braintrust/test_id_gen.py +2 -3
  47. braintrust/test_otel.py +61 -53
  48. braintrust/test_queue.py +0 -1
  49. braintrust/test_score.py +1 -3
  50. braintrust/test_span_components.py +29 -44
  51. braintrust/util.py +9 -8
  52. braintrust/version.py +2 -2
  53. braintrust/wrappers/_anthropic_utils.py +4 -4
  54. braintrust/wrappers/agno/__init__.py +3 -4
  55. braintrust/wrappers/agno/agent.py +1 -2
  56. braintrust/wrappers/agno/function_call.py +1 -2
  57. braintrust/wrappers/agno/model.py +1 -2
  58. braintrust/wrappers/agno/team.py +1 -2
  59. braintrust/wrappers/agno/utils.py +12 -12
  60. braintrust/wrappers/anthropic.py +7 -8
  61. braintrust/wrappers/claude_agent_sdk/__init__.py +3 -4
  62. braintrust/wrappers/claude_agent_sdk/_wrapper.py +29 -27
  63. braintrust/wrappers/dspy.py +15 -17
  64. braintrust/wrappers/google_genai/__init__.py +16 -16
  65. braintrust/wrappers/langchain.py +22 -24
  66. braintrust/wrappers/litellm.py +4 -3
  67. braintrust/wrappers/openai.py +15 -15
  68. braintrust/wrappers/pydantic_ai.py +21 -20
  69. braintrust/wrappers/test_agno.py +0 -1
  70. braintrust/wrappers/test_dspy.py +0 -1
  71. braintrust/wrappers/test_google_genai.py +2 -3
  72. braintrust/wrappers/test_litellm.py +0 -1
  73. {braintrust-0.3.15.dist-info → braintrust-0.4.0.dist-info}/METADATA +3 -2
  74. braintrust-0.4.0.dist-info/RECORD +120 -0
  75. braintrust-0.3.15.dist-info/RECORD +0 -120
  76. {braintrust-0.3.15.dist-info → braintrust-0.4.0.dist-info}/WHEEL +0 -0
  77. {braintrust-0.3.15.dist-info → braintrust-0.4.0.dist-info}/entry_points.txt +0 -0
  78. {braintrust-0.3.15.dist-info → braintrust-0.4.0.dist-info}/top_level.txt +0 -0
braintrust/framework2.py CHANGED
@@ -1,6 +1,7 @@
 import dataclasses
 import json
-from typing import Any, Callable, Dict, List, Optional, Union, overload
+from collections.abc import Callable
+from typing import Any, overload

 import slugify
 from braintrust.logger import api_conn, app_conn, login
@@ -20,7 +21,7 @@ from .util import eprint

 class ProjectIdCache:
     def __init__(self):
-        self._cache: Dict[Project, str] = {}
+        self._cache: dict[Project, str] = {}

     def get(self, project: "Project") -> str:
         if project not in self._cache:
@@ -31,8 +32,8 @@ class ProjectIdCache:

 class _GlobalState:
     def __init__(self):
-        self.functions: List[CodeFunction] = []
-        self.prompts: List[CodePrompt] = []
+        self.functions: list[CodeFunction] = []
+        self.prompts: list[CodePrompt] = []


 global_ = _GlobalState()
@@ -47,11 +48,11 @@ class CodeFunction:
     name: str
     slug: str
     type_: str
-    description: Optional[str]
+    description: str | None
     parameters: Any
     returns: Any
-    if_exists: Optional[IfExists]
-    metadata: Optional[Dict[str, Any]] = None
+    if_exists: IfExists | None
+    metadata: dict[str, Any] | None = None


 @dataclasses.dataclass
@@ -62,17 +63,17 @@ class CodePrompt:
     name: str
     slug: str
     prompt: PromptData
-    tool_functions: List[Union[CodeFunction, SavedFunctionId]]
-    description: Optional[str]
-    function_type: Optional[str]
-    id: Optional[str]
-    if_exists: Optional[IfExists]
-    metadata: Optional[Dict[str, Any]] = None
-
-    def to_function_definition(self, if_exists: Optional[IfExists], project_ids: ProjectIdCache) -> Dict[str, Any]:
+    tool_functions: list[CodeFunction | SavedFunctionId]
+    description: str | None
+    function_type: str | None
+    id: str | None
+    if_exists: IfExists | None
+    metadata: dict[str, Any] | None = None
+
+    def to_function_definition(self, if_exists: IfExists | None, project_ids: ProjectIdCache) -> dict[str, Any]:
         prompt_data = self.prompt
         if len(self.tool_functions) > 0:
-            resolvable_tool_functions: List[Any] = []
+            resolvable_tool_functions: list[Any] = []
             for f in self.tool_functions:
                 if isinstance(f, CodeFunction):
                     resolvable_tool_functions.append(
@@ -85,7 +86,7 @@ class CodePrompt:
             else:
                 resolvable_tool_functions.append(f)
             prompt_data["tool_functions"] = resolvable_tool_functions
-        j: Dict[str, Any] = {
+        j: dict[str, Any] = {
             "project_id": project_ids.get(self.project),
             "name": self.name,
             "slug": self.slug,
@@ -116,13 +117,13 @@ class ToolBuilder:
         self,
         *,
         handler: Callable[..., Any],
-        name: Optional[str] = None,
-        slug: Optional[str] = None,
-        description: Optional[str] = None,
+        name: str | None = None,
+        slug: str | None = None,
+        description: str | None = None,
         parameters: Any = None,
         returns: Any = None,
-        if_exists: Optional[IfExists] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        if_exists: IfExists | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> CodeFunction:
         """Creates a tool.

@@ -175,48 +176,48 @@ class PromptBuilder:
     def create(
         self,
         *,
-        name: Optional[str] = None,
-        slug: Optional[str] = None,
-        description: Optional[str] = None,
-        id: Optional[str] = None,
+        name: str | None = None,
+        slug: str | None = None,
+        description: str | None = None,
+        id: str | None = None,
         prompt: str,
         model: str,
-        params: Optional[ModelParams] = None,
-        tools: Optional[List[Union[CodeFunction, SavedFunctionId, ToolFunctionDefinition]]] = None,
-        if_exists: Optional[IfExists] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        params: ModelParams | None = None,
+        tools: list[CodeFunction | SavedFunctionId | ToolFunctionDefinition] | None = None,
+        if_exists: IfExists | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> CodePrompt: ...

     @overload # messages only, no prompt
     def create(
         self,
         *,
-        name: Optional[str] = None,
-        slug: Optional[str] = None,
-        description: Optional[str] = None,
-        id: Optional[str] = None,
-        messages: List[ChatCompletionMessageParam],
+        name: str | None = None,
+        slug: str | None = None,
+        description: str | None = None,
+        id: str | None = None,
+        messages: list[ChatCompletionMessageParam],
         model: str,
-        params: Optional[ModelParams] = None,
-        tools: Optional[List[Union[CodeFunction, SavedFunctionId, ToolFunctionDefinition]]] = None,
-        if_exists: Optional[IfExists] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        params: ModelParams | None = None,
+        tools: list[CodeFunction | SavedFunctionId | ToolFunctionDefinition] | None = None,
+        if_exists: IfExists | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> CodePrompt: ...

     def create(
         self,
         *,
-        name: Optional[str] = None,
-        slug: Optional[str] = None,
-        description: Optional[str] = None,
-        id: Optional[str] = None,
-        prompt: Optional[str] = None,
-        messages: Optional[List[ChatCompletionMessageParam]] = None,
+        name: str | None = None,
+        slug: str | None = None,
+        description: str | None = None,
+        id: str | None = None,
+        prompt: str | None = None,
+        messages: list[ChatCompletionMessageParam] | None = None,
         model: str,
-        params: Optional[ModelParams] = None,
-        tools: Optional[List[Union[CodeFunction, SavedFunctionId, ToolFunctionDefinition]]] = None,
-        if_exists: Optional[IfExists] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        params: ModelParams | None = None,
+        tools: list[CodeFunction | SavedFunctionId | ToolFunctionDefinition] | None = None,
+        if_exists: IfExists | None = None,
+        metadata: dict[str, Any] | None = None,
     ):
         """Creates a prompt.

@@ -239,8 +240,8 @@ class PromptBuilder:
         if not slug:
             slug = slugify.slugify(name)

-        tool_functions: List[Union[CodeFunction, SavedFunctionId]] = []
-        raw_tools: List[ToolFunctionDefinition] = []
+        tool_functions: list[CodeFunction | SavedFunctionId] = []
+        raw_tools: list[ToolFunctionDefinition] = []
         for tool in tools or []:
             if isinstance(tool, CodeFunction):
                 tool_functions.append(tool)
@@ -298,11 +299,11 @@ class ScorerBuilder:
     def create(
         self,
         *,
-        name: Optional[str] = None,
-        slug: Optional[str] = None,
-        description: Optional[str] = None,
-        if_exists: Optional[IfExists] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        name: str | None = None,
+        slug: str | None = None,
+        description: str | None = None,
+        if_exists: IfExists | None = None,
+        metadata: dict[str, Any] | None = None,
         handler: Callable[..., Any],
         parameters: Any,
         returns: Any = None,
@@ -313,16 +314,16 @@ class ScorerBuilder:
     def create(
         self,
         *,
-        name: Optional[str] = None,
-        slug: Optional[str] = None,
-        description: Optional[str] = None,
-        if_exists: Optional[IfExists] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        name: str | None = None,
+        slug: str | None = None,
+        description: str | None = None,
+        if_exists: IfExists | None = None,
+        metadata: dict[str, Any] | None = None,
         prompt: str,
         model: str,
-        params: Optional[ModelParams] = None,
+        params: ModelParams | None = None,
         use_cot: bool,
-        choice_scores: Dict[str, float],
+        choice_scores: dict[str, float],
     ) -> CodePrompt: ...

     # LLM scorer with messages.
@@ -330,38 +331,38 @@ class ScorerBuilder:
     def create(
         self,
         *,
-        name: Optional[str] = None,
-        slug: Optional[str] = None,
-        description: Optional[str] = None,
-        if_exists: Optional[IfExists] = None,
-        metadata: Optional[Dict[str, Any]] = None,
-        messages: List[ChatCompletionMessageParam],
+        name: str | None = None,
+        slug: str | None = None,
+        description: str | None = None,
+        if_exists: IfExists | None = None,
+        metadata: dict[str, Any] | None = None,
+        messages: list[ChatCompletionMessageParam],
         model: str,
-        params: Optional[ModelParams] = None,
+        params: ModelParams | None = None,
         use_cot: bool,
-        choice_scores: Dict[str, float],
+        choice_scores: dict[str, float],
     ) -> CodePrompt: ...

     def create(
         self,
         *,
-        name: Optional[str] = None,
-        slug: Optional[str] = None,
-        description: Optional[str] = None,
-        if_exists: Optional[IfExists] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        name: str | None = None,
+        slug: str | None = None,
+        description: str | None = None,
+        if_exists: IfExists | None = None,
+        metadata: dict[str, Any] | None = None,
         # Code scorer params.
-        handler: Optional[Callable[..., Any]] = None,
+        handler: Callable[..., Any] | None = None,
         parameters: Any = None,
         returns: Any = None,
         # LLM scorer params.
-        prompt: Optional[str] = None,
-        messages: Optional[List[ChatCompletionMessageParam]] = None,
-        model: Optional[str] = None,
-        params: Optional[ModelParams] = None,
-        use_cot: Optional[bool] = None,
-        choice_scores: Optional[Dict[str, float]] = None,
-    ) -> Union[CodeFunction, CodePrompt]:
+        prompt: str | None = None,
+        messages: list[ChatCompletionMessageParam] | None = None,
+        model: str | None = None,
+        params: ModelParams | None = None,
+        use_cot: bool | None = None,
+        choice_scores: dict[str, float] | None = None,
+    ) -> CodeFunction | CodePrompt:
         """Creates a scorer.

         Args:
@@ -462,8 +463,8 @@ class Project:
         self.prompts = PromptBuilder(self)
         self.scorers = ScorerBuilder(self)

-        self._publishable_code_functions: List[CodeFunction] = []
-        self._publishable_prompts: List[CodePrompt] = []
+        self._publishable_code_functions: list[CodeFunction] = []
+        self._publishable_prompts: list[CodePrompt] = []

     def add_code_function(self, fn: CodeFunction):
         self._publishable_code_functions.append(fn)
@@ -483,7 +484,7 @@ class Project:
         login()
         project_id_cache = ProjectIdCache()

-        definitions: List[Dict[str, Any]] = []
+        definitions: list[dict[str, Any]] = []
         if self._publishable_code_functions:
             eprint(
                 f"{bcolors.WARNING}Code functions cannot be published directly. Use `braintrust push` instead.{bcolors.ENDC}"
braintrust/functions/invoke.py CHANGED
@@ -1,8 +1,8 @@
-from typing import Any, Dict, List, Literal, Optional, TypedDict, TypeVar, Union, overload
+from typing import Any, Literal, TypedDict, TypeVar, overload

 from sseclient import SSEClient

-from .._generated_types import InvokeContext
+from .._generated_types import FunctionTypeEnum, InvokeContext
 from ..logger import Exportable, get_span_parent_object, login, proxy_conn
 from ..util import response_raise_for_status
 from .constants import INVOKE_API_VERSION
@@ -31,27 +31,28 @@ class TraceScope(TypedDict):
 @overload
 def invoke(
     # the permutations of arguments for a function id
-    function_id: Optional[str] = None,
-    version: Optional[str] = None,
-    prompt_session_id: Optional[str] = None,
-    prompt_session_function_id: Optional[str] = None,
-    project_name: Optional[str] = None,
-    project_id: Optional[str] = None,
-    slug: Optional[str] = None,
-    global_function: Optional[str] = None,
+    function_id: str | None = None,
+    version: str | None = None,
+    prompt_session_id: str | None = None,
+    prompt_session_function_id: str | None = None,
+    project_name: str | None = None,
+    project_id: str | None = None,
+    slug: str | None = None,
+    global_function: str | None = None,
+    function_type: FunctionTypeEnum | None = None,
     # arguments to the function
     input: Any = None,
-    messages: Optional[List[Any]] = None,
-    context: Optional[InvokeContext] = None,
-    metadata: Optional[Dict[str, Any]] = None,
-    tags: Optional[List[str]] = None,
-    parent: Optional[Union[Exportable, str]] = None,
-    stream: Optional[Literal[False]] = None,
-    mode: Optional[ModeType] = None,
-    strict: Optional[bool] = None,
-    org_name: Optional[str] = None,
-    api_key: Optional[str] = None,
-    app_url: Optional[str] = None,
+    messages: list[Any] | None = None,
+    context: InvokeContext | None = None,
+    metadata: dict[str, Any] | None = None,
+    tags: list[str] | None = None,
+    parent: Exportable | str | None = None,
+    stream: Literal[False] | None = None,
+    mode: ModeType | None = None,
+    strict: bool | None = None,
+    org_name: str | None = None,
+    api_key: str | None = None,
+    app_url: str | None = None,
     force_login: bool = False,
 ) -> T: ...

@@ -59,56 +60,58 @@ def invoke(
 @overload
 def invoke(
     # the permutations of arguments for a function id
-    function_id: Optional[str] = None,
-    version: Optional[str] = None,
-    prompt_session_id: Optional[str] = None,
-    prompt_session_function_id: Optional[str] = None,
-    project_name: Optional[str] = None,
-    project_id: Optional[str] = None,
-    slug: Optional[str] = None,
-    global_function: Optional[str] = None,
+    function_id: str | None = None,
+    version: str | None = None,
+    prompt_session_id: str | None = None,
+    prompt_session_function_id: str | None = None,
+    project_name: str | None = None,
+    project_id: str | None = None,
+    slug: str | None = None,
+    global_function: str | None = None,
+    function_type: FunctionTypeEnum | None = None,
     # arguments to the function
     input: Any = None,
-    messages: Optional[List[Any]] = None,
-    context: Optional[InvokeContext] = None,
-    metadata: Optional[Dict[str, Any]] = None,
-    tags: Optional[List[str]] = None,
-    parent: Optional[Union[Exportable, str]] = None,
+    messages: list[Any] | None = None,
+    context: InvokeContext | None = None,
+    metadata: dict[str, Any] | None = None,
+    tags: list[str] | None = None,
+    parent: Exportable | str | None = None,
     stream: Literal[True] = True,
-    mode: Optional[ModeType] = None,
-    strict: Optional[bool] = None,
-    org_name: Optional[str] = None,
-    api_key: Optional[str] = None,
-    app_url: Optional[str] = None,
+    mode: ModeType | None = None,
+    strict: bool | None = None,
+    org_name: str | None = None,
+    api_key: str | None = None,
+    app_url: str | None = None,
     force_login: bool = False,
 ) -> BraintrustStream: ...


 def invoke(
     # the permutations of arguments for a function id
-    function_id: Optional[str] = None,
-    version: Optional[str] = None,
-    prompt_session_id: Optional[str] = None,
-    prompt_session_function_id: Optional[str] = None,
-    project_name: Optional[str] = None,
-    project_id: Optional[str] = None,
-    slug: Optional[str] = None,
-    global_function: Optional[str] = None,
+    function_id: str | None = None,
+    version: str | None = None,
+    prompt_session_id: str | None = None,
+    prompt_session_function_id: str | None = None,
+    project_name: str | None = None,
+    project_id: str | None = None,
+    slug: str | None = None,
+    global_function: str | None = None,
+    function_type: FunctionTypeEnum | None = None,
     # arguments to the function
     input: Any = None,
-    messages: Optional[List[Any]] = None,
-    context: Optional[InvokeContext] = None,
-    metadata: Optional[Dict[str, Any]] = None,
-    tags: Optional[List[str]] = None,
-    parent: Optional[Union[Exportable, str]] = None,
+    messages: list[Any] | None = None,
+    context: InvokeContext | None = None,
+    metadata: dict[str, Any] | None = None,
+    tags: list[str] | None = None,
+    parent: Exportable | str | None = None,
     stream: bool = False,
-    mode: Optional[ModeType] = None,
-    strict: Optional[bool] = None,
-    org_name: Optional[str] = None,
-    api_key: Optional[str] = None,
-    app_url: Optional[str] = None,
+    mode: ModeType | None = None,
+    strict: bool | None = None,
+    org_name: str | None = None,
+    api_key: str | None = None,
+    app_url: str | None = None,
     force_login: bool = False,
-) -> Union[BraintrustStream, T]:
+) -> BraintrustStream | T:
     """
     Invoke a Braintrust function, returning a `BraintrustStream` or the value as a plain
     Python object.
@@ -147,6 +150,8 @@ def invoke(
             This is not the project the function belongs to, but the project context for the invocation.
         slug: The slug of the function to invoke.
         global_function: The name of the global function to invoke.
+        function_type: The type of the global function to invoke. If unspecified, defaults to 'scorer'
+            for backward compatibility.

     Returns:
         The output of the function. If `stream` is True, returns a `BraintrustStream`,
@@ -176,6 +181,8 @@ def invoke(
         function_id_args["slug"] = slug
     if global_function is not None:
         function_id_args["global_function"] = global_function
+    if function_type is not None:
+        function_id_args["function_type"] = function_type

     request = dict(
         input=input,
@@ -213,7 +220,7 @@ def invoke(
         return resp.json()


-def init_function(project_name: str, slug: str, version: Optional[str] = None):
+def init_function(project_name: str, slug: str, version: str | None = None):
     """
     Creates a function that can be used as either a task or scorer in the Eval framework.
     When used as a task, it will invoke the specified Braintrust function with the input.
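Apart from the annotation cleanup, the functional change in functions/invoke.py is the new `function_type` keyword, which is forwarded into `function_id_args` alongside `global_function`. A minimal sketch of how it might be used (the import path and the global function name are assumptions for illustration; per the docstring above, omitting `function_type` still resolves global functions as scorers):

# Hypothetical usage; only the `function_type` parameter itself comes from this diff.
from braintrust import invoke  # assumed top-level export of functions/invoke.py

result = invoke(
    global_function="my-global-scorer",  # illustrative name
    function_type="scorer",              # new in 0.4.0; defaults to "scorer" when omitted
    input={"output": "The sky is blue.", "expected": "The sky is blue."},
)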
braintrust/functions/stream.py CHANGED
@@ -7,8 +7,9 @@ with utility methods to make them easy to log and convert into various formats.

 import dataclasses
 import json
+from collections.abc import Generator
 from itertools import tee
-from typing import Generator, List, Literal, Union
+from typing import Literal, Union

 from sseclient import SSEClient

@@ -87,7 +88,7 @@ class BraintrustStream:
     with utility methods to make them easy to log and convert into various formats.
     """

-    def __init__(self, base_stream: Union[SSEClient, List[BraintrustStreamChunk]]):
+    def __init__(self, base_stream: SSEClient | list[BraintrustStreamChunk]):
        """
        Initialize a BraintrustStream.

braintrust/generated_types.py CHANGED
@@ -1,4 +1,4 @@
-"""Auto-generated file (internal git SHA 437eb5379a737f70dec98033fccf81de43e8e177) -- do not modify"""
+"""Auto-generated file (internal git SHA 547fa17c0937e0e25fdf9214487be6f31c91a37a) -- do not modify"""

 from ._generated_types import (
     Acl,
@@ -55,6 +55,7 @@ from ._generated_types import (
     MCPServer,
     MessageRole,
     ModelParams,
+    NullableFunctionTypeEnum,
     NullableSavedFunctionId,
     ObjectReference,
     ObjectReferenceNullish,
@@ -161,6 +162,7 @@ __all__ = [
     "MCPServer",
     "MessageRole",
     "ModelParams",
+    "NullableFunctionTypeEnum",
     "NullableSavedFunctionId",
     "ObjectReference",
     "ObjectReferenceNullish",
braintrust/git_fields.py CHANGED
@@ -1,5 +1,5 @@
 from dataclasses import dataclass, field
-from typing import List, Literal, Optional
+from typing import Literal

 from .serializable_data_class import SerializableDataClass

@@ -8,21 +8,21 @@ from .serializable_data_class import SerializableDataClass
 class RepoInfo(SerializableDataClass):
     """Information about the current HEAD of the repo."""

-    commit: Optional[str] = None
-    branch: Optional[str] = None
-    tag: Optional[str] = None
-    dirty: Optional[bool] = None
-    author_name: Optional[str] = None
-    author_email: Optional[str] = None
-    commit_message: Optional[str] = None
-    commit_time: Optional[str] = None
-    git_diff: Optional[str] = None
+    commit: str | None = None
+    branch: str | None = None
+    tag: str | None = None
+    dirty: bool | None = None
+    author_name: str | None = None
+    author_email: str | None = None
+    commit_message: str | None = None
+    commit_time: str | None = None
+    git_diff: str | None = None


 @dataclass
 class GitMetadataSettings(SerializableDataClass):
     collect: Literal["all", "some", "none"] = "all"
-    fields: Optional[List[str]] = field(default_factory=list)
+    fields: list[str] | None = field(default_factory=list)

     @classmethod
     def merge(cls, s1: "GitMetadataSettings", s2: "GitMetadataSettings") -> "GitMetadataSettings":
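The git_fields.py dataclasses keep their defaults under the new syntax, so construction is unchanged; a minimal sketch (field values are illustrative, and merge() precedence rules are not shown in this hunk):

# Illustrative construction; field names come from the dataclasses above.
from braintrust.git_fields import GitMetadataSettings, RepoInfo

settings = GitMetadataSettings(collect="some", fields=["commit", "branch", "dirty"])
info = RepoInfo(commit="abc1234", branch="main", dirty=False)  # remaining fields default to None

# merge() combines two settings objects; its precedence is defined outside this hunk.
merged = GitMetadataSettings.merge(settings, GitMetadataSettings())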
braintrust/gitutil.py CHANGED
@@ -4,7 +4,6 @@ import re
 import subprocess
 import threading
 from functools import lru_cache as _cache
-from typing import Optional

 from .git_fields import GitMetadataSettings, RepoInfo

@@ -42,7 +41,7 @@ def _get_base_branch(remote=None):
     # To speed this up in the short term, we pick from a list of common names
     # and only fall back to the remote origin if required.
     COMMON_BASE_BRANCHES = ["main", "master", "develop"]
-    repo_branches = set(b.name for b in repo.branches)
+    repo_branches = {b.name for b in repo.branches}
     if sum(b in repo_branches for b in COMMON_BASE_BRANCHES) == 1:
         for b in COMMON_BASE_BRANCHES:
             if b in repo_branches:
@@ -121,7 +120,7 @@ def truncate_to_byte_limit(input_string, byte_limit=65536):
     return encoded[:byte_limit].decode("utf-8", errors="ignore")


-def get_repo_info(settings: Optional[GitMetadataSettings] = None):
+def get_repo_info(settings: GitMetadataSettings | None = None):
     if settings is None:
         settings = GitMetadataSettings()

braintrust/graph_util.py CHANGED
@@ -1,24 +1,24 @@
 # Generic graph algorithms.

 import dataclasses
-from typing import Dict, List, Optional, Protocol, Set, Tuple
+from typing import Protocol


 # An UndirectedGraph consists of a set of vertex labels and a set of edges
 # between vertices.
 @dataclasses.dataclass
 class UndirectedGraph:
-    vertices: Set[int]
-    edges: Set[Tuple[int, int]]
+    vertices: set[int]
+    edges: set[tuple[int, int]]


 # An AdjacencyListGraph is a mapping from vertex label to the list of vertices
 # where there is a directed edge from the key to the value.
-AdjacencyListGraph = Dict[int, Set[int]]
+AdjacencyListGraph = dict[int, set[int]]


 class FirstVisitF(Protocol):
-    def __call__(self, vertex: int, *, parent_vertex: Optional[int], **kwargs) -> None:
+    def __call__(self, vertex: int, *, parent_vertex: int | None, **kwargs) -> None:
         """Extras:
         - parent_vertex: the vertex which spawned the current vertex as its
           child during the depth-first search. `parent_vertex` is guaranteed
@@ -33,9 +33,9 @@ class LastVisitF(Protocol):

 def depth_first_search(
     graph: AdjacencyListGraph,
-    first_visit_f: Optional[FirstVisitF] = None,
-    last_visit_f: Optional[LastVisitF] = None,
-    visitation_order: Optional[List[int]] = None,
+    first_visit_f: FirstVisitF | None = None,
+    last_visit_f: LastVisitF | None = None,
+    visitation_order: list[int] | None = None,
 ) -> None:
     """A general depth-first search algorithm over a directed graph. As it
     traverses the graph, it invokes user-provided hooks when a vertex is *first*
@@ -86,7 +86,7 @@ def depth_first_search(
            events.append(("first", child, dict(parent_vertex=vertex)))


-def undirected_connected_components(graph: UndirectedGraph) -> List[List[int]]:
+def undirected_connected_components(graph: UndirectedGraph) -> list[list[int]]:
     """Group together all the connected components of an undirected graph.
     Return each group as a list of vertices.
     """
@@ -124,7 +124,7 @@ def undirected_connected_components(graph: UndirectedGraph) -> List[List[int]]:
     return output


-def topological_sort(graph: AdjacencyListGraph, visitation_order: Optional[List[int]] = None) -> List[int]:
+def topological_sort(graph: AdjacencyListGraph, visitation_order: list[int] | None = None) -> list[int]:
     """The topological_sort function accepts a graph as input, with edges from
     parents to children. It returns an ordering where parents are guaranteed to
     come before their children.
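graph_util.py only respells the generics; AdjacencyListGraph is still a mapping from parent vertex to the set of child vertices. A minimal usage sketch of topological_sort (the example graph is illustrative):

# Illustrative graph; edges point from parents to children, per the comments above.
from braintrust.graph_util import topological_sort

graph: dict[int, set[int]] = {
    1: {2, 3},
    2: {4},
    3: {4},
    4: set(),
}

order = topological_sort(graph)
# Any valid result places parents before children, e.g. [1, 3, 2, 4] or [1, 2, 3, 4].
assert order.index(1) < order.index(2) < order.index(4)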
braintrust/id_gen.py CHANGED
@@ -45,8 +45,8 @@ class UUIDGenerator(IDGenerator):


 class OTELIDGenerator(IDGenerator):
-    """ ID generator that generates OpenTelemetry-compatible IDs. We use this to have ids that can
-        seamlessly flow between Braintrust and OpenTelemetry.
+    """ID generator that generates OpenTelemetry-compatible IDs. We use this to have ids that can
+    seamlessly flow between Braintrust and OpenTelemetry.
     """

     def get_span_id(self):