tracia 0.0.1__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tracia/_types.py ADDED
@@ -0,0 +1,564 @@
1
+ """Type definitions for the Tracia SDK."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from datetime import datetime
6
+ from enum import Enum
7
+ from typing import Any, Literal, Union
8
+
9
+ from pydantic import BaseModel, ConfigDict, Field
10
+
11
+
12
class LLMProvider(str, Enum):
    """Supported LLM providers.

    Inherits from ``str`` so members compare equal to their literal values
    and serialize as plain strings.
    """

    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    GOOGLE = "google"
18
+
19
+
20
class TokenUsage(BaseModel):
    """Token usage statistics for a single model invocation.

    Aliases are camelCase to match the wire format; ``populate_by_name``
    allows construction with either snake_case names or aliases.
    """

    model_config = ConfigDict(populate_by_name=True)

    input_tokens: int = Field(alias="inputTokens")
    output_tokens: int = Field(alias="outputTokens")
    total_tokens: int = Field(alias="totalTokens")
28
+
29
+
30
+ # Content Parts
31
+
32
+
33
class TextPart(BaseModel):
    """Text content part of a message."""

    # Discriminator for the ContentPart union.
    type: Literal["text"] = "text"
    text: str


class ToolCallPart(BaseModel):
    """Tool call content part of a message."""

    type: Literal["tool_call"] = "tool_call"
    id: str
    name: str
    # Tool arguments as a decoded mapping.
    arguments: dict[str, Any]


# Message content parts, discriminated by the ``type`` field.
ContentPart = Union[TextPart, ToolCallPart]
50
+
51
+
52
+ # Tool Definitions
53
+
54
+
55
class JsonSchemaProperty(BaseModel):
    """A (possibly nested) JSON Schema property definition."""

    type: Literal["string", "number", "integer", "boolean", "array", "object"]
    description: str | None = None
    enum: list[str | int] | None = None
    # Element schema when type == "array".
    items: "JsonSchemaProperty | None" = None
    # Nested property schemas when type == "object".
    properties: dict[str, "JsonSchemaProperty"] | None = None
    required: list[str] | None = None


class ToolParameters(BaseModel):
    """Tool parameter schema (a JSON Schema object)."""

    type: Literal["object"] = "object"
    properties: dict[str, JsonSchemaProperty]
    required: list[str] | None = None


class ToolDefinition(BaseModel):
    """Tool definition for function calling."""

    name: str
    description: str
    parameters: ToolParameters


class ToolCall(BaseModel):
    """A tool call made by the model."""

    id: str
    name: str
    arguments: dict[str, Any]


# Either a provider keyword ("auto" | "none" | "required") or a mapping —
# presumably one that selects a specific tool; confirm against the API docs.
ToolChoice = Union[Literal["auto", "none", "required"], dict[str, str]]
91
+
92
+
93
+ # Messages
94
+
95
+
96
class LocalPromptMessage(BaseModel):
    """A message in a local prompt."""

    role: Literal["system", "developer", "user", "assistant", "tool"]
    # Either plain text or a list of typed content parts.
    content: str | list[ContentPart]
    # Presumably set on tool-result messages to link back to the originating
    # call — not enforced by this model; confirm at the call sites.
    tool_call_id: str | None = Field(default=None, alias="toolCallId")
    tool_name: str | None = Field(default=None, alias="toolName")

    model_config = ConfigDict(populate_by_name=True)


# Why the provider stopped generating.
FinishReason = Literal["stop", "tool_calls", "max_tokens"]
108
+
109
+
110
+ # Run Local Types
111
+
112
+
113
class RunLocalInput(BaseModel):
    """Input for the ``run_local`` method.

    Camel-case aliases mirror the wire format; ``populate_by_name`` lets
    callers use snake_case keyword arguments.
    """

    model_config = ConfigDict(populate_by_name=True)

    messages: list[LocalPromptMessage]
    model: str
    stream: bool = False
    provider: LLMProvider | None = None
    # Generation parameters (provider defaults apply when None).
    temperature: float | None = None
    max_output_tokens: int | None = Field(default=None, alias="maxOutputTokens")
    top_p: float | None = Field(default=None, alias="topP")
    stop_sequences: list[str] | None = Field(default=None, alias="stopSequences")
    timeout_ms: int | None = Field(default=None, alias="timeoutMs")
    # Provider-specific options passed through opaquely.
    custom_options: dict[str, Any] | None = Field(default=None, alias="customOptions")
    variables: dict[str, str] | None = None
    provider_api_key: str | None = Field(default=None, alias="providerApiKey")
    # Tracing / attribution metadata.
    tags: list[str] | None = None
    user_id: str | None = Field(default=None, alias="userId")
    session_id: str | None = Field(default=None, alias="sessionId")
    send_trace: bool | None = Field(default=None, alias="sendTrace")
    span_id: str | None = Field(default=None, alias="spanId")
    # Tool calling.
    tools: list[ToolDefinition] | None = None
    tool_choice: ToolChoice | None = Field(default=None, alias="toolChoice")
    trace_id: str | None = Field(default=None, alias="traceId")
    parent_span_id: str | None = Field(default=None, alias="parentSpanId")
139
+
140
+
141
class RunLocalResult(BaseModel):
    """Result from the ``run_local`` method."""

    model_config = ConfigDict(populate_by_name=True)

    text: str
    span_id: str = Field(alias="spanId")
    trace_id: str = Field(alias="traceId")
    latency_ms: int = Field(alias="latencyMs")
    usage: TokenUsage
    # NOTE(review): unit not specified here — presumably USD; confirm.
    cost: float | None = None
    provider: LLMProvider
    model: str
    tool_calls: list[ToolCall] = Field(default_factory=list, alias="toolCalls")
    finish_reason: FinishReason = Field(alias="finishReason")
    # Presumably the full assistant reply message — confirm against the API.
    message: LocalPromptMessage
157
+
158
+
159
class StreamResult(RunLocalResult):
    """Result from a streaming ``run_local`` call."""

    # True when the stream ended early rather than running to completion —
    # exact semantics set by the streaming caller; confirm there.
    aborted: bool = False
163
+
164
+
165
+ # Run Responses Types (OpenAI Responses API)
166
+
167
+
168
class ResponsesInputMessage(BaseModel):
    """User or developer message for the Responses API."""

    role: Literal["developer", "user"]
    content: str


class ResponsesFunctionCallOutput(BaseModel):
    """Function call output (tool result) for the Responses API."""

    type: Literal["function_call_output"] = "function_call_output"
    call_id: str = Field(alias="callId")
    output: str

    model_config = ConfigDict(populate_by_name=True)


class ResponsesFunctionCall(BaseModel):
    """Function call for the Responses API."""

    type: Literal["function_call"] = "function_call"
    id: str
    call_id: str = Field(alias="callId")
    name: str
    # Presumably a raw JSON-encoded string (contrast ToolCall.arguments,
    # which is a decoded mapping) — confirm against the API.
    arguments: str

    model_config = ConfigDict(populate_by_name=True)


class ResponsesMessage(BaseModel):
    """Assistant message output for the Responses API."""

    type: Literal["message"] = "message"
    role: Literal["assistant"]
    # Untyped content blocks, passed through as raw dicts.
    content: list[dict[str, Any]]


# Output items are discriminated by ``type``; the input union additionally
# accepts plain user/developer messages and function-call outputs.
ResponsesOutputItem = Union[ResponsesFunctionCall, ResponsesMessage]
ResponsesInputItem = Union[
    ResponsesInputMessage, ResponsesFunctionCallOutput, ResponsesOutputItem
]
209
+
210
+
211
class RunResponsesInput(BaseModel):
    """Input for the ``run_responses`` method."""

    model_config = ConfigDict(populate_by_name=True)

    model: str
    # Named ``input`` to mirror the Responses API payload field.
    input: list[ResponsesInputItem]
    stream: bool = False
    tools: list[ToolDefinition] | None = None
    max_output_tokens: int | None = Field(default=None, alias="maxOutputTokens")
    provider_api_key: str | None = Field(default=None, alias="providerApiKey")
    timeout_ms: int | None = Field(default=None, alias="timeoutMs")
    # Tracing / attribution metadata.
    send_trace: bool | None = Field(default=None, alias="sendTrace")
    span_id: str | None = Field(default=None, alias="spanId")
    tags: list[str] | None = None
    user_id: str | None = Field(default=None, alias="userId")
    session_id: str | None = Field(default=None, alias="sessionId")
    trace_id: str | None = Field(default=None, alias="traceId")
    parent_span_id: str | None = Field(default=None, alias="parentSpanId")
230
+
231
+
232
class ResponsesToolCall(BaseModel):
    """Tool call from the Responses API, with arguments as a decoded mapping."""

    id: str
    call_id: str = Field(alias="callId")
    name: str
    arguments: dict[str, Any]

    model_config = ConfigDict(populate_by_name=True)
241
+
242
+
243
class RunResponsesResult(BaseModel):
    """Result from the ``run_responses`` method."""

    model_config = ConfigDict(populate_by_name=True)

    text: str
    span_id: str = Field(alias="spanId")
    trace_id: str = Field(alias="traceId")
    latency_ms: int = Field(alias="latencyMs")
    usage: TokenUsage
    # Raw output items as returned by the provider.
    output_items: list[ResponsesOutputItem] = Field(
        default_factory=list, alias="outputItems"
    )
    tool_calls: list[ResponsesToolCall] = Field(default_factory=list, alias="toolCalls")
    aborted: bool = False
258
+
259
+
260
+ # Responses Events
261
+
262
+
263
class TextDeltaEvent(BaseModel):
    """Incremental text chunk emitted during streaming."""

    type: Literal["text_delta"] = "text_delta"
    data: str


class TextEvent(BaseModel):
    """Complete text event."""

    type: Literal["text"] = "text"
    data: str


class ReasoningEvent(BaseModel):
    """Reasoning/thinking content event."""

    type: Literal["reasoning"] = "reasoning"
    content: str


class ToolCallEvent(BaseModel):
    """Tool call event."""

    model_config = ConfigDict(populate_by_name=True)

    type: Literal["tool_call"] = "tool_call"
    id: str
    call_id: str = Field(alias="callId")
    name: str
    arguments: dict[str, Any]


class DoneEvent(BaseModel):
    """Stream completion event carrying final token usage."""

    type: Literal["done"] = "done"
    usage: TokenUsage


# All streaming events, discriminated by the ``type`` field.
ResponsesEvent = Union[
    TextDeltaEvent, TextEvent, ReasoningEvent, ToolCallEvent, DoneEvent
]
306
+
307
+
308
+ # Span Types
309
+
310
+
311
class CreateSpanPayload(BaseModel):
    """Payload for creating a span."""

    model_config = ConfigDict(populate_by_name=True)

    span_id: str = Field(alias="spanId")
    model: str
    provider: LLMProvider
    # Opaque request payload recorded with the span.
    input: dict[str, Any]
    variables: dict[str, str] | None = None
    output: str | None = None
    status: Literal["SUCCESS", "ERROR"]
    error: str | None = None
    # Timing / token accounting.
    latency_ms: int = Field(alias="latencyMs")
    input_tokens: int = Field(alias="inputTokens")
    output_tokens: int = Field(alias="outputTokens")
    total_tokens: int = Field(alias="totalTokens")
    # Attribution metadata.
    tags: list[str] | None = None
    user_id: str | None = Field(default=None, alias="userId")
    session_id: str | None = Field(default=None, alias="sessionId")
    # Generation parameters echoed for observability.
    temperature: float | None = None
    max_output_tokens: int | None = Field(default=None, alias="maxOutputTokens")
    top_p: float | None = Field(default=None, alias="topP")
    tools: list[ToolDefinition] | None = None
    tool_calls: list[ToolCall] | None = Field(default=None, alias="toolCalls")
    trace_id: str | None = Field(default=None, alias="traceId")
    parent_span_id: str | None = Field(default=None, alias="parentSpanId")
338
+
339
+
340
class CreateSpanResult(BaseModel):
    """Result from creating a span."""

    model_config = ConfigDict(populate_by_name=True)

    # Server-assigned record ID, distinct from the client-generated span_id.
    id: str
    span_id: str = Field(alias="spanId")
    trace_id: str = Field(alias="traceId")
348
+
349
+
350
class SpanListItem(BaseModel):
    """A span item from the list endpoint (reduced fields)."""

    model_config = ConfigDict(populate_by_name=True)

    id: str
    span_id: str = Field(alias="spanId")
    trace_id: str = Field(alias="traceId")
    prompt_slug: str | None = Field(default=None, alias="promptSlug")
    model: str
    status: Literal["SUCCESS", "ERROR"]
    latency_ms: int = Field(alias="latencyMs")
    input_tokens: int = Field(alias="inputTokens")
    output_tokens: int = Field(alias="outputTokens")
    total_tokens: int = Field(alias="totalTokens")
    cost: float | None = None
    created_at: datetime = Field(alias="createdAt")
367
+
368
+
369
class Span(BaseModel):
    """A span from the API (full detail).

    Superset of :class:`SpanListItem`, adding the recorded input/output,
    error details, and attribution metadata.
    """

    model_config = ConfigDict(populate_by_name=True)

    id: str
    span_id: str = Field(alias="spanId")
    trace_id: str = Field(alias="traceId")
    parent_span_id: str | None = Field(default=None, alias="parentSpanId")
    prompt_slug: str | None = Field(default=None, alias="promptSlug")
    prompt_version: int | None = Field(default=None, alias="promptVersion")
    model: str
    # Plain string here (not LLMProvider) — tolerates providers the SDK
    # enum does not know about.
    provider: str
    input: dict[str, Any]
    variables: dict[str, str] | None = None
    output: str | None = None
    status: Literal["SUCCESS", "ERROR"]
    error: str | None = None
    latency_ms: int = Field(alias="latencyMs")
    input_tokens: int = Field(alias="inputTokens")
    output_tokens: int = Field(alias="outputTokens")
    total_tokens: int = Field(alias="totalTokens")
    cost: float | None = None
    tags: list[str] = Field(default_factory=list)
    user_id: str | None = Field(default=None, alias="userId")
    session_id: str | None = Field(default=None, alias="sessionId")
    created_at: datetime = Field(alias="createdAt")
396
+
397
+
398
class ListSpansOptions(BaseModel):
    """Filter and pagination options for listing spans."""

    model_config = ConfigDict(populate_by_name=True)

    prompt_slug: str | None = Field(default=None, alias="promptSlug")
    status: Literal["SUCCESS", "ERROR"] | None = None
    start_date: datetime | None = Field(default=None, alias="startDate")
    end_date: datetime | None = Field(default=None, alias="endDate")
    user_id: str | None = Field(default=None, alias="userId")
    session_id: str | None = Field(default=None, alias="sessionId")
    tags: list[str] | None = None
    # Cursor-based pagination; pass the cursor from a previous result.
    limit: int | None = None
    cursor: str | None = None
412
+
413
+
414
class ListSpansResult(BaseModel):
    """One page of results from listing spans."""

    model_config = ConfigDict(populate_by_name=True)

    spans: list[SpanListItem]
    # Opaque cursor for fetching the next page (None on the last page).
    cursor: str | None = None
    has_more: bool = Field(default=False, alias="hasMore")
422
+
423
+
424
+ # Evaluate Types
425
+
426
+
427
class EvaluateOptions(BaseModel):
    """Options for evaluating a span."""

    # Evaluator key identifying which evaluator produced the score.
    evaluator: str
    value: int | float
    note: str | None = None
433
+
434
+
435
class EvaluateResult(BaseModel):
    """Result from evaluating a span."""

    model_config = ConfigDict(populate_by_name=True)

    id: str
    evaluator_key: str = Field(alias="evaluatorKey")
    evaluator_name: str = Field(alias="evaluatorName")
    value: float
    # Origin of the evaluation — values defined by the API; confirm there.
    source: str
    note: str | None = None
    created_at: datetime = Field(alias="createdAt")
447
+
448
+
449
+ # Prompt Types
450
+
451
+
452
class PromptMessage(BaseModel):
    """A message in a prompt template."""

    id: str
    role: Literal["system", "developer", "user", "assistant"]
    # Template text; presumably may contain {{variable}} placeholders —
    # confirm against the interpolation helpers.
    content: str
458
+
459
+
460
class PromptVersion(BaseModel):
    """A version of a prompt."""

    model_config = ConfigDict(populate_by_name=True)

    version: int
    messages: list[PromptMessage]
    model: str
    provider: LLMProvider
    # Generation parameters frozen into this version.
    temperature: float | None = None
    max_output_tokens: int | None = Field(default=None, alias="maxOutputTokens")
    top_p: float | None = Field(default=None, alias="topP")
    stop_sequences: list[str] | None = Field(default=None, alias="stopSequences")
    created_at: datetime = Field(alias="createdAt")
474
+
475
+
476
class Prompt(BaseModel):
    """A prompt from the API."""

    model_config = ConfigDict(populate_by_name=True)

    id: str
    slug: str
    name: str
    description: str | None = None
    current_version: int = Field(alias="currentVersion")
    # May be empty depending on the endpoint's level of detail.
    versions: list[PromptVersion] = Field(default_factory=list)
    created_at: datetime = Field(alias="createdAt")
    updated_at: datetime = Field(alias="updatedAt")
489
+
490
+
491
class PromptListItem(BaseModel):
    """A prompt item in the list response (no version bodies)."""

    model_config = ConfigDict(populate_by_name=True)

    id: str
    slug: str
    name: str
    description: str | None = None
    current_version: int = Field(alias="currentVersion")
    created_at: datetime = Field(alias="createdAt")
    updated_at: datetime = Field(alias="updatedAt")
503
+
504
+
505
class CreatePromptOptions(BaseModel):
    """Options for creating a prompt."""

    model_config = ConfigDict(populate_by_name=True)

    name: str
    # Presumably derived from ``name`` by the server when omitted — confirm.
    slug: str | None = None
    description: str | None = None
    content: list[PromptMessage]
    model: str | None = None
    provider: LLMProvider | None = None
    temperature: float | None = None
    max_output_tokens: int | None = Field(default=None, alias="maxOutputTokens")
    top_p: float | None = Field(default=None, alias="topP")
    stop_sequences: list[str] | None = Field(default=None, alias="stopSequences")
520
+
521
+
522
class UpdatePromptOptions(BaseModel):
    """Options for updating a prompt.

    All fields optional: only the fields provided are updated.
    """

    model_config = ConfigDict(populate_by_name=True)

    name: str | None = None
    slug: str | None = None
    description: str | None = None
    content: list[PromptMessage] | None = None
    model: str | None = None
    provider: LLMProvider | None = None
    temperature: float | None = None
    max_output_tokens: int | None = Field(default=None, alias="maxOutputTokens")
    top_p: float | None = Field(default=None, alias="topP")
    stop_sequences: list[str] | None = Field(default=None, alias="stopSequences")
537
+
538
+
539
class RunOptions(BaseModel):
    """Options for running a prompt."""

    model_config = ConfigDict(populate_by_name=True)

    # Overrides the model stored on the prompt version when set.
    model: str | None = None
    tags: list[str] | None = None
    user_id: str | None = Field(default=None, alias="userId")
    session_id: str | None = Field(default=None, alias="sessionId")
548
+
549
+
550
class RunResult(BaseModel):
    """Result from running a prompt (via API)."""

    model_config = ConfigDict(populate_by_name=True)

    # Text can be absent, e.g. when the model only emitted tool calls —
    # presumably; confirm against the API.
    text: str | None = None
    span_id: str = Field(alias="spanId")
    trace_id: str = Field(alias="traceId")
    prompt_version: int = Field(alias="promptVersion")
    latency_ms: int = Field(alias="latencyMs")
    usage: TokenUsage
    # Required here, unlike RunLocalResult.cost which is optional.
    cost: float
    finish_reason: FinishReason | None = Field(default=None, alias="finishReason")
    tool_calls: list[ToolCall] | None = Field(default=None, alias="toolCalls")
    structured_output: dict[str, Any] | None = Field(default=None, alias="structuredOutput")
tracia/_utils.py ADDED
@@ -0,0 +1,116 @@
1
+ """Utility functions for the Tracia SDK."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import re
6
+ import secrets
7
+ from typing import Any
8
+
9
+ from ._constants import SPAN_ID_PREFIX, TRACE_ID_PREFIX
10
+
11
+
12
def generate_span_id() -> str:
    """Create a fresh, cryptographically random span identifier.

    Returns:
        A span ID in the format 'sp_' followed by 16 hex characters.
    """
    return SPAN_ID_PREFIX + secrets.token_hex(8)


def generate_trace_id() -> str:
    """Create a fresh, cryptographically random trace identifier.

    Returns:
        A trace ID in the format 'tr_' followed by 16 hex characters.
    """
    return TRACE_ID_PREFIX + secrets.token_hex(8)
30
+
31
+
32
# Validation patterns: a short prefix followed by 16 hex characters.
_SPAN_ID_PATTERN = re.compile(r"^sp_[a-f0-9]{16}$", re.IGNORECASE)
# Earlier SDK releases issued span IDs with the trace prefix; still accepted.
_LEGACY_SPAN_ID_PATTERN = re.compile(r"^tr_[a-f0-9]{16}$", re.IGNORECASE)
# Trace IDs are validated case-sensitively (lowercase hex only).
_TRACE_ID_PATTERN = re.compile(r"^tr_[a-f0-9]{16}$")


def is_valid_span_id_format(span_id: str) -> bool:
    """Check if a span ID has a valid format.

    Both the current 'sp_' prefix and the legacy 'tr_' prefix are accepted,
    case-insensitively.

    Args:
        span_id: The span ID to validate.

    Returns:
        True if the span ID is valid, False otherwise.
    """
    accepted = (_SPAN_ID_PATTERN, _LEGACY_SPAN_ID_PATTERN)
    return any(pattern.match(span_id) is not None for pattern in accepted)


def is_valid_trace_id_format(trace_id: str) -> bool:
    """Check if a trace ID has a valid format.

    Args:
        trace_id: The trace ID to validate.

    Returns:
        True if the trace ID is valid, False otherwise.
    """
    return _TRACE_ID_PATTERN.match(trace_id) is not None
64
+
65
+
66
# Placeholders look like {{variable_name}} where the name is a \w+ run.
_VARIABLE_PATTERN = re.compile(r"\{\{(\w+)\}\}")


def interpolate_variables(text: str, variables: dict[str, str]) -> str:
    """Substitute ``{{name}}`` placeholders in *text* using *variables*.

    Placeholders whose name is not present in *variables* are left intact.

    Args:
        text: The text containing variable placeholders.
        variables: A dictionary of variable names to values.

    Returns:
        The text with variables interpolated.
    """
    return _VARIABLE_PATTERN.sub(
        lambda match: variables.get(match.group(1), match.group(0)),
        text,
    )


def interpolate_message_content(
    content: str | list[Any],
    variables: dict[str, str] | None,
) -> str | list[Any]:
    """Interpolate variables in message content.

    String content is interpolated directly.  For a list of content parts,
    only dict parts with ``type == "text"`` and a ``"text"`` key are
    rewritten (into shallow copies); all other parts pass through unchanged.

    Args:
        content: The message content (string or list of content parts).
        variables: Optional dictionary of variable names to values.

    Returns:
        The content with variables interpolated.
    """
    if variables is None:
        return content

    if isinstance(content, str):
        return interpolate_variables(content, variables)

    def _rewrite(part: Any) -> Any:
        # Only plain-dict text parts carry interpolatable text.
        if isinstance(part, dict) and part.get("type") == "text" and "text" in part:
            return {**part, "text": interpolate_variables(part["text"], variables)}
        return part

    return [_rewrite(part) for part in content]
tracia/py.typed ADDED
File without changes
@@ -0,0 +1,6 @@
1
"""API resources for the Tracia SDK."""

from .prompts import Prompts
from .spans import Spans

# Public re-exports of the resource namespaces.
__all__ = ["Prompts", "Spans"]