tracia 0.0.1.tar.gz → 0.1.1.tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- tracia-0.1.1/.claude/settings.local.json +48 -0
- tracia-0.1.1/.gitignore +30 -0
- tracia-0.1.1/CLAUDE.md +112 -0
- tracia-0.1.1/PKG-INFO +277 -0
- tracia-0.1.1/README.md +241 -0
- tracia-0.1.1/pyproject.toml +98 -0
- tracia-0.1.1/tests/__init__.py +1 -0
- tracia-0.1.1/tests/test_client.py +99 -0
- tracia-0.1.1/tests/test_errors.py +105 -0
- tracia-0.1.1/tests/test_llm.py +200 -0
- tracia-0.1.1/tests/test_types.py +192 -0
- tracia-0.1.1/tests/test_utils.py +124 -0
- tracia-0.1.1/tracia/__init__.py +157 -0
- tracia-0.1.1/tracia/_client.py +1100 -0
- tracia-0.1.1/tracia/_constants.py +39 -0
- tracia-0.1.1/tracia/_errors.py +87 -0
- tracia-0.1.1/tracia/_http.py +362 -0
- tracia-0.1.1/tracia/_llm.py +898 -0
- tracia-0.1.1/tracia/_session.py +244 -0
- tracia-0.1.1/tracia/_streaming.py +135 -0
- tracia-0.1.1/tracia/_types.py +564 -0
- tracia-0.1.1/tracia/_utils.py +116 -0
- tracia-0.1.1/tracia/py.typed +0 -0
- tracia-0.1.1/tracia/resources/__init__.py +6 -0
- tracia-0.1.1/tracia/resources/prompts.py +273 -0
- tracia-0.1.1/tracia/resources/spans.py +227 -0
- tracia-0.0.1/.idea/.gitignore +0 -8
- tracia-0.0.1/.idea/copilot.data.migration.agent.xml +0 -6
- tracia-0.0.1/.idea/inspectionProfiles/Project_Default.xml +0 -6
- tracia-0.0.1/.idea/inspectionProfiles/profiles_settings.xml +0 -6
- tracia-0.0.1/.idea/misc.xml +0 -7
- tracia-0.0.1/.idea/modules.xml +0 -8
- tracia-0.0.1/.idea/tracia-python.iml +0 -8
- tracia-0.0.1/.idea/workspace.xml +0 -51
- tracia-0.0.1/PKG-INFO +0 -52
- tracia-0.0.1/README.md +0 -28
- tracia-0.0.1/pyproject.toml +0 -44
- tracia-0.0.1/tracia/__init__.py +0 -8
- {tracia-0.0.1 → tracia-0.1.1}/LICENSE +0 -0
tracia-0.1.1/.claude/settings.local.json
ADDED
@@ -0,0 +1,48 @@
+{
+  "permissions": {
+    "allow": [
+      "WebFetch(domain:ai-sdk.dev)",
+      "WebFetch(domain:github.com)",
+      "WebFetch(domain:mintlify.com)",
+      "WebSearch",
+
+      "Bash(python:*)",
+      "Bash(python3:*)",
+      "Bash(python manage.py:*)",
+      "Bash(pip install:*)",
+      "Bash(pip show:*)",
+      "Bash(/opt/homebrew/bin/python3.10:*)",
+      "Bash(/opt/homebrew/bin/python3.10 -m pip install:*)",
+
+      "Bash(npm install:*)",
+      "Bash(npm run build:*)",
+      "Bash(npm run dev:*)",
+      "Bash(npm test:*)",
+      "Bash(npm show:*)",
+      "Bash(npm view:*)",
+      "Bash(npm ls:*)",
+      "Bash(npx tsc:*)",
+      "Bash(npx vitest run:*)",
+      "Bash(pnpm test:*)",
+      "Bash(pnpm typecheck:*)",
+
+      "Bash(mint dev:*)",
+
+      "Bash(find:*)",
+      "Bash(tree:*)",
+      "Bash(grep:*)",
+      "Bash(ls:*)",
+      "Bash(cat:*)",
+      "Bash(xargs:*)",
+      "Bash(curl:*)",
+
+      "Bash(git show:*)",
+      "Bash(git diff:*)",
+      "Bash(git log:*)",
+      "Bash(git status:*)",
+      "Bash(git branch:*)",
+      "Bash(git check-ignore:*)",
+      "Bash(git -C /Users/dm/work/tracia/*:*)"
+    ]
+  }
+}
tracia-0.1.1/.gitignore
ADDED
@@ -0,0 +1,30 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# Distribution / packaging
+dist/
+build/
+*.egg-info/
+*.egg
+
+# Virtual environments
+.venv/
+venv/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+.mypy_cache/
+
+# OS
+.DS_Store
+Thumbs.db
tracia-0.1.1/CLAUDE.md
ADDED
@@ -0,0 +1,112 @@
+# Tracia Python SDK
+
+Python SDK for Tracia — the developer tool for storing, testing, and tracing LLM prompts. Published to PyPI as `tracia`.
+
+This is the Python counterpart to `tracia-node`. Both SDKs implement the same v1 API surface. See `~/work/tracia/sdk-parity.md` for the canonical mapping between them.
+
+## Coding Standards
+
+### Naming
+- ALWAYS use descriptive variable/parameter names
+- NEVER use single-letter names (`p`, `r`, `k`) except in trivial callbacks like list comprehensions
+- Use domain terminology: `prompt`, `span`, `provider`, `api_key`
+
+### Comments
+- Do NOT add comments that just describe what's obvious from names
+- Only comment on WHY, not WHAT
+
+### Enums and Constants
+- ALWAYS use enums/constants for fixed values, never hardcoded strings
+- Enums defined in `tracia/_errors.py` (TraciaErrorCode) and `tracia/_types.py` (LLMProvider)
+
+## Tech Stack
+
+- Python 3.10+
+- httpx for HTTP requests (sync + async)
+- Pydantic 2.0+ for type definitions and validation
+- LiteLLM for unified LLM provider abstraction
+- Hatchling for building
+- pytest + pytest-asyncio for testing
+- mypy (strict) for type checking
+- ruff for linting
+
+## Project Structure
+
+```
+tracia-python/
+├── tracia/
+│   ├── __init__.py      # Public API exports (55+ types)
+│   ├── _client.py       # Main Tracia class (run_local, sessions, span scheduling)
+│   ├── _types.py        # All Pydantic type definitions
+│   ├── _http.py         # HttpClient + AsyncHttpClient (httpx-based)
+│   ├── _llm.py          # LiteLLM wrapper (provider resolution, message conversion)
+│   ├── _session.py      # TraciaSession (multi-turn trace linking)
+│   ├── _streaming.py    # LocalStream + AsyncLocalStream
+│   ├── _errors.py       # TraciaError + TraciaErrorCode enum
+│   ├── _utils.py        # ID generation, validation, variable interpolation
+│   ├── _constants.py    # BASE_URL, timeouts, retry config, env var mapping
+│   ├── py.typed         # PEP 561 marker
+│   └── resources/
+│       ├── __init__.py
+│       ├── prompts.py   # Prompts resource (list, get, create, update, delete, run)
+│       └── spans.py     # Spans resource (create, get, list, evaluate)
+├── tests/
+│   ├── test_client.py
+│   ├── test_errors.py
+│   ├── test_llm.py
+│   ├── test_types.py
+│   └── test_utils.py
+└── pyproject.toml       # Build config + dependencies
+```
+
+## Key Patterns
+
+### Dual sync/async
+Every public method has both sync and async variants. Async methods use the `a` prefix:
+- `run_local()` / `arun_local()`
+- `prompts.list()` / `prompts.alist()`
+- `spans.create()` / `spans.acreate()`
+- `flush()` / `aflush()`
+- `close()` / `aclose()`
+
+### Pydantic models with camelCase aliases
+All types use snake_case fields with `Field(alias="camelCase")` for API serialization:
+```python
+class RunLocalResult(BaseModel):
+    model_config = ConfigDict(populate_by_name=True)
+    span_id: str = Field(alias="spanId")
+    trace_id: str = Field(alias="traceId")
+```
+
+### Background span creation
+Spans are submitted to the API in the background using `ThreadPoolExecutor`. Max 1000 pending spans, 2 retry attempts with 500ms delay.
+
+### LiteLLM integration
+Provider abstraction via LiteLLM. Supports OpenAI, Anthropic, Google. LiteLLM is imported dynamically with a helpful error message if missing.
+
+### Context manager support
+```python
+with Tracia(api_key="...") as client:
+    result = client.run_local(...)
+# Resources cleaned up automatically
+```
+
+## How to Add New Methods
+
+1. Add the type definitions to `tracia/_types.py` (Pydantic BaseModel, snake_case fields, camelCase aliases)
+2. Add the method to the appropriate resource (`resources/prompts.py` or `resources/spans.py`) or to `_client.py`
+3. Create BOTH sync and async variants
+4. Export new types from `tracia/__init__.py`
+5. Update `~/work/tracia/sdk-parity.md` with the new mapping
+6. Check `~/work/tracia/tracia-node/` to verify the Node SDK has the equivalent
+
+## Commands
+
+```bash
+pip install -e ".[dev]"   # Install with dev dependencies
+pytest                    # Run tests
+pytest --cov=tracia       # Run tests with coverage
+mypy tracia/              # Type checking (strict)
+ruff check tracia/        # Lint
+ruff format tracia/       # Format
+```
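An aside on the "Dual sync/async" pattern documented in CLAUDE.md above: a minimal sketch of what one such method pair could look like. Only the `list()`/`alist()` naming comes from the document; the `Prompts` class shape, the `/v1/prompts` path, and the per-call `httpx` clients are illustrative assumptions, not the package's actual implementation (which presumably reuses a shared client).

```python
# Hypothetical sketch of the sync/async pairing convention; the endpoint
# and client handling are assumed, only the list()/alist() naming is documented.
import httpx


class Prompts:
    def __init__(self, base_url: str, api_key: str) -> None:
        self._base_url = base_url
        self._headers = {"Authorization": f"Bearer {api_key}"}

    def list(self) -> list[dict]:
        # Sync variant.
        with httpx.Client(base_url=self._base_url, headers=self._headers) as http:
            response = http.get("/v1/prompts")  # illustrative path
            response.raise_for_status()
            return response.json()

    async def alist(self) -> list[dict]:
        # Async variant: same call, `a`-prefixed name, awaitable client.
        async with httpx.AsyncClient(base_url=self._base_url, headers=self._headers) as http:
            response = await http.get("/v1/prompts")  # illustrative path
            response.raise_for_status()
            return response.json()
```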
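The camelCase-alias pattern shown under "Key Patterns" uses standard Pydantic v2 serialization; a quick demonstration with placeholder values (`sp_123` and `tr_456` are made up for illustration):

```python
from pydantic import BaseModel, ConfigDict, Field


class RunLocalResult(BaseModel):
    model_config = ConfigDict(populate_by_name=True)
    span_id: str = Field(alias="spanId")
    trace_id: str = Field(alias="traceId")


# populate_by_name=True lets callers construct with snake_case field names...
result = RunLocalResult(span_id="sp_123", trace_id="tr_456")

# ...while by_alias=True serializes back to the camelCase wire format.
print(result.model_dump(by_alias=True))  # {'spanId': 'sp_123', 'traceId': 'tr_456'}

# Parsing an API response uses the aliases directly.
parsed = RunLocalResult.model_validate({"spanId": "sp_123", "traceId": "tr_456"})
```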
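"Background span creation" states three concrete limits: a `ThreadPoolExecutor`, at most 1000 pending spans, and 2 retry attempts with a 500 ms delay. A sketch that honors those stated limits, with everything else (class name, worker count, drop-on-overflow behavior) assumed:

```python
import threading
import time
from concurrent.futures import ThreadPoolExecutor

MAX_PENDING_SPANS = 1000   # cap stated in CLAUDE.md
RETRY_ATTEMPTS = 2         # retry count stated in CLAUDE.md
RETRY_DELAY_SECONDS = 0.5  # 500ms delay stated in CLAUDE.md


class SpanScheduler:
    """Illustrative background submitter; not the package's actual class."""

    def __init__(self, submit_span) -> None:
        # submit_span: callable that POSTs one span payload and may raise.
        self._submit_span = submit_span
        self._executor = ThreadPoolExecutor(max_workers=4)  # worker count assumed
        self._lock = threading.Lock()
        self._pending = 0

    def schedule(self, payload: dict) -> None:
        with self._lock:
            if self._pending >= MAX_PENDING_SPANS:
                return  # overflow behavior assumed: drop rather than block
            self._pending += 1
        self._executor.submit(self._deliver, payload)

    def _deliver(self, payload: dict) -> None:
        try:
            # One initial try plus the stated 2 retries.
            for attempt in range(RETRY_ATTEMPTS + 1):
                try:
                    self._submit_span(payload)
                    return
                except Exception:
                    if attempt == RETRY_ATTEMPTS:
                        return  # give up after the stated retries
                    time.sleep(RETRY_DELAY_SECONDS)
        finally:
            with self._lock:
                self._pending -= 1
```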
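"LiteLLM integration" says the dependency is imported dynamically with a helpful error message if missing; the conventional shape of that pattern is sketched below. The function name and error text are illustrative, not taken from `tracia/_llm.py`.

```python
def _load_litellm():
    # Import lazily so the rest of the client works even if the LLM
    # dependency is absent; the error text here is illustrative.
    try:
        import litellm
    except ImportError as import_error:
        raise ImportError(
            "LiteLLM is required for local LLM execution. "
            "Install it with: pip install litellm"
        ) from import_error
    return litellm
```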
tracia-0.1.1/PKG-INFO
ADDED
@@ -0,0 +1,277 @@
+Metadata-Version: 2.4
+Name: tracia
+Version: 0.1.1
+Summary: LLM prompt management and tracing SDK
+Project-URL: Homepage, https://tracia.io
+Project-URL: Documentation, https://docs.tracia.io
+Project-URL: Repository, https://github.com/tracia/tracia-python
+Project-URL: Issues, https://github.com/tracia/tracia-python/issues
+Author-email: Tracia <hello@tracia.io>
+License-Expression: MIT
+License-File: LICENSE
+Keywords: ai,anthropic,chatgpt,claude,gemini,google,litellm,llm,observability,openai,prompt,tracing
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Typing :: Typed
+Requires-Python: >=3.10
+Requires-Dist: httpx>=0.25.0
+Requires-Dist: litellm>=1.30.0
+Requires-Dist: pydantic>=2.0.0
+Requires-Dist: typing-extensions>=4.0.0
+Provides-Extra: dev
+Requires-Dist: mypy>=1.0.0; extra == 'dev'
+Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
+Requires-Dist: pytest-cov>=4.0.0; extra == 'dev'
+Requires-Dist: pytest>=7.0.0; extra == 'dev'
+Requires-Dist: respx>=0.20.0; extra == 'dev'
+Requires-Dist: ruff>=0.1.0; extra == 'dev'
+Description-Content-Type: text/markdown
+
+# Tracia
+
+**LLM prompt management and tracing SDK for Python**
+
+[](https://badge.fury.io/py/tracia)
+[](https://www.python.org/downloads/)
+[](https://opensource.org/licenses/MIT)
+
+## What is Tracia?
+
+Tracia is a modern LLM prompt management and tracing platform. This Python SDK provides:
+
+- **Unified LLM Access** - Call OpenAI, Anthropic, Google, and 100+ providers through a single interface (powered by LiteLLM)
+- **Automatic Tracing** - Every LLM call is automatically traced with latency, token usage, and cost
+- **Prompt Management** - Store, version, and manage your prompts in the cloud
+- **Session Linking** - Easily link related calls for multi-turn conversations
+
+## Installation
+
+```bash
+pip install tracia
+```
+
+You'll also need API keys for the LLM providers you want to use:
+```bash
+export OPENAI_API_KEY="sk-..."
+export ANTHROPIC_API_KEY="sk-ant-..."
+export GOOGLE_API_KEY="..."
+```
+
+## Quick Start
+
+```python
+from tracia import Tracia
+
+# Initialize the client
+client = Tracia(api_key="your_tracia_api_key")
+
+# Run a local prompt
+result = client.run_local(
+    model="gpt-4o",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+print(result.text)
+print(f"Tokens: {result.usage.total_tokens}")
+```
+
+## Streaming
+
+```python
+# Stream the response
+stream = client.run_local(
+    model="gpt-4o",
+    messages=[{"role": "user", "content": "Tell me a story"}],
+    stream=True
+)
+
+for chunk in stream:
+    print(chunk, end="", flush=True)
+
+# Get the final result (stream.result is a Future[StreamResult])
+final = stream.result.result()
+print(f"\nTotal tokens: {final.usage.total_tokens}")
+```
+
+## Multi-turn Conversations with Sessions
+
+```python
+# Create a session for linked conversations
+session = client.create_session()
+
+# First message
+r1 = session.run_local(
+    model="gpt-4o",
+    messages=[{"role": "user", "content": "My name is Alice"}]
+)
+
+# Follow-up - automatically linked to the same trace
+r2 = session.run_local(
+    model="gpt-4o",
+    messages=[
+        {"role": "user", "content": "My name is Alice"},
+        {"role": "assistant", "content": r1.text},
+        {"role": "user", "content": "What's my name?"}
+    ]
+)
+```
+
+## Function Calling
+
+```python
+from tracia import ToolDefinition, ToolParameters, JsonSchemaProperty
+
+# Define a tool
+tools = [
+    ToolDefinition(
+        name="get_weather",
+        description="Get the current weather",
+        parameters=ToolParameters(
+            properties={
+                "location": JsonSchemaProperty(
+                    type="string",
+                    description="City name"
+                )
+            },
+            required=["location"]
+        )
+    )
+]
+
+result = client.run_local(
+    model="gpt-4o",
+    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
+    tools=tools
+)
+
+if result.tool_calls:
+    for call in result.tool_calls:
+        print(f"Tool: {call.name}, Args: {call.arguments}")
+```
+
+## Variable Interpolation
+
+```python
+result = client.run_local(
+    model="gpt-4o",
+    messages=[
+        {"role": "system", "content": "You are a helpful assistant named {{name}}."},
+        {"role": "user", "content": "Hello!"}
+    ],
+    variables={"name": "Claude"}
+)
+```
+
+## Prompts API
+
+```python
+# List all prompts
+prompts = client.prompts.list()
+
+# Get a specific prompt
+prompt = client.prompts.get("my-prompt")
+
+# Run a prompt template
+result = client.prompts.run(
+    "my-prompt",
+    variables={"name": "World"}
+)
+```
+
+## Spans API
+
+```python
+from tracia import Eval, EvaluateOptions
+
+# List spans
+spans = client.spans.list()
+
+# Evaluate a span
+client.spans.evaluate(
+    "sp_xxx",
+    EvaluateOptions(
+        evaluator="quality",
+        value=Eval.POSITIVE,  # or Eval.NEGATIVE
+        note="Great response!",
+    ),
+)
+```
+
+## Async Support
+
+All methods have async variants:
+
+```python
+import asyncio
+
+async def main():
+    async with Tracia(api_key="...") as client:
+        result = await client.arun_local(
+            model="gpt-4o",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(result.text)
+
+asyncio.run(main())
+```
+
+## Supported Providers
+
+Via LiteLLM, Tracia supports 100+ providers including:
+
+- **OpenAI**: gpt-4o, gpt-4, gpt-3.5-turbo, o1, o3
+- **Anthropic**: claude-3-opus, claude-sonnet-4, claude-3-haiku
+- **Google**: gemini-2.0-flash, gemini-2.5-pro
+- And many more...
+
+## Error Handling
+
+```python
+from tracia import TraciaError, TraciaErrorCode
+
+try:
+    result = client.run_local(...)
+except TraciaError as e:
+    if e.code == TraciaErrorCode.MISSING_PROVIDER_API_KEY:
+        print("Please set your API key")
+    elif e.code == TraciaErrorCode.PROVIDER_ERROR:
+        print(f"LLM error: {e.message}")
+```
+
+## Configuration Options
+
+```python
+client = Tracia(
+    api_key="...",
+    base_url="https://app.tracia.io",  # Custom API URL
+    on_span_error=lambda e, span_id: print(f"Span error: {e}")
+)
+
+result = client.run_local(
+    model="gpt-4o",
+    messages=[...],
+    temperature=0.7,
+    max_output_tokens=1000,
+    timeout_ms=30000,
+    tags=["production"],
+    user_id="user_123",
+    session_id="session_456",
+    send_trace=True,  # Set to False to disable tracing
+)
+```
+
+## Learn More
+
+- Website: [tracia.io](https://tracia.io)
+- Documentation: [docs.tracia.io](https://docs.tracia.io)
+- GitHub: [github.com/tracia](https://github.com/tracia)
+
+## License
+
+MIT
tracia-0.1.1/README.md
ADDED
@@ -0,0 +1,241 @@
+# Tracia
+
+**LLM prompt management and tracing SDK for Python**
+
+[](https://badge.fury.io/py/tracia)
+[](https://www.python.org/downloads/)
+[](https://opensource.org/licenses/MIT)
+
+## What is Tracia?
+
+Tracia is a modern LLM prompt management and tracing platform. This Python SDK provides:
+
+- **Unified LLM Access** - Call OpenAI, Anthropic, Google, and 100+ providers through a single interface (powered by LiteLLM)
+- **Automatic Tracing** - Every LLM call is automatically traced with latency, token usage, and cost
+- **Prompt Management** - Store, version, and manage your prompts in the cloud
+- **Session Linking** - Easily link related calls for multi-turn conversations
+
+## Installation
+
+```bash
+pip install tracia
+```
+
+You'll also need API keys for the LLM providers you want to use:
+```bash
+export OPENAI_API_KEY="sk-..."
+export ANTHROPIC_API_KEY="sk-ant-..."
+export GOOGLE_API_KEY="..."
+```
+
+## Quick Start
+
+```python
+from tracia import Tracia
+
+# Initialize the client
+client = Tracia(api_key="your_tracia_api_key")
+
+# Run a local prompt
+result = client.run_local(
+    model="gpt-4o",
+    messages=[{"role": "user", "content": "Hello!"}]
+)
+print(result.text)
+print(f"Tokens: {result.usage.total_tokens}")
+```
+
+## Streaming
+
+```python
+# Stream the response
+stream = client.run_local(
+    model="gpt-4o",
+    messages=[{"role": "user", "content": "Tell me a story"}],
+    stream=True
+)
+
+for chunk in stream:
+    print(chunk, end="", flush=True)
+
+# Get the final result (stream.result is a Future[StreamResult])
+final = stream.result.result()
+print(f"\nTotal tokens: {final.usage.total_tokens}")
+```
+
+## Multi-turn Conversations with Sessions
+
+```python
+# Create a session for linked conversations
+session = client.create_session()
+
+# First message
+r1 = session.run_local(
+    model="gpt-4o",
+    messages=[{"role": "user", "content": "My name is Alice"}]
+)
+
+# Follow-up - automatically linked to the same trace
+r2 = session.run_local(
+    model="gpt-4o",
+    messages=[
+        {"role": "user", "content": "My name is Alice"},
+        {"role": "assistant", "content": r1.text},
+        {"role": "user", "content": "What's my name?"}
+    ]
+)
+```
+
+## Function Calling
+
+```python
+from tracia import ToolDefinition, ToolParameters, JsonSchemaProperty
+
+# Define a tool
+tools = [
+    ToolDefinition(
+        name="get_weather",
+        description="Get the current weather",
+        parameters=ToolParameters(
+            properties={
+                "location": JsonSchemaProperty(
+                    type="string",
+                    description="City name"
+                )
+            },
+            required=["location"]
+        )
+    )
+]
+
+result = client.run_local(
+    model="gpt-4o",
+    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
+    tools=tools
+)
+
+if result.tool_calls:
+    for call in result.tool_calls:
+        print(f"Tool: {call.name}, Args: {call.arguments}")
+```
+
+## Variable Interpolation
+
+```python
+result = client.run_local(
+    model="gpt-4o",
+    messages=[
+        {"role": "system", "content": "You are a helpful assistant named {{name}}."},
+        {"role": "user", "content": "Hello!"}
+    ],
+    variables={"name": "Claude"}
+)
+```
+
+## Prompts API
+
+```python
+# List all prompts
+prompts = client.prompts.list()
+
+# Get a specific prompt
+prompt = client.prompts.get("my-prompt")
+
+# Run a prompt template
+result = client.prompts.run(
+    "my-prompt",
+    variables={"name": "World"}
+)
+```
+
+## Spans API
+
+```python
+from tracia import Eval, EvaluateOptions
+
+# List spans
+spans = client.spans.list()
+
+# Evaluate a span
+client.spans.evaluate(
+    "sp_xxx",
+    EvaluateOptions(
+        evaluator="quality",
+        value=Eval.POSITIVE,  # or Eval.NEGATIVE
+        note="Great response!",
+    ),
+)
+```
+
+## Async Support
+
+All methods have async variants:
+
+```python
+import asyncio
+
+async def main():
+    async with Tracia(api_key="...") as client:
+        result = await client.arun_local(
+            model="gpt-4o",
+            messages=[{"role": "user", "content": "Hello!"}]
+        )
+        print(result.text)
+
+asyncio.run(main())
+```
+
+## Supported Providers
+
+Via LiteLLM, Tracia supports 100+ providers including:
+
+- **OpenAI**: gpt-4o, gpt-4, gpt-3.5-turbo, o1, o3
+- **Anthropic**: claude-3-opus, claude-sonnet-4, claude-3-haiku
+- **Google**: gemini-2.0-flash, gemini-2.5-pro
+- And many more...
+
+## Error Handling
+
+```python
+from tracia import TraciaError, TraciaErrorCode
+
+try:
+    result = client.run_local(...)
+except TraciaError as e:
+    if e.code == TraciaErrorCode.MISSING_PROVIDER_API_KEY:
+        print("Please set your API key")
+    elif e.code == TraciaErrorCode.PROVIDER_ERROR:
+        print(f"LLM error: {e.message}")
+```
+
+## Configuration Options
+
+```python
+client = Tracia(
+    api_key="...",
+    base_url="https://app.tracia.io",  # Custom API URL
+    on_span_error=lambda e, span_id: print(f"Span error: {e}")
+)
+
+result = client.run_local(
+    model="gpt-4o",
+    messages=[...],
+    temperature=0.7,
+    max_output_tokens=1000,
+    timeout_ms=30000,
+    tags=["production"],
+    user_id="user_123",
+    session_id="session_456",
+    send_trace=True,  # Set to False to disable tracing
+)
+```
+
+## Learn More
+
+- Website: [tracia.io](https://tracia.io)
+- Documentation: [docs.tracia.io](https://docs.tracia.io)
+- GitHub: [github.com/tracia](https://github.com/tracia)
+
+## License
+
+MIT