fiddler-langgraph 0.1.0rc1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,15 @@
1
+ include LICENSE
2
+ include README.md
3
+ include fiddler_langgraph/VERSION
4
+ recursive-include fiddler_langgraph *.py
5
+ recursive-exclude tests *
6
+ recursive-exclude examples *
7
+ recursive-exclude scripts *
8
+ exclude semantic_conventions.md
9
+ exclude mypy.ini
10
+ exclude bandit.yaml
11
+ exclude setup.py
12
+ global-exclude *.pyc
13
+ global-exclude *.pyo
14
+ global-exclude __pycache__
15
+ global-exclude .DS_Store
@@ -0,0 +1,328 @@
1
+ Metadata-Version: 2.4
2
+ Name: fiddler-langgraph
3
+ Version: 0.1.0rc1
4
+ Summary: Python SDK for instrumenting GenAI Applications with Fiddler
5
+ Home-page: https://fiddler.ai
6
+ Author: Fiddler AI
7
+ Author-email: Fiddler AI <support@fiddler.ai>
8
+ License-Expression: Apache-2.0
9
+ Project-URL: Homepage, https://fiddler.ai
10
+ Project-URL: Documentation, https://docs.fiddler.ai
11
+ Project-URL: Repository, https://github.com/fiddler-labs/fiddler-sdk
12
+ Project-URL: Issues, https://github.com/fiddler-labs/fiddler-sdk/issues
13
+ Keywords: fiddler,ai,genai,llm,monitoring,observability,instrumentation,langgraph,langchain,opentelemetry
14
+ Classifier: Development Status :: 3 - Alpha
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
17
+ Classifier: Topic :: System :: Monitoring
18
+ Classifier: Topic :: Software Development :: Quality Assurance
19
+ Classifier: Programming Language :: Python :: 3
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3.12
23
+ Classifier: Programming Language :: Python :: 3.13
24
+ Classifier: Operating System :: OS Independent
25
+ Requires-Python: >=3.10
26
+ Description-Content-Type: text/markdown
27
+ Requires-Dist: pip>=21.0
28
+ Requires-Dist: opentelemetry-api<=1.35.0,>=1.19.0
29
+ Requires-Dist: opentelemetry-sdk<=1.35.0,>=1.19.0
30
+ Requires-Dist: opentelemetry-instrumentation<=0.56b0,>=0.40b0
31
+ Requires-Dist: opentelemetry-exporter-otlp-proto-http<=1.35.0,>=1.19.0
32
+ Requires-Dist: pydantic>=2.0
33
+ Provides-Extra: dev
34
+ Requires-Dist: pytest>=8.3.5; extra == "dev"
35
+ Requires-Dist: pytest-cov>=6.1.1; extra == "dev"
36
+ Requires-Dist: mypy>=1.16.1; extra == "dev"
37
+ Requires-Dist: black>=25.1.0; extra == "dev"
38
+ Requires-Dist: isort>=6.0; extra == "dev"
39
+ Requires-Dist: flake8>=7.2.0; extra == "dev"
40
+ Requires-Dist: pre-commit>=4.2.0; extra == "dev"
41
+ Requires-Dist: bandit>=1.8.5; extra == "dev"
42
+ Requires-Dist: pylint>=3.3.7; extra == "dev"
43
+ Requires-Dist: ruff>=0.12.0; extra == "dev"
44
+ Requires-Dist: nox>=2025.5.1; extra == "dev"
45
+ Requires-Dist: tomli==2.2.1; extra == "dev"
46
+ Requires-Dist: langgraph<=1.0.1,>=0.3.28; extra == "dev"
47
+ Requires-Dist: langchain-community<=1.0.0,>=0.3.27; extra == "dev"
48
+ Requires-Dist: langchain-openai<=1.0.0,>=0.2.14; extra == "dev"
49
+ Requires-Dist: langchain-chroma<=1.0.0,>=0.1.6; extra == "dev"
50
+ Requires-Dist: langgraph-supervisor>=0.0.1; extra == "dev"
51
+ Dynamic: author
52
+ Dynamic: home-page
53
+ Dynamic: requires-python
54
+
55
+ # Fiddler LangGraph SDK
56
+
57
+ SDK for instrumenting GenAI Applications with Fiddler using OpenTelemetry and LangGraph.
58
+
59
+ ## Installation
60
+
61
+ ```bash
62
+ pip install fiddler-langgraph
63
+ ```
64
+
65
+ **Note**: This SDK supports LangGraph versions >= 0.3.28 and <= 1.0.2. If you already have LangGraph installed in your environment, the SDK will work with your existing version as long as it falls within this range. If LangGraph is not installed or is outside the supported range, you'll get a helpful error message with installation instructions.
66
+
67
+ ### With Example Dependencies
68
+
69
+ To run the example scripts in the `examples/` directory:
70
+
71
+ ```bash
72
+ pip install fiddler-langgraph[examples]
73
+ ```
74
+
75
+ ### Development Dependencies
76
+
77
+ For development and testing:
78
+
79
+ ```bash
80
+ pip install fiddler-langgraph[dev]
81
+ ```
82
+
83
+ ## Quick Start
84
+
85
+ ```python
86
+ from fiddler_langgraph import FiddlerClient
87
+
88
+ # Initialize the FiddlerClient with basic configuration
89
+ client = FiddlerClient(
90
+ url="https://api.fiddler.ai",
91
+ api_key="fdl_api_key",
92
+ application_id="fdl_application_id" # Must be a valid UUID4
93
+ )
94
+
95
+ # For langgraph, you can instrument like below
96
+ from fiddler_langgraph.tracing.instrumentation import LangGraphInstrumentor, set_llm_context, set_conversation_id
97
+ LangGraphInstrumentor(client).instrument()
98
+
99
+ # Set additional context for LLM processing
100
+ from langchain_openai import ChatOpenAI
101
+ model = ChatOpenAI(model='gpt-4o-mini')
102
+ set_llm_context(model, "Previous conversation context")
103
+
104
+ # Set conversation ID for multi-turn conversations
105
+ from langgraph.graph import StateGraph
106
+ workflow = StateGraph(state_schema=State)
107
+ app = workflow.compile()
108
+ set_conversation_id("conversation_123")
109
+ app.invoke({"messages": [{"role": "user", "content": "Write a novel"}]})
110
+ ```
111
+
112
+ ## LangGraph Usage Examples
113
+
114
+ ### Basic Instrumentation
115
+
116
+ ```python
117
+ from fiddler_langgraph.tracing.instrumentation import LangGraphInstrumentor
118
+
119
+ # Initialize and instrument
120
+ instrumentor = LangGraphInstrumentor(client)
121
+ instrumentor.instrument()
122
+ ```
123
+
124
+ ### Setting LLM Context
125
+
126
+ ```python
127
+ from fiddler_langgraph.tracing.instrumentation import set_llm_context
128
+ from langchain_openai import ChatOpenAI
129
+
130
+ model = ChatOpenAI(model='gpt-4o-mini')
131
+ set_llm_context(model, "User prefers concise responses")
132
+ ```
133
+
134
+ ### Conversation Tracking
135
+
136
+ ```python
137
+ from fiddler_langgraph.tracing.instrumentation import set_conversation_id
138
+ import uuid
139
+
140
+ # Set conversation ID for tracking multi-turn conversations
141
+ conversation_id = str(uuid.uuid4())
142
+ set_conversation_id(conversation_id)
143
+ ```
144
+
145
+ ## Configuration
146
+
147
+ The Fiddler SDK provides flexible configuration options for OpenTelemetry integration and performance tuning.
148
+
149
+ ### Basic Configuration
150
+
151
+ ```python
152
+ client = FiddlerClient(
153
+ api_key="your-api-key",
154
+ application_id="your-app-id", # Must be a valid UUID4
155
+ url="https://api.fiddler.ai"
156
+ )
157
+ ```
158
+
159
+ ### Advanced Configuration
160
+
161
+ ```python
162
+ from opentelemetry.sdk.trace import SpanLimits, sampling
163
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import Compression
164
+
165
+ # Custom span limits for high-volume applications
166
+ custom_limits = SpanLimits(
167
+ max_events=64,
168
+ max_links=64,
169
+ max_span_attributes=64,
170
+ max_event_attributes=64,
171
+ max_link_attributes=64,
172
+ max_span_attribute_length=4096,
173
+ )
174
+
175
+ # Sampling strategy for production
176
+ sampler = sampling.TraceIdRatioBased(0.1) # Sample 10% of traces
177
+
178
+ client = FiddlerClient(
179
+ api_key="your-api-key",
180
+ application_id="your-app-id",
181
+ span_limits=custom_limits,
182
+ sampler=sampler,
183
+ console_tracer=False, # Set to True for debugging
184
+ compression=Compression.Gzip, # Enable gzip compression (default)
185
+ )
186
+ ```
187
+
188
+ ### Compression Options
189
+
190
+ The SDK supports compression for OTLP export to reduce payload size:
191
+
192
+ ```python
193
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import Compression
194
+
195
+ # Enable gzip compression (default, recommended for production)
196
+ client = FiddlerClient(
197
+ api_key="your-api-key",
198
+ application_id="your-app-id",
199
+ compression=Compression.Gzip,
200
+ )
201
+
202
+ # Disable compression (useful for debugging or local development)
203
+ client = FiddlerClient(
204
+ api_key="your-api-key",
205
+ application_id="your-app-id",
206
+ compression=Compression.NoCompression,
207
+ )
208
+
209
+ # Use deflate compression (alternative to gzip)
210
+ client = FiddlerClient(
211
+ api_key="your-api-key",
212
+ application_id="your-app-id",
213
+ compression=Compression.Deflate,
214
+ )
215
+ ```
216
+
217
+ ### Environment Variables for Batch Processing
218
+
219
+ Configure batch span processor behavior using environment variables:
220
+
221
+ ```python
222
+ import os
223
+
224
+ # Configure batch processing
225
+ os.environ['OTEL_BSP_MAX_QUEUE_SIZE'] = '500'
226
+ os.environ['OTEL_BSP_SCHEDULE_DELAY_MILLIS'] = '500'
227
+ os.environ['OTEL_BSP_MAX_EXPORT_BATCH_SIZE'] = '50'
228
+ os.environ['OTEL_BSP_EXPORT_TIMEOUT'] = '10000'
229
+
230
+ client = FiddlerClient(
231
+ api_key="your-api-key",
232
+ application_id="your-app-id"
233
+ )
234
+ ```
235
+
236
+ ### Default Configuration
237
+
238
+ The SDK uses restrictive defaults to prevent excessive resource usage:
239
+
240
+ - **Span Limits**: 32 events/links/attributes per span, 2048 character attribute length
241
+ - **Batch Processing**: 100 queue size, 1000ms delay, 10 batch size, 5000ms timeout
242
+ - **Sampling**: Always on (100% sampling)
243
+
244
+ ## Features
245
+
246
+ ### Core Features
247
+
248
+ - **OpenTelemetry Integration**: Full tracing support with configurable span limits
249
+ - **Input Validation**: UUID4 validation for application IDs, URL validation
250
+ - **Flexible Configuration**: Custom span limits, sampling strategies, and batch processing
251
+ - **Resource Management**: Conservative defaults to prevent resource exhaustion
252
+
253
+ ### LangGraph Instrumentation
254
+
255
+ - **Automatic Tracing**: Complete workflow tracing with span hierarchy
256
+ - **LLM Context Setting**: Set additional context information for LLM processing via `set_llm_context()`
257
+ - **Conversation Tracking**: Set conversation IDs for multi-turn conversations via `set_conversation_id()`
258
+ - **Message Serialization**: Smart handling of complex message content (lists, dicts)
259
+ - **Attribute Truncation**: Automatic truncation of long attribute values (256 character limit)
260
+ - **Error Handling**: Comprehensive error tracking and status reporting
261
+
262
+ ### Monitoring and Observability
263
+
264
+ - **Span Types**: Different span types for chains, tools, retrievers, and LLMs
265
+ - **Agent Tracking**: Automatic agent name and ID generation
266
+ - **Performance Metrics**: Timing, token usage, and model information
267
+ - **Error Context**: Detailed error information with stack traces
268
+
269
+ ## Validation and Error Handling
270
+
271
+ The SDK includes comprehensive validation:
272
+
273
+ - **Application ID**: Must be a valid UUID4 string
274
+ - **URL**: Must have valid scheme (http/https) and netloc
275
+ - **Attribute Values**: Automatically truncated to prevent oversized spans
276
+ - **Message Content**: Smart serialization of complex data structures
277
+
278
+ ## Performance Considerations
279
+
280
+ - **High-volume applications**: Increase span limits and batch processing parameters
281
+ - **Low-latency requirements**: Decrease batch schedule delay
282
+ - **Memory constraints**: Use restrictive span limits and smaller batch sizes
283
+ - **Debugging**: Enable console tracer and use higher attribute limits
284
+ - **Production**: Use appropriate sampling strategies to control data volume
285
+
286
+ ## Requirements
287
+
288
+ - Python 3.10, 3.11, 3.12, or 3.13
289
+ - Dependencies (automatically installed):
290
+ - opentelemetry-api (1.34.1)
291
+ - opentelemetry-sdk (1.34.1)
292
+ - opentelemetry-instrumentation (0.55b1)
293
+ - opentelemetry-exporter-otlp-proto-http (1.34.1)
294
+ - langgraph (0.4.8)
295
+ - langchain (0.3.26)
296
+ - langchain-core (automatically installed with langchain)
297
+
298
+ ## Development
299
+
300
+ ### Running Tests
301
+
302
+ ```bash
303
+ # Run all tests
304
+ pytest
305
+
306
+ # Run specific test file
307
+ pytest tests/core/test_client.py
308
+
309
+ # Run with coverage
310
+ pytest --cov=fiddler_langgraph
311
+ ```
312
+
313
+ ### Code Quality
314
+
315
+ ```bash
316
+ # Run linting
317
+ flake8 fiddler_langgraph/
318
+
319
+ # Run type checking
320
+ mypy fiddler_langgraph/
321
+
322
+ # Run security checks
323
+ bandit -r fiddler_langgraph/
324
+ ```
325
+
326
+ ## License
327
+
328
+ Apache License 2.0 - see LICENSE file for details
@@ -0,0 +1,274 @@
1
+ # Fiddler LangGraph SDK
2
+
3
+ SDK for instrumenting GenAI Applications with Fiddler using OpenTelemetry and LangGraph.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ pip install fiddler-langgraph
9
+ ```
10
+
11
+ **Note**: This SDK supports LangGraph versions >= 0.3.28 and <= 1.0.2. If you already have LangGraph installed in your environment, the SDK will work with your existing version as long as it falls within this range. If LangGraph is not installed or is outside the supported range, you'll get a helpful error message with installation instructions.
12
+
13
+ ### With Example Dependencies
14
+
15
+ To run the example scripts in the `examples/` directory:
16
+
17
+ ```bash
18
+ pip install fiddler-langgraph[examples]
19
+ ```
20
+
21
+ ### Development Dependencies
22
+
23
+ For development and testing:
24
+
25
+ ```bash
26
+ pip install fiddler-langgraph[dev]
27
+ ```
28
+
29
+ ## Quick Start
30
+
31
+ ```python
32
+ from fiddler_langgraph import FiddlerClient
33
+
34
+ # Initialize the FiddlerClient with basic configuration
35
+ client = FiddlerClient(
36
+ url="https://api.fiddler.ai",
37
+ api_key="fdl_api_key",
38
+ application_id="fdl_application_id" # Must be a valid UUID4
39
+ )
40
+
41
+ # For langgraph, you can instrument like below
42
+ from fiddler_langgraph.tracing.instrumentation import LangGraphInstrumentor, set_llm_context, set_conversation_id
43
+ LangGraphInstrumentor(client).instrument()
44
+
45
+ # Set additional context for LLM processing
46
+ from langchain_openai import ChatOpenAI
47
+ model = ChatOpenAI(model='gpt-4o-mini')
48
+ set_llm_context(model, "Previous conversation context")
49
+
50
+ # Set conversation ID for multi-turn conversations
51
+ from langgraph.graph import StateGraph
52
+ workflow = StateGraph(state_schema=State)
53
+ app = workflow.compile()
54
+ set_conversation_id("conversation_123")
55
+ app.invoke({"messages": [{"role": "user", "content": "Write a novel"}]})
56
+ ```
57
+
58
+ ## LangGraph Usage Examples
59
+
60
+ ### Basic Instrumentation
61
+
62
+ ```python
63
+ from fiddler_langgraph.tracing.instrumentation import LangGraphInstrumentor
64
+
65
+ # Initialize and instrument
66
+ instrumentor = LangGraphInstrumentor(client)
67
+ instrumentor.instrument()
68
+ ```
69
+
70
+ ### Setting LLM Context
71
+
72
+ ```python
73
+ from fiddler_langgraph.tracing.instrumentation import set_llm_context
74
+ from langchain_openai import ChatOpenAI
75
+
76
+ model = ChatOpenAI(model='gpt-4o-mini')
77
+ set_llm_context(model, "User prefers concise responses")
78
+ ```
79
+
80
+ ### Conversation Tracking
81
+
82
+ ```python
83
+ from fiddler_langgraph.tracing.instrumentation import set_conversation_id
84
+ import uuid
85
+
86
+ # Set conversation ID for tracking multi-turn conversations
87
+ conversation_id = str(uuid.uuid4())
88
+ set_conversation_id(conversation_id)
89
+ ```
90
+
91
+ ## Configuration
92
+
93
+ The Fiddler SDK provides flexible configuration options for OpenTelemetry integration and performance tuning.
94
+
95
+ ### Basic Configuration
96
+
97
+ ```python
98
+ client = FiddlerClient(
99
+ api_key="your-api-key",
100
+ application_id="your-app-id", # Must be a valid UUID4
101
+ url="https://api.fiddler.ai"
102
+ )
103
+ ```
104
+
105
+ ### Advanced Configuration
106
+
107
+ ```python
108
+ from opentelemetry.sdk.trace import SpanLimits, sampling
109
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import Compression
110
+
111
+ # Custom span limits for high-volume applications
112
+ custom_limits = SpanLimits(
113
+ max_events=64,
114
+ max_links=64,
115
+ max_span_attributes=64,
116
+ max_event_attributes=64,
117
+ max_link_attributes=64,
118
+ max_span_attribute_length=4096,
119
+ )
120
+
121
+ # Sampling strategy for production
122
+ sampler = sampling.TraceIdRatioBased(0.1) # Sample 10% of traces
123
+
124
+ client = FiddlerClient(
125
+ api_key="your-api-key",
126
+ application_id="your-app-id",
127
+ span_limits=custom_limits,
128
+ sampler=sampler,
129
+ console_tracer=False, # Set to True for debugging
130
+ compression=Compression.Gzip, # Enable gzip compression (default)
131
+ )
132
+ ```
133
+
134
+ ### Compression Options
135
+
136
+ The SDK supports compression for OTLP export to reduce payload size:
137
+
138
+ ```python
139
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import Compression
140
+
141
+ # Enable gzip compression (default, recommended for production)
142
+ client = FiddlerClient(
143
+ api_key="your-api-key",
144
+ application_id="your-app-id",
145
+ compression=Compression.Gzip,
146
+ )
147
+
148
+ # Disable compression (useful for debugging or local development)
149
+ client = FiddlerClient(
150
+ api_key="your-api-key",
151
+ application_id="your-app-id",
152
+ compression=Compression.NoCompression,
153
+ )
154
+
155
+ # Use deflate compression (alternative to gzip)
156
+ client = FiddlerClient(
157
+ api_key="your-api-key",
158
+ application_id="your-app-id",
159
+ compression=Compression.Deflate,
160
+ )
161
+ ```
162
+
163
+ ### Environment Variables for Batch Processing
164
+
165
+ Configure batch span processor behavior using environment variables:
166
+
167
+ ```python
168
+ import os
169
+
170
+ # Configure batch processing
171
+ os.environ['OTEL_BSP_MAX_QUEUE_SIZE'] = '500'
172
+ os.environ['OTEL_BSP_SCHEDULE_DELAY_MILLIS'] = '500'
173
+ os.environ['OTEL_BSP_MAX_EXPORT_BATCH_SIZE'] = '50'
174
+ os.environ['OTEL_BSP_EXPORT_TIMEOUT'] = '10000'
175
+
176
+ client = FiddlerClient(
177
+ api_key="your-api-key",
178
+ application_id="your-app-id"
179
+ )
180
+ ```
181
+
182
+ ### Default Configuration
183
+
184
+ The SDK uses restrictive defaults to prevent excessive resource usage:
185
+
186
+ - **Span Limits**: 32 events/links/attributes per span, 2048 character attribute length
187
+ - **Batch Processing**: 100 queue size, 1000ms delay, 10 batch size, 5000ms timeout
188
+ - **Sampling**: Always on (100% sampling)
189
+
190
+ ## Features
191
+
192
+ ### Core Features
193
+
194
+ - **OpenTelemetry Integration**: Full tracing support with configurable span limits
195
+ - **Input Validation**: UUID4 validation for application IDs, URL validation
196
+ - **Flexible Configuration**: Custom span limits, sampling strategies, and batch processing
197
+ - **Resource Management**: Conservative defaults to prevent resource exhaustion
198
+
199
+ ### LangGraph Instrumentation
200
+
201
+ - **Automatic Tracing**: Complete workflow tracing with span hierarchy
202
+ - **LLM Context Setting**: Set additional context information for LLM processing via `set_llm_context()`
203
+ - **Conversation Tracking**: Set conversation IDs for multi-turn conversations via `set_conversation_id()`
204
+ - **Message Serialization**: Smart handling of complex message content (lists, dicts)
205
+ - **Attribute Truncation**: Automatic truncation of long attribute values (256 character limit)
206
+ - **Error Handling**: Comprehensive error tracking and status reporting
207
+
208
+ ### Monitoring and Observability
209
+
210
+ - **Span Types**: Different span types for chains, tools, retrievers, and LLMs
211
+ - **Agent Tracking**: Automatic agent name and ID generation
212
+ - **Performance Metrics**: Timing, token usage, and model information
213
+ - **Error Context**: Detailed error information with stack traces
214
+
215
+ ## Validation and Error Handling
216
+
217
+ The SDK includes comprehensive validation:
218
+
219
+ - **Application ID**: Must be a valid UUID4 string
220
+ - **URL**: Must have valid scheme (http/https) and netloc
221
+ - **Attribute Values**: Automatically truncated to prevent oversized spans
222
+ - **Message Content**: Smart serialization of complex data structures
223
+
224
+ ## Performance Considerations
225
+
226
+ - **High-volume applications**: Increase span limits and batch processing parameters
227
+ - **Low-latency requirements**: Decrease batch schedule delay
228
+ - **Memory constraints**: Use restrictive span limits and smaller batch sizes
229
+ - **Debugging**: Enable console tracer and use higher attribute limits
230
+ - **Production**: Use appropriate sampling strategies to control data volume
231
+
232
+ ## Requirements
233
+
234
+ - Python 3.10, 3.11, 3.12, or 3.13
235
+ - Dependencies (automatically installed):
236
+ - opentelemetry-api (1.34.1)
237
+ - opentelemetry-sdk (1.34.1)
238
+ - opentelemetry-instrumentation (0.55b1)
239
+ - opentelemetry-exporter-otlp-proto-http (1.34.1)
240
+ - langgraph (0.4.8)
241
+ - langchain (0.3.26)
242
+ - langchain-core (automatically installed with langchain)
243
+
244
+ ## Development
245
+
246
+ ### Running Tests
247
+
248
+ ```bash
249
+ # Run all tests
250
+ pytest
251
+
252
+ # Run specific test file
253
+ pytest tests/core/test_client.py
254
+
255
+ # Run with coverage
256
+ pytest --cov=fiddler_langgraph
257
+ ```
258
+
259
+ ### Code Quality
260
+
261
+ ```bash
262
+ # Run linting
263
+ flake8 fiddler_langgraph/
264
+
265
+ # Run type checking
266
+ mypy fiddler_langgraph/
267
+
268
+ # Run security checks
269
+ bandit -r fiddler_langgraph/
270
+ ```
271
+
272
+ ## License
273
+
274
+ Apache License 2.0 - see LICENSE file for details
@@ -0,0 +1 @@
1
+ 0.1.0rc1
@@ -0,0 +1,11 @@
1
+ """Fiddler SDK for instrumenting GenAI Applications."""
2
+
3
+ from pathlib import Path
4
+
5
+ from fiddler_langgraph.core.client import FiddlerClient
6
+
7
+ # Read version from VERSION file
8
+ _version_file = Path(__file__).parent / 'VERSION'
9
+ __version__ = _version_file.read_text().strip()
10
+
11
+ __all__ = ['FiddlerClient', '__version__']
@@ -0,0 +1 @@
1
+ """Core functionality for Fiddler SDK."""