fiddler-langgraph 0.1.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fiddler_langgraph/VERSION CHANGED
@@ -1 +1 @@
- 0.1.1
+ 1.0.0
fiddler_langgraph/core/attributes.py CHANGED
@@ -3,8 +3,6 @@
  import contextvars
  from typing import Any

- from pydantic import ConfigDict, validate_call
-
  # Key used for storing Fiddler-specific attributes in metadata dictionary
  FIDDLER_METADATA_KEY = '_fiddler_attributes'

@@ -66,22 +64,3 @@ _CONVERSATION_ID: contextvars.ContextVar[str] = contextvars.ContextVar(
  _CUSTOM_ATTRIBUTES: contextvars.ContextVar[dict[str, Any]] = contextvars.ContextVar(
  '_CUSTOM_ATTRIBUTES'
  )
-
-
- @validate_call(config=ConfigDict(strict=True, arbitrary_types_allowed=True))
- def add_session_attributes(key: str, value: str) -> None:
- """Adds Fiddler-specific attributes to a runnable's metadata.
-
- This is used for various runnable types like Pregel nodes, LLM calls, tool
- calls, and retriever calls.
-
- Args:
- key (str): The attribute key to add or update.
- value (str): The attribute value to set.
- """
- try:
- current_attributes = _CUSTOM_ATTRIBUTES.get().copy()
- except LookupError:
- current_attributes = {}
- current_attributes[key] = value
- _CUSTOM_ATTRIBUTES.set(current_attributes)
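The hunk above removes `add_session_attributes` from `fiddler_langgraph/core/attributes.py`; a later hunk in this diff re-adds the same helper in `fiddler_langgraph/tracing/instrumentation.py`. A minimal migration sketch, assuming callers previously imported the helper from its defining module (the import paths and attribute naming below are taken from this diff, not verified against the released wheel):

```python
# Migration sketch for callers of add_session_attributes.

# 0.1.1 - the helper was defined in the core attributes module:
# from fiddler_langgraph.core.attributes import add_session_attributes

# 1.0.0 - it now lives alongside the other tracing helpers:
from fiddler_langgraph.tracing.instrumentation import add_session_attributes

# Per the new docstring, the key is exported as 'fiddler.session.user.<key>' on spans.
add_session_attributes("user_id", "user_12345")
```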
fiddler_langgraph/core/client.py CHANGED
@@ -60,7 +60,7 @@ class FiddlerClient:
  self,
  api_key: str,
  application_id: str,
- url: str = 'http://localhost:4318',
+ url: str,
  console_tracer: bool = False,
  span_limits: SpanLimits | None = _default_span_limits,
  sampler: sampling.Sampler | None = None,
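In 0.1.1 the constructor fell back to `http://localhost:4318` when `url` was omitted; in 1.0.0 it must be supplied explicitly. A minimal sketch of the call-site change (placeholder credentials; the instance URL is an assumption following the examples elsewhere in this diff):

```python
from fiddler_langgraph import FiddlerClient

# 0.1.1 - url silently defaulted to 'http://localhost:4318':
# client = FiddlerClient(api_key="YOUR_API_KEY", application_id="YOUR_APPLICATION_ID")

# 1.0.0 - url is required and must point at your Fiddler instance:
client = FiddlerClient(
    api_key="YOUR_API_KEY",
    application_id="YOUR_APPLICATION_ID",  # must be a valid UUID4
    url="https://your-instance.fiddler.ai",
)
```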
@@ -76,41 +76,69 @@ class FiddlerClient:
  Args:
  api_key (str): The API key for authenticating with the Fiddler backend. **Required**.
  application_id (str): The unique identifier (UUID4) for the application. **Required**.
- url (str): The base URL for the Fiddler backend. While it defaults to
- `http://localhost:4318` for local development, this **must** be set to your
- Fiddler instance URL for any other use.
+ url (str): The base URL for your Fiddler instance. This is specific to your
+ deployment, whether hosted, VPC-deployed, on-premise, or local development
+ (e.g., `https://your-instance.fiddler.ai`, `http://localhost:4318`). **Required**.
  console_tracer (bool): If True, traces will be printed to the console
  instead of being sent to the Fiddler backend. Useful for debugging.
  Defaults to `False`.
  span_limits (SpanLimits | None): Configuration for span limits, such as the
  maximum number of attributes or events. Defaults to a restrictive
- set of internal limits.
+ set of internal limits (32 events/links/attributes, 2048 char limit).
  sampler (sampling.Sampler | None): The sampler for deciding which spans to record.
- Defaults to `None`, which uses the parent-based OpenTelemetry sampler.
+ Defaults to `None`, which uses the parent-based always-on OpenTelemetry sampler
+ (100% sampling).
  compression (Compression): The compression for exporting traces.
- Can be `Compression.Gzip` or `Compression.NoCompression`.
- Defaults to `Compression.Gzip`.
+ Can be `Compression.Gzip`, `Compression.Deflate`, or `Compression.NoCompression`.
+ Defaults to `Compression.Gzip` (recommended for production).
  jsonl_capture_enabled (bool): Whether to enable JSONL capture of trace data.
  When enabled, all span data will be captured and saved to a JSONL file
- in OpenTelemetry format for analysis. Defaults to `False`.
+ in OpenTelemetry format for offline analysis. Defaults to `False`.
  jsonl_file_path (str): Path to the JSONL file where trace data will be saved.
  Only used when `jsonl_capture_enabled` is `True`. Defaults to
  "fiddler_trace_data.jsonl".

  Raises:
  ValueError: If `application_id` is not a valid UUID4 or if the
- `url` is not a valid HTTPS URL.
+ `url` is not a valid HTTP/HTTPS URL.

  Examples:
- >>> from opentelemetry.sdk.trace import SpanLimits
- >>> from fiddler_langgraph import FiddlerClient
- >>>
- >>> client = FiddlerClient(
- ... api_key='YOUR_API_KEY',
- ... application_id='YOUR_APPLICATION_ID',
- ... url='https://your-fiddler-instance.fiddler.ai',
- ... span_limits=SpanLimits(max_span_attributes=64),
- ... )
+ Basic connection to your Fiddler instance:
+
+ .. code-block:: python
+
+ client = FiddlerClient(
+ api_key='YOUR_API_KEY',
+ application_id='YOUR_APPLICATION_ID',
+ url='https://your-instance.fiddler.ai',
+ )
+
+ High-volume applications with custom configuration:
+
+ .. code-block:: python
+
+ from opentelemetry.sdk.trace import SpanLimits, sampling
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import Compression
+
+ client = FiddlerClient(
+ api_key='YOUR_API_KEY',
+ application_id='YOUR_APPLICATION_ID',
+ url='https://your-instance.fiddler.ai',
+ span_limits=SpanLimits(max_span_attributes=64),
+ sampler=sampling.TraceIdRatioBased(0.1), # Sample 10% of traces
+ compression=Compression.Gzip,
+ )
+
+ Local development with console output:
+
+ .. code-block:: python
+
+ client = FiddlerClient(
+ api_key='dev-key',
+ application_id='00000000-0000-0000-0000-000000000000',
+ url='http://localhost:4318',
+ console_tracer=True, # Print traces to console for debugging
+ )
  """
  # Validate application_id is a valid UUID4

@@ -216,9 +244,11 @@ class FiddlerClient:
  ValueError: If the tracer has already been initialized.

  Examples:
- >>> from fiddler_langgraph import FiddlerClient
- >>> client = FiddlerClient(api_key='...', application_id='...')
- >>> client.update_resource({'service.version': '1.2.3'})
+ .. code-block:: python
+
+ from fiddler_langgraph import FiddlerClient
+ client = FiddlerClient(api_key='...', application_id='...', url='https://your-instance.fiddler.ai')
+ client.update_resource({'service.version': '1.2.3'})
  """
  if self._tracer is not None:
  raise ValueError('Cannot update resource after tracer is initialized')
@@ -305,11 +335,13 @@ class FiddlerClient:
  RuntimeError: If tracer initialization fails.

  Examples:
- >>> from fiddler_langgraph import FiddlerClient
- >>> client = FiddlerClient(api_key='...', application_id='...')
- >>> tracer = client.get_tracer()
- >>> with tracer.start_as_current_span('my-operation'):
- ... print('Doing some work...')
+ .. code-block:: python
+
+ from fiddler_langgraph import FiddlerClient
+ client = FiddlerClient(api_key='...', application_id='...', url='https://your-instance.fiddler.ai')
+ tracer = client.get_tracer()
+ with tracer.start_as_current_span('my-operation'):
+ print('Doing some work...')
  """
  if self._tracer is None:
  self._initialize_tracer()
fiddler_langgraph/tracing/instrumentation.py CHANGED
@@ -16,6 +16,7 @@ from wrapt import wrap_function_wrapper

  from fiddler_langgraph.core.attributes import (
  _CONVERSATION_ID,
+ _CUSTOM_ATTRIBUTES,
  FIDDLER_METADATA_KEY,
  FiddlerSpanAttributes,
  )
@@ -26,13 +27,98 @@ from fiddler_langgraph.tracing.util import _check_langgraph_version, _get_packag

  @validate_call(config=ConfigDict(strict=True))
  def set_conversation_id(conversation_id: str) -> None:
- """Set the conversation ID for the current application invocation.
+ """Enables end-to-end tracing of multi-step workflows and conversations.
+
+ The primary purpose of set_conversation_id is to enable end-to-end tracing
+ of a multi-step workflow. Modern agentic applications often involve a complex
+ sequence of events to fulfill a single user request. The result in your Fiddler
+ dashboard is that you can instantly filter for and view the entire, ordered
+ sequence of operations that constituted a single conversation or task. This is
+ crucial for debugging complex failures, analyzing latency across an entire
+ workflow, and understanding the agent's behavior from start to finish.
+
  This will remain in use until it is called again with a new conversation ID.
- Note (Robin 11th Sep 2025): This should be moved to the core.attributes module in the future.
+
+ Args:
+ conversation_id (str): Unique identifier for the conversation session. **Required**.
+
+ Returns:
+ None
+
+ Examples:
+ .. code-block:: python
+
+ from langgraph.prebuilt import create_react_agent
+ from fiddler_langgraph.tracing.instrumentation import set_conversation_id
+ import uuid
+
+ # Basic usage
+ agent = create_react_agent(model, tools=[])
+ conversation_id = str(uuid.uuid4())
+ set_conversation_id(conversation_id)
+ agent.invoke({"messages": [{"role": "user", "content": "Write me a novel"}]})
+
+ # Multi-turn conversation tracking
+ def handle_conversation(user_id, session_id):
+ # Create a unique conversation ID combining user and session
+ conversation_id = f"{user_id}_{session_id}_{uuid.uuid4()}"
+ set_conversation_id(conversation_id)
+ return conversation_id
+
+ # Different conversation types
+ business_conversation_id = f"business_{uuid.uuid4()}"
+ support_conversation_id = f"support_{uuid.uuid4()}"
  """
  _CONVERSATION_ID.set(conversation_id)


+ @validate_call(config=ConfigDict(strict=True, arbitrary_types_allowed=True))
+ def add_session_attributes(key: str, value: str) -> None:
+ """Adds custom session-level attributes that persist across all spans in the current context.
+
+ Session attributes are key-value pairs that apply to all operations within the current
+ execution context (thread or async coroutine). Use this to add metadata that describes
+ the session environment, such as user information, deployment environment, or feature flags.
+
+ These attributes are stored in context variables and automatically included in all spans
+ created during the session. They persist until the context ends or the attribute is updated
+ with a new value.
+
+ Note: Context variables are shallow copied - modifications to mutable values (lists, dicts)
+ are shared between contexts.
+
+ Args:
+ key (str): The attribute key to add or update. Will be formatted as
+ 'fiddler.session.user.{key}' in the OpenTelemetry span. **Required**.
+ value (str): The attribute value to set. **Required**.
+
+ Returns:
+ None
+
+ Examples:
+ .. code-block:: python
+
+ from fiddler_langgraph.tracing.instrumentation import add_session_attributes
+
+ # Add user information to all spans in this session
+ add_session_attributes("user_id", "user_12345")
+ add_session_attributes("tier", "premium")
+
+ # Add deployment environment context
+ add_session_attributes("environment", "production")
+ add_session_attributes("region", "us-west-2")
+
+ # Update an existing attribute
+ add_session_attributes("user_id", "user_67890") # Overwrites previous value
+ """
+ try:
+ current_attributes = _CUSTOM_ATTRIBUTES.get().copy()
+ except LookupError:
+ current_attributes = {}
+ current_attributes[key] = value
+ _CUSTOM_ATTRIBUTES.set(current_attributes)
+
+
  @validate_call(config=ConfigDict(strict=True, arbitrary_types_allowed=True))
  def _set_default_metadata(
  node: BaseLanguageModel | BaseRetriever | BaseTool,
@@ -59,14 +145,137 @@ def add_span_attributes(
  node: BaseLanguageModel | BaseRetriever | BaseTool,
  **kwargs: Any,
  ) -> None:
- """Adds Fiddler-specific attributes to a runnable's metadata.
+ """Adds custom span-level attributes to a specific runnable component's metadata.
+
+ Span attributes are key-value pairs that apply to a specific component (LLM, tool, or retriever)
+ and are included in the OpenTelemetry spans created when that component executes. Use this to
+ add metadata that describes the component's configuration, purpose, or operational context.
+
+ Unlike session attributes (which apply to all spans in a context), span attributes are scoped
+ to individual components. This is useful for:
+ - Identifying which model or tool is being used
+ - Tagging components by purpose or category
+ - Adding version information or deployment metadata
+ - Tracking A/B test variants or experimental configurations

- This is used for various runnable types like LLM calls, tool
- calls, and retriever calls.
+ The attributes are stored in the component's metadata dictionary under the key
+ '_fiddler_attributes' and will be automatically included in spans when the component executes.
+ Attributes persist for the lifetime of the component instance.
+
+ Supported component types:
+ - **BaseLanguageModel**: LLM calls (ChatOpenAI, ChatAnthropic, etc.)
+ - **BaseRetriever**: Document retrieval operations
+ - **BaseTool**: Tool/function calls in agent workflows

  Args:
- node (BaseLanguageModel | BaseRetriever | BaseTool): The runnable node.
- **kwargs: The attributes to add as key-value pairs.
+ node (BaseLanguageModel | BaseRetriever | BaseTool): The LangChain component to annotate
+ with custom attributes. The component's metadata will be modified in place. **Required**.
+ **kwargs (Any): Arbitrary keyword arguments representing the attributes to add. Each
+ key-value pair will be stored as a span attribute. Keys should be strings, and values
+ can be any type (though simple types like str, int, bool are recommended for
+ observability). **Required** (at least one attribute).
+
+ Returns:
+ None
+
+ Examples:
+ Tagging an LLM with model information:
+
+ .. code-block:: python
+
+ from langchain_openai import ChatOpenAI
+ from fiddler_langgraph.tracing.instrumentation import add_span_attributes
+
+ llm = ChatOpenAI(model="gpt-4")
+ add_span_attributes(
+ llm,
+ model_name="gpt-4",
+ provider="openai",
+ purpose="summarization"
+ )
+
+ Adding version and environment metadata:
+
+ .. code-block:: python
+
+ add_span_attributes(
+ llm,
+ version="v2.1.0",
+ environment="production",
+ region="us-west-2"
+ )
+
+ Tagging tools in a multi-tool agent:
+
+ .. code-block:: python
+
+ from langchain.tools import Tool
+
+ search_tool = Tool(
+ name="search",
+ func=search_function,
+ description="Search the web"
+ )
+ add_span_attributes(
+ search_tool,
+ tool_category="external_api",
+ rate_limit="100/min",
+ cost_per_call=0.001
+ )
+
+ A/B testing different retrievers:
+
+ .. code-block:: python
+
+ from langchain_community.vectorstores import FAISS
+
+ retriever_a = FAISS.from_documents(docs, embeddings).as_retriever()
+ add_span_attributes(
+ retriever_a,
+ variant="semantic_search",
+ experiment_id="exp_2024_q1",
+ retrieval_strategy="similarity"
+ )
+
+ retriever_b = FAISS.from_documents(docs, embeddings).as_retriever(
+ search_type="mmr"
+ )
+ add_span_attributes(
+ retriever_b,
+ variant="mmr_search",
+ experiment_id="exp_2024_q1",
+ retrieval_strategy="maximum_marginal_relevance"
+ )
+
+ Combining with session attributes:
+
+ .. code-block:: python
+
+ from fiddler_langgraph.tracing.instrumentation import (
+ add_session_attributes,
+ add_span_attributes,
+ set_conversation_id
+ )
+
+ # Session-level: applies to all spans
+ set_conversation_id("conv_12345")
+ add_session_attributes("user_id", "user_789")
+
+ # Span-level: applies only to this LLM's spans
+ llm = ChatOpenAI(model="gpt-4-turbo")
+ add_span_attributes(
+ llm,
+ model_tier="premium",
+ use_case="customer_support"
+ )
+
+ Note:
+ - Attributes are stored in the component's `metadata` dictionary, which persists
+ for the lifetime of the component instance
+ - If the component doesn't have a `metadata` attribute, one will be created
+ - Multiple calls to `add_span_attributes` on the same component will merge attributes
+ - Later calls with the same key will overwrite previous values
+ - This modifies the component in place - no need to reassign the return value
  """
  _set_default_metadata(node)
  metadata = cast(dict[str, Any], node.metadata)
@@ -77,33 +286,60 @@ def add_span_attributes(

  @validate_call(config=ConfigDict(strict=True))
  def set_llm_context(llm: BaseLanguageModel | RunnableBinding, context: str) -> None:
- """Sets a context string on a language model instance.
- If the language model is a RunnableBinding, the context will be set on the bound object.
+ """Sets additional context information on a language model instance.

- https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableBinding.html
+ This context provides environmental or operational information that will be
+ attached to all spans created for this model. Use this to add relevant metadata
+ such as user preferences, session state, or runtime conditions that influenced
+ the LLM's behavior. This is valuable for debugging and understanding why the
+ model produced specific outputs.

- The bound object of the RunnableBinding must be a BaseLanguageModel.
- This context can be used to provide additional information about the
- environment or data that the language model is being used in. This
- information will be attached to the spans created for this model.
- In case the user passes a RunnableBinding, the context will be set on the
- bound object.
+ Supports both `BaseLanguageModel` instances and `RunnableBinding` objects. When a
+ `RunnableBinding` is provided, the context is automatically set on the underlying
+ bound object (which must be a `BaseLanguageModel`).
+
+ For more information on RunnableBinding, see:
+ https://python.langchain.com/api_reference/core/runnables/langchain_core.runnables.base.RunnableBinding.html

  Args:
- llm (BaseLanguageModel | RunnableBinding): The language model instance. **Required**.
- context (str): The context string to add. **Required**.
+ llm (BaseLanguageModel | RunnableBinding): The language model instance or binding. **Required**.
+ context (str): The context string to add. This will be included in span attributes
+ as 'gen_ai.llm.context'. **Required**.
+
+ Raises:
+ TypeError: If a RunnableBinding is provided but its bound object is not a BaseLanguageModel.

  Examples:
- >>> from langchain_openai import ChatOpenAI
- >>> from fiddler_langgraph.tracing.instrumentation import set_llm_context
- >>>
- >>> llm = ChatOpenAI()
- >>> set_llm_context(llm, "This is a test context.")
- >>>
- >>> # If you are using a RunnableBinding, you can pass the bound object
- >>> # directly to set_llm_context.
- >>> bound_llm = llm.bind(x=1)
- >>> set_llm_context(bound_llm, "This is a test context.")
+ Basic usage with ChatOpenAI:
+
+ .. code-block:: python
+
+ from langchain_openai import ChatOpenAI
+ from fiddler_langgraph.tracing.instrumentation import set_llm_context
+
+ llm = ChatOpenAI(model="gpt-4")
+ set_llm_context(llm, "User prefers concise responses")
+
+ With user preferences:
+
+ .. code-block:: python
+
+ set_llm_context(llm, "User language: Spanish, Expertise: Beginner")
+
+ Using with RunnableBinding:
+
+ .. code-block:: python
+
+ bound_llm = llm.bind(temperature=0.7, max_tokens=100)
+ set_llm_context(bound_llm, "Creative writing mode with token limits")
+
+ Adding session context:
+
+ .. code-block:: python
+
+ import uuid
+ session_id = uuid.uuid4()
+ set_llm_context(llm, f"Session: {session_id}, Environment: Production")
  """
  if isinstance(llm, RunnableBinding):
  if not isinstance(llm.bound, BaseLanguageModel):
@@ -129,22 +365,58 @@ class LangGraphInstrumentor(BaseInstrumentor):

  This class provides automatic instrumentation for applications built with
  LangGraph. It captures traces from the execution of LangGraph graphs and
- sends them to the Fiddler platform.
+ sends them to the Fiddler platform for monitoring and analysis.
+
+ Instrumentation works by monkey-patching LangChain's callback system to inject
+ a custom callback handler that captures trace data. Once instrumented, all
+ LangGraph operations will automatically generate telemetry data.
+
+ Note: Instrumentation persists for the lifetime of the application unless
+ explicitly removed by calling `uninstrument()`. Calling `instrument()` multiple
+ times is safe - it will not create duplicate handlers.
+
+ Thread Safety: The instrumentation applies globally to the process and affects
+ all threads. In concurrent environments (multi-threading, async), all contexts
+ share the same instrumented callback system.

  To use the instrumentor, you first need to create a `FiddlerClient`
  instance. Then, you can create an instance of `LangGraphInstrumentor` and
  call the `instrument()` method.

  Examples:
- >>> from fiddler_langgraph import FiddlerClient
- >>> from fiddler_langgraph.tracing import LangGraphInstrumentor
- >>>
- >>> client = FiddlerClient(api_key="...", application_id="...")
- >>> instrumentor = LangGraphInstrumentor(client=client)
- >>> instrumentor.instrument()
+ Basic usage:
+
+ .. code-block:: python
+
+ from fiddler_langgraph import FiddlerClient
+ from fiddler_langgraph.tracing import LangGraphInstrumentor
+
+ client = FiddlerClient(api_key="...", application_id="...", url="https://your-instance.fiddler.ai")
+ instrumentor = LangGraphInstrumentor(client=client)
+ instrumentor.instrument()
+
+ Removing instrumentation:
+
+ .. code-block:: python
+
+ # Clean up instrumentation when shutting down
+ instrumentor.uninstrument()
+
+ Context manager pattern (advanced):
+
+ .. code-block:: python
+
+ with LangGraphInstrumentor(client).instrument():
+ # Instrumented operations here
+ agent.invoke({"messages": [...]})
+ # Automatically uninstrumented after block

  Attributes:
  _client (FiddlerClient): The FiddlerClient instance used for configuration.
+ _tracer (_CallbackHandler | None): The callback handler instance for tracing.
+ _langgraph_version: The installed LangGraph version.
+ _langchain_version: The installed LangChain Core version.
+ _fiddler_langgraph_version: The Fiddler LangGraph SDK version.
  """

  def __init__(self, client: FiddlerClient):
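Taken together, the client.py and instrumentation.py hunks above describe the 1.0.0 setup path. A minimal end-to-end sketch assembled from the examples in this diff (credentials, instance URL, and the OpenAI model are placeholders you would supply; this is not a verified snippet from the released wheel):

```python
import uuid

from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from fiddler_langgraph import FiddlerClient
from fiddler_langgraph.tracing import LangGraphInstrumentor
from fiddler_langgraph.tracing.instrumentation import (
    add_session_attributes,
    set_conversation_id,
)

# Configure the client (url is required in 1.0.0) and instrument LangGraph once at startup.
client = FiddlerClient(
    api_key="YOUR_API_KEY",
    application_id="YOUR_APPLICATION_ID",  # must be a valid UUID4
    url="https://your-instance.fiddler.ai",
)
LangGraphInstrumentor(client=client).instrument()

# Tag the session, then run an agent; spans are exported to Fiddler automatically.
set_conversation_id(str(uuid.uuid4()))
add_session_attributes("environment", "production")

model = ChatOpenAI(model="gpt-4")
agent = create_react_agent(model, tools=[])
agent.invoke({"messages": [{"role": "user", "content": "Write me a novel"}]})
```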
fiddler_langgraph-0.1.1.dist-info/METADATA → fiddler_langgraph-1.0.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fiddler-langgraph
- Version: 0.1.1
+ Version: 1.0.0
  Summary: Python SDK for instrumenting GenAI Applications with Fiddler
  Author-email: Fiddler AI <support@fiddler.ai>
  License-Expression: Apache-2.0
@@ -82,7 +82,7 @@ from fiddler_langgraph import FiddlerClient

  # Initialize the FiddlerClient with basic configuration
  client = FiddlerClient(
- url="https://api.fiddler.ai",
+ url="https://your-instance.fiddler.ai",
  api_key="fdl_api_key",
  application_id="fdl_application_id" # Must be a valid UUID4
  )
@@ -147,7 +147,7 @@ The Fiddler SDK provides flexible configuration options for OpenTelemetry integr
  client = FiddlerClient(
  api_key="your-api-key",
  application_id="your-app-id", # Must be a valid UUID4
- url="https://api.fiddler.ai"
+ url="https://your-instance.fiddler.ai"
  )
  ```

@@ -173,6 +173,7 @@ sampler = sampling.TraceIdRatioBased(0.1) # Sample 10% of traces
  client = FiddlerClient(
  api_key="your-api-key",
  application_id="your-app-id",
+ url="https://your-instance.fiddler.ai",
  span_limits=custom_limits,
  sampler=sampler,
  console_tracer=False, # Set to True for debugging
@@ -191,6 +192,7 @@ from opentelemetry.exporter.otlp.proto.http.trace_exporter import Compression
  client = FiddlerClient(
  api_key="your-api-key",
  application_id="your-app-id",
+ url="https://your-instance.fiddler.ai",
  compression=Compression.Gzip,
  )

@@ -198,6 +200,7 @@ client = FiddlerClient(
  client = FiddlerClient(
  api_key="your-api-key",
  application_id="your-app-id",
+ url="https://your-instance.fiddler.ai",
  compression=Compression.NoCompression,
  )

@@ -205,6 +208,7 @@ client = FiddlerClient(
  client = FiddlerClient(
  api_key="your-api-key",
  application_id="your-app-id",
+ url="https://your-instance.fiddler.ai",
  compression=Compression.Deflate,
  )
  ```
@@ -224,7 +228,8 @@ os.environ['OTEL_BSP_EXPORT_TIMEOUT'] = '10000'

  client = FiddlerClient(
  api_key="your-api-key",
- application_id="your-app-id"
+ application_id="your-app-id",
+ url="https://your-instance.fiddler.ai"
  )
  ```

fiddler_langgraph-0.1.1.dist-info/RECORD → fiddler_langgraph-1.0.0.dist-info/RECORD CHANGED
@@ -1,15 +1,15 @@
- fiddler_langgraph/VERSION,sha256=gERzFlKfxiAU-6oEf7Z8Uaww4ygwzaNwQFNJ2NZtGWw,6
+ fiddler_langgraph/VERSION,sha256=WYVJhIUxBN9cNT4vaBoV_HkkdC-aLkaMKa8kjc5FzgM,6
  fiddler_langgraph/__init__.py,sha256=cqomWAmuY-2KvwJTvo7c7cecCPoe31pv4vgyBk_E8oQ,315
  fiddler_langgraph/core/__init__.py,sha256=HXPZt8YpmVrvwEEukoWR78LufMKtl7lVjLtcl9UNSoc,42
- fiddler_langgraph/core/attributes.py,sha256=Kj2p03gzuaVWhgd4odm15xMP-FbYSDA5aBi-dvEtgkA,2864
- fiddler_langgraph/core/client.py,sha256=qgDsMI_bBVSV-75tLjnM0of0i464g9KyeUljiccRkIE,13658
+ fiddler_langgraph/core/attributes.py,sha256=TbTwr8WtmJpDDRgTP8S3wD3ZHc3wwGJK1lu3vHal3mY,2169
+ fiddler_langgraph/core/client.py,sha256=Fn4sPiTfAF1i375Qg_9DYwD0wAT65K6j3M9luyTVKPE,14948
  fiddler_langgraph/core/span_processor.py,sha256=ODYmdo0FUYEFbIWS_VaR9L6qHUVvpnuk-RSIhgRxyb0,1164
  fiddler_langgraph/tracing/__init__.py,sha256=Kw8VUB7RDffBq4ss0v6vNQYi4KDQOM0J1elbMrqJpsU,49
  fiddler_langgraph/tracing/callback.py,sha256=oI6Zpbfe50mGu85NArd8rXxOyYvOKUV4fY--EBnlQlo,31264
- fiddler_langgraph/tracing/instrumentation.py,sha256=LHCYW6asySajbF-sOOi6_6J7UBD6z1Vwp4QjHP9zzt0,10580
+ fiddler_langgraph/tracing/instrumentation.py,sha256=AlCM9GWp3qN_Fa9cl-USJjrUhuvKmrLokzkLMgah-CY,21142
  fiddler_langgraph/tracing/jsonl_capture.py,sha256=7Sy0nbxRftV5y64UCovSlm07qXn-EfG3uHpBu9H2ZiU,8174
  fiddler_langgraph/tracing/util.py,sha256=RKMrrmdCXSRJrTIHngdhRsotPLEY_LR1SKnUXAJC40Y,2678
- fiddler_langgraph-0.1.1.dist-info/METADATA,sha256=79J7iKAC1ZqCwGp1guUDNWZfjzM-5LSf03x1HBqwfVg,10184
- fiddler_langgraph-0.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- fiddler_langgraph-0.1.1.dist-info/top_level.txt,sha256=hOKdR6_3AkS4dS6EfE9Ii7YrS_hApnyGfY-0v0DV0s4,18
- fiddler_langgraph-0.1.1.dist-info/RECORD,,
+ fiddler_langgraph-1.0.0.dist-info/METADATA,sha256=tfc_l2tf5HwODWFknx1qPfoVOFtfqHQ4GrqvzKPAxgI,10424
+ fiddler_langgraph-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ fiddler_langgraph-1.0.0.dist-info/top_level.txt,sha256=hOKdR6_3AkS4dS6EfE9Ii7YrS_hApnyGfY-0v0DV0s4,18
+ fiddler_langgraph-1.0.0.dist-info/RECORD,,