langtrace-python-sdk 3.3.6__py3-none-any.whl → 3.3.8__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
@@ -1,8 +1,10 @@
1
+ from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
1
2
  from langtrace_python_sdk.utils.silently_fail import silently_fail
2
3
  from opentelemetry.trace import Tracer
3
4
  from opentelemetry.trace import SpanKind
4
5
  from langtrace_python_sdk.utils import handle_span_error, set_span_attribute
5
6
  from langtrace_python_sdk.utils.llm import (
7
+ get_langtrace_attributes,
6
8
  get_extra_attributes,
7
9
  set_span_attributes,
8
10
  )
@@ -16,6 +18,11 @@ def generic_patch(api, version: str, tracer: Tracer):
16
18
  with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
17
19
  try:
18
20
  span_attributes = {
21
+ **get_langtrace_attributes(
22
+ service_provider=SERVICE_PROVIDERS["MILVUS"],
23
+ version=version,
24
+ vendor_type="Vector Database",
25
+ ),
19
26
  "db.system": "milvus",
20
27
  "db.operation": operation,
21
28
  "db.name": kwargs.get("collection_name", None),
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
14
14
  limitations under the License.
15
15
  """
16
16
 
17
+ import base64
17
18
  import asyncio
18
19
  import os
19
20
  from deprecated import deprecated
@@ -145,7 +146,9 @@ class SendUserFeedback:
145
146
  _langtrace_api_key: str
146
147
 
147
148
  def __init__(self):
148
- self._langtrace_host = os.environ.get("LANGTRACE_API_HOST", LANGTRACE_REMOTE_URL)
149
+ self._langtrace_host = os.environ.get(
150
+ "LANGTRACE_API_HOST", LANGTRACE_REMOTE_URL
151
+ )
149
152
  # When the host is set to /api/trace, remove the /api/trace
150
153
  if self._langtrace_host.endswith("/api/trace"):
151
154
  self._langtrace_host = self._langtrace_host.replace("/api/trace", "")
@@ -162,14 +165,13 @@ class SendUserFeedback:
162
165
  print(Fore.RESET)
163
166
  return
164
167
 
165
- # convert spanId and traceId to hexadecimals
166
- span_hex_number = hex(int(data["spanId"], 10))[2:] # Convert to hex and remove the '0x' prefix
167
- formatted_span_hex_number = span_hex_number.zfill(16) # Pad with zeros to 16 characters
168
- data["spanId"] = f"0x{formatted_span_hex_number}"
168
+ # convert spanId and traceId to Base64
169
+ span_bytes = int(data["spanId"], 10).to_bytes(8, "big")
170
+ data["spanId"] = base64.b64encode(span_bytes).decode("ascii")
169
171
 
170
- trace_hex_number = hex(int(data["traceId"], 10))[2:] # Convert to hex and remove the '0x' prefix
171
- formatted_trace_hex_number = trace_hex_number.zfill(32) # Pad with zeros to 32 characters
172
- data["traceId"] = f"0x{formatted_trace_hex_number}"
172
+ # Convert traceId to base64
173
+ trace_bytes = int(data["traceId"], 10).to_bytes(16, "big")
174
+ data["traceId"] = base64.b64encode(trace_bytes).decode("ascii")
173
175
 
174
176
  evaluation = self.get_evaluation(data["spanId"])
175
177
  headers = {"x-api-key": self._langtrace_api_key}
@@ -1 +1 @@
1
- __version__ = "3.3.6"
1
+ __version__ = "3.3.8"
@@ -0,0 +1,493 @@
1
+ Metadata-Version: 2.3
2
+ Name: langtrace-python-sdk
3
+ Version: 3.3.8
4
+ Summary: Python SDK for LangTrace
5
+ Project-URL: Homepage, https://github.com/Scale3-Labs/langtrace-python-sdk
6
+ Author-email: Scale3 Labs <engineering@scale3labs.com>
7
+ License: Apache-2.0
8
+ Classifier: License :: OSI Approved :: Apache Software License
9
+ Classifier: Operating System :: OS Independent
10
+ Classifier: Programming Language :: Python :: 3
11
+ Requires-Python: >=3.9
12
+ Requires-Dist: colorama>=0.4.6
13
+ Requires-Dist: fsspec>=2024.6.0
14
+ Requires-Dist: opentelemetry-api>=1.25.0
15
+ Requires-Dist: opentelemetry-exporter-otlp-proto-grpc>=1.25.0
16
+ Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.25.0
17
+ Requires-Dist: opentelemetry-instrumentation-sqlalchemy>=0.46b0
18
+ Requires-Dist: opentelemetry-instrumentation>=0.47b0
19
+ Requires-Dist: opentelemetry-sdk>=1.25.0
20
+ Requires-Dist: sentry-sdk>=2.14.0
21
+ Requires-Dist: sqlalchemy
22
+ Requires-Dist: tiktoken>=0.1.1
23
+ Requires-Dist: trace-attributes==7.1.0
24
+ Requires-Dist: transformers>=4.11.3
25
+ Requires-Dist: ujson>=5.10.0
26
+ Provides-Extra: dev
27
+ Requires-Dist: anthropic; extra == 'dev'
28
+ Requires-Dist: boto3; extra == 'dev'
29
+ Requires-Dist: chromadb; extra == 'dev'
30
+ Requires-Dist: cohere; extra == 'dev'
31
+ Requires-Dist: embedchain; extra == 'dev'
32
+ Requires-Dist: google-cloud-aiplatform; extra == 'dev'
33
+ Requires-Dist: google-generativeai; extra == 'dev'
34
+ Requires-Dist: groq; extra == 'dev'
35
+ Requires-Dist: langchain; extra == 'dev'
36
+ Requires-Dist: langchain-community; extra == 'dev'
37
+ Requires-Dist: langchain-openai; extra == 'dev'
38
+ Requires-Dist: litellm==1.48.7; extra == 'dev'
39
+ Requires-Dist: mistralai; extra == 'dev'
40
+ Requires-Dist: ollama; extra == 'dev'
41
+ Requires-Dist: openai==1.45.0; extra == 'dev'
42
+ Requires-Dist: pinecone-client; extra == 'dev'
43
+ Requires-Dist: python-dotenv; extra == 'dev'
44
+ Requires-Dist: qdrant-client; extra == 'dev'
45
+ Requires-Dist: setuptools; extra == 'dev'
46
+ Requires-Dist: weaviate-client; extra == 'dev'
47
+ Provides-Extra: test
48
+ Requires-Dist: pytest; extra == 'test'
49
+ Requires-Dist: pytest-asyncio; extra == 'test'
50
+ Requires-Dist: pytest-vcr; extra == 'test'
51
+ Description-Content-Type: text/markdown
52
+
53
+ <div align="center">
54
+ <h1>Langtrace Python SDK</h1>
55
+ <p>Open Source & Open Telemetry(OTEL) Observability for LLM Applications</p>
56
+
57
+ ![Static Badge](https://img.shields.io/badge/License-Apache--2.0-blue)
58
+ ![Static Badge](https://img.shields.io/badge/pip_langtrace--python--sdk-1.2.8-green)
59
+ ![Static Badge](https://img.shields.io/badge/Development_status-Active-green)
60
+ [![Downloads](https://static.pepy.tech/badge/langtrace-python-sdk)](https://static.pepy.tech/badge/langtrace-python-sdk)
61
+ [![Deploy](https://railway.app/button.svg)](https://railway.app/template/8dNq1c?referralCode=MA2S9H)
62
+ </div>
63
+
64
+ ---
65
+
66
+ ## 📚 Table of Contents
67
+ - [✨ Features](#-features)
68
+ - [🚀 Quick Start](#-quick-start)
69
+ - [🔗 Integrations](#-supported-integrations)
70
+ - [🌐 Getting Started](#-getting-started)
71
+ - [⚙️ Configuration](#-configuration)
72
+ - [🔧 Advanced Features](#-advanced-features)
73
+ - [📐 Examples](#-examples)
74
+ - [🏠 Self Hosting](#-langtrace-self-hosted)
75
+ - [🤝 Contributing](#-contributing)
76
+ - [🔒 Security](#-security)
77
+ - [❓ FAQ](#-frequently-asked-questions)
78
+ - [📜 License](#-license)
79
+
80
+ Langtrace is an open source observability software which lets you capture, debug and analyze traces and metrics from all your applications that leverage LLM APIs, Vector Databases and LLM based Frameworks.
81
+
82
+ ## ✨ Features
83
+
84
+ - 📊 **Open Telemetry Support**: Built on OTEL standards for comprehensive tracing
85
+ - 🔄 **Real-time Monitoring**: Track LLM API calls, vector operations, and framework usage
86
+ - 🎯 **Performance Insights**: Analyze latency, costs, and usage patterns
87
+ - 🔍 **Debug Tools**: Trace and debug your LLM application workflows
88
+ - 📈 **Analytics**: Get detailed metrics and visualizations
89
+ - 🛠️ **Framework Support**: Extensive integration with popular LLM frameworks
90
+ - 🔌 **Vector DB Integration**: Support for major vector databases
91
+ - 🎨 **Flexible Configuration**: Customizable tracing and monitoring options
92
+
93
+ ## 🚀 Quick Start
94
+
95
+ ```bash
96
+ pip install langtrace-python-sdk
97
+ ```
98
+
99
+ ```python
100
+ from langtrace_python_sdk import langtrace
101
+ langtrace.init(api_key='<your_api_key>') # Get your API key at langtrace.ai
102
+ ```
103
+
104
+ ## 🔗 Supported Integrations
105
+
106
+ Langtrace automatically captures traces from the following vendors:
107
+
108
+ ### LLM Providers
109
+ | Provider | TypeScript SDK | Python SDK |
110
+ |----------|:-------------:|:----------:|
111
+ | OpenAI | ✅ | ✅ |
112
+ | Anthropic | ✅ | ✅ |
113
+ | Azure OpenAI | ✅ | ✅ |
114
+ | Cohere | ✅ | ✅ |
115
+ | Groq | ✅ | ✅ |
116
+ | Perplexity | ✅ | ✅ |
117
+ | Gemini | ❌ | ✅ |
118
+ | Mistral | ❌ | ✅ |
119
+ | AWS Bedrock | ✅ | ✅ |
120
+ | Ollama | ❌ | ✅ |
121
+ | Cerebras | ❌ | ✅ |
122
+
123
+ ### Frameworks
124
+ | Framework | TypeScript SDK | Python SDK |
125
+ |-----------|:-------------:|:----------:|
126
+ | Langchain | ❌ | ✅ |
127
+ | LlamaIndex | ✅ | ✅ |
128
+ | Langgraph | ❌ | ✅ |
129
+ | LiteLLM | ❌ | ✅ |
130
+ | DSPy | ❌ | ✅ |
131
+ | CrewAI | ❌ | ✅ |
132
+ | VertexAI | ✅ | ✅ |
133
+ | EmbedChain | ❌ | ✅ |
134
+ | Autogen | ❌ | ✅ |
135
+ | HiveAgent | ❌ | ✅ |
136
+ | Inspect AI | ❌ | ✅ |
137
+
138
+ ### Vector Databases
139
+ | Database | TypeScript SDK | Python SDK |
140
+ |----------|:-------------:|:----------:|
141
+ | Pinecone | ✅ | ✅ |
142
+ | ChromaDB | ✅ | ✅ |
143
+ | QDrant | ✅ | ✅ |
144
+ | Weaviate | ✅ | ✅ |
145
+ | PGVector | ✅ | ✅ (SQLAlchemy) |
146
+ | MongoDB | ❌ | ✅ |
147
+ | Milvus | ❌ | ✅ |
148
+
149
+ ## 🌐 Getting Started
150
+
151
+ ### Langtrace Cloud ☁️
152
+
153
+ <!-- Original cloud setup instructions -->
154
+ 1. Sign up by going to [this link](https://langtrace.ai).
155
+ 2. Create a new Project after signing up. Projects are containers for storing traces and metrics generated by your application. If you have only one application, creating 1 project will do.
156
+ 3. Generate an API key by going inside the project.
157
+ 4. In your application, install the Langtrace SDK and initialize it with the API key you generated in step 3.
158
+ 5. The code for installing and setting up the SDK is shown below
159
+
160
+ ### Framework Quick Starts
161
+
162
+ #### FastAPI
163
+ ```python
164
+ from fastapi import FastAPI
165
+ from langtrace_python_sdk import langtrace
166
+ from openai import OpenAI
167
+
168
+ langtrace.init()
169
+ app = FastAPI()
170
+ client = OpenAI()
171
+
172
+ @app.get("/")
173
+ def root():
174
+ client.chat.completions.create(
175
+ model="gpt-4",
176
+ messages=[{"role": "user", "content": "Say this is a test"}],
177
+ stream=False,
178
+ )
179
+ return {"Hello": "World"}
180
+ ```
181
+
182
+ #### Django
183
+ ```python
184
+ # settings.py
185
+ from langtrace_python_sdk import langtrace
186
+ langtrace.init()
187
+
188
+ # views.py
189
+ from django.http import JsonResponse
190
+ from openai import OpenAI
191
+
192
+ client = OpenAI()
193
+
194
+ def chat_view(request):
195
+ response = client.chat.completions.create(
196
+ model="gpt-4",
197
+ messages=[{"role": "user", "content": request.GET.get('message', '')}]
198
+ )
199
+ return JsonResponse({"response": response.choices[0].message.content})
200
+ ```
201
+
202
+ #### Flask
203
+ ```python
204
+ from flask import Flask
205
+ from langtrace_python_sdk import langtrace
206
+ from openai import OpenAI
207
+
208
+ app = Flask(__name__)
209
+ langtrace.init()
210
+ client = OpenAI()
211
+
212
+ @app.route('/')
213
+ def chat():
214
+ response = client.chat.completions.create(
215
+ model="gpt-4",
216
+ messages=[{"role": "user", "content": "Hello!"}]
217
+ )
218
+ return {"response": response.choices[0].message.content}
219
+ ```
220
+
221
+ #### LangChain
222
+ ```python
223
+ from langtrace_python_sdk import langtrace
224
+ from langchain.chat_models import ChatOpenAI
225
+ from langchain.prompts import ChatPromptTemplate
226
+
227
+ langtrace.init()
228
+
229
+ # LangChain operations are automatically traced
230
+ chat = ChatOpenAI()
231
+ prompt = ChatPromptTemplate.from_messages([
232
+ ("system", "You are a helpful assistant."),
233
+ ("user", "{input}")
234
+ ])
235
+ chain = prompt | chat
236
+ response = chain.invoke({"input": "Hello!"})
237
+ ```
238
+
239
+ #### LlamaIndex
240
+ ```python
241
+ from langtrace_python_sdk import langtrace
242
+ from llama_index import VectorStoreIndex, SimpleDirectoryReader
243
+
244
+ langtrace.init()
245
+
246
+ # Document loading and indexing are automatically traced
247
+ documents = SimpleDirectoryReader('data').load_data()
248
+ index = VectorStoreIndex.from_documents(documents)
249
+
250
+ # Queries are traced with metadata
251
+ query_engine = index.as_query_engine()
252
+ response = query_engine.query("What's in the documents?")
253
+ ```
254
+
255
+ #### DSPy
256
+ ```python
257
+ from langtrace_python_sdk import langtrace
258
+ import dspy
259
+ from dspy.teleprompt import BootstrapFewShot
260
+
261
+ langtrace.init()
262
+
263
+ # DSPy operations are automatically traced
264
+ lm = dspy.OpenAI(model="gpt-4")
265
+ dspy.settings.configure(lm=lm)
266
+
267
+ class SimpleQA(dspy.Signature):
268
+ """Answer questions with short responses."""
269
+ question = dspy.InputField()
270
+ answer = dspy.OutputField(desc="short answer")
271
+
272
+ compiler = BootstrapFewShot(metric=dspy.metrics.Answer())
273
+ program = compiler.compile(SimpleQA)
274
+ ```
275
+
276
+ #### CrewAI
277
+ ```python
278
+ from langtrace_python_sdk import langtrace
279
+ from crewai import Agent, Task, Crew
280
+
281
+ langtrace.init()
282
+
283
+ # Agents and tasks are automatically traced
284
+ researcher = Agent(
285
+ role="Researcher",
286
+ goal="Research and analyze data",
287
+ backstory="Expert data researcher",
288
+ allow_delegation=False
289
+ )
290
+
291
+ task = Task(
292
+ description="Analyze market trends",
293
+ agent=researcher
294
+ )
295
+
296
+ crew = Crew(
297
+ agents=[researcher],
298
+ tasks=[task]
299
+ )
300
+
301
+ result = crew.kickoff()
302
+ ```
303
+
304
+ For more detailed examples and framework-specific features, visit our [documentation](https://docs.langtrace.ai).
305
+
306
+ ## ⚙️ Configuration
307
+
308
+ ### Initialize Options
309
+
310
+ The SDK can be initialized with various configuration options to customize its behavior:
311
+
312
+ ```python
313
+ langtrace.init(
314
+ api_key: Optional[str] = None, # API key for authentication
315
+ batch: bool = True, # Enable/disable batch processing
316
+ write_spans_to_console: bool = False, # Console logging
317
+ custom_remote_exporter: Optional[Any] = None, # Custom exporter
318
+ api_host: Optional[str] = None, # Custom API host
319
+ disable_instrumentations: Optional[Dict] = None, # Disable specific integrations
320
+ service_name: Optional[str] = None, # Custom service name
321
+ disable_logging: bool = False, # Disable all logging
322
+ headers: Dict[str, str] = {}, # Custom headers
323
+ )
324
+ ```
325
+
326
+ #### Configuration Details
327
+
328
+ | Parameter | Type | Default Value | Description |
329
+ |-----------|------|---------------|-------------|
330
+ | `api_key` | `str` | `LANGTRACE_API_KEY` or `None` | The API key for authentication. Can be set via environment variable |
331
+ | `batch` | `bool` | `True` | Whether to batch spans before sending them to reduce API calls |
332
+ | `write_spans_to_console` | `bool` | `False` | Enable console logging for debugging purposes |
333
+ | `custom_remote_exporter` | `Optional[Exporter]` | `None` | Custom exporter for sending traces to your own backend |
334
+ | `api_host` | `Optional[str]` | `https://langtrace.ai/` | Custom API endpoint for self-hosted deployments |
335
+ | `disable_instrumentations` | `Optional[Dict]` | `None` | Disable specific vendor instrumentations (e.g., `{'only': ['openai']}`) |
336
+ | `service_name` | `Optional[str]` | `None` | Custom service name for trace identification |
337
+ | `disable_logging` | `bool` | `False` | Disable SDK logging completely |
338
+ | `headers` | `Dict[str, str]` | `{}` | Custom headers for API requests |
339
+
340
+ ### Environment Variables
341
+
342
+ Configure Langtrace behavior using these environment variables:
343
+
344
+ | Variable | Description | Default | Impact |
345
+ |----------|-------------|---------|---------|
346
+ | `LANGTRACE_API_KEY` | Primary authentication method | Required* | Required if not passed to init() |
347
+ | `TRACE_PROMPT_COMPLETION_DATA` | Control prompt/completion tracing | `true` | Set to 'false' to opt out of prompt/completion data collection |
348
+ | `TRACE_DSPY_CHECKPOINT` | Control DSPy checkpoint tracing | `true` | Set to 'false' to disable checkpoint tracing |
349
+ | `LANGTRACE_ERROR_REPORTING` | Control error reporting | `true` | Set to 'false' to disable Sentry error reporting |
350
+ | `LANGTRACE_API_HOST` | Custom API endpoint | `https://langtrace.ai/` | Override default API endpoint for self-hosted deployments |
351
+
352
+ > **Performance Note**: Setting `TRACE_DSPY_CHECKPOINT=false` is recommended in production environments as checkpoint tracing involves state serialization which can impact latency.
353
+
354
+ > **Security Note**: When `TRACE_PROMPT_COMPLETION_DATA=false`, no prompt or completion data will be collected, ensuring sensitive information remains private.
355
+
356
+ ## 🔧 Advanced Features
357
+
358
+ ### Root Span Decorator
359
+
360
+ Use the root span decorator to create custom trace hierarchies:
361
+
362
+ ```python
363
+ from langtrace_python_sdk import langtrace
364
+
365
+ @langtrace.with_langtrace_root_span(name="custom_operation")
366
+ def my_function():
367
+ # Your code here
368
+ pass
369
+ ```
370
+
371
+ ### Additional Attributes
372
+
373
+ Inject custom attributes into your traces:
374
+
375
+ ```python
376
+ # Using decorator
377
+ @langtrace.with_additional_attributes({"custom_key": "custom_value"})
378
+ def my_function():
379
+ pass
380
+
381
+ # Using context manager
382
+ with langtrace.inject_additional_attributes({"custom_key": "custom_value"}):
383
+ # Your code here
384
+ pass
385
+ ```
386
+
387
+ ### Prompt Registry
388
+
389
+ Register and manage prompts for better traceability:
390
+
391
+ ```python
392
+ from langtrace_python_sdk import langtrace
393
+
394
+ # Register a prompt template
395
+ langtrace.register_prompt("greeting", "Hello, {name}!")
396
+
397
+ # Use registered prompt
398
+ response = client.chat.completions.create(
399
+ model="gpt-4",
400
+ messages=[{"role": "user", "content": langtrace.get_prompt("greeting", name="Alice")}]
401
+ )
402
+ ```
403
+
404
+ ### User Feedback System
405
+
406
+ Collect and analyze user feedback:
407
+
408
+ ```python
409
+ from langtrace_python_sdk import langtrace
410
+
411
+ # Record user feedback for a trace
412
+ langtrace.record_feedback(
413
+ trace_id="your_trace_id",
414
+ rating=5,
415
+ feedback_text="Great response!",
416
+ metadata={"user_id": "123"}
417
+ )
418
+ ```
419
+
420
+ ### DSPy Checkpointing
421
+
422
+ Manage DSPy checkpoints for workflow tracking:
423
+
424
+ ```python
425
+ from langtrace_python_sdk import langtrace
426
+
427
+ # Enable checkpoint tracing (disabled by default in production)
428
+ langtrace.init(
429
+ api_key="your_api_key",
430
+ dspy_checkpoint_tracing=True
431
+ )
432
+ ```
433
+
434
+ ### Vector Database Operations
435
+
436
+ Track vector database operations:
437
+
438
+ ```python
439
+ from langtrace_python_sdk import langtrace
440
+
441
+ # Vector operations are automatically traced
442
+ with langtrace.inject_additional_attributes({"operation_type": "similarity_search"}):
443
+ results = vector_db.similarity_search("query", k=5)
444
+ ```
445
+
446
+ For more detailed examples and use cases, visit our [documentation](https://docs.langtrace.ai).
447
+
448
+ <!-- Will be expanded in step 007 with comprehensive documentation of advanced features -->
449
+
450
+ ## 📐 Examples
451
+
452
+ <!-- Will be added in step 008: Framework-specific examples and usage patterns -->
453
+
454
+ ## 🏠 Langtrace Self Hosted
455
+
456
+ <!-- Original self-hosted documentation -->
457
+ Get started with self-hosted Langtrace:
458
+
459
+ ```python
460
+ from langtrace_python_sdk import langtrace
461
+ langtrace.init(write_spans_to_console=True) # For console logging
462
+ # OR
463
+ langtrace.init(custom_remote_exporter=<your_exporter>, batch=<True or False>) # For custom exporter
464
+ ```
465
+
466
+ ## 🤝 Contributing
467
+
468
+ We welcome contributions! To get started:
469
+
470
+ 1. Fork this repository and start developing
471
+ 2. Join our [Discord](https://discord.langtrace.ai) server
472
+ 3. Run examples:
473
+ ```bash
474
+ # In run_example.py, set ENABLED_EXAMPLES flag to True for desired example
475
+ python src/run_example.py
476
+ ```
477
+ 4. Run tests:
478
+ ```bash
479
+ pip install '.[test]' && pip install '.[dev]'
480
+ pytest -v
481
+ ```
482
+
483
+ ## 🔒 Security
484
+
485
+ To report security vulnerabilities, email us at <security@scale3labs.com>. You can read more on security [here](https://github.com/Scale3-Labs/langtrace/blob/development/SECURITY.md).
486
+
487
+ ## ❓ Frequently Asked Questions
488
+
489
+ <!-- Will be populated during content addition steps -->
490
+
491
+ ## 📜 License
492
+
493
+ Langtrace Python SDK is licensed under the Apache 2.0 License. You can read about this license [here](https://www.apache.org/licenses/LICENSE-2.0).
@@ -105,7 +105,7 @@ examples/weaviate_example/__init__.py,sha256=8JMDBsRSEV10HfTd-YC7xb4txBjD3la56sn
105
105
  examples/weaviate_example/query_text.py,sha256=wPHQTc_58kPoKTZMygVjTj-2ZcdrIuaausJfMxNQnQc,127162
106
106
  langtrace_python_sdk/__init__.py,sha256=VZM6i71NR7pBQK6XvJWRelknuTYUhqwqE7PlicKa5Wg,1166
107
107
  langtrace_python_sdk/langtrace.py,sha256=TtRWuUiWUB0S7JiQpUsF9lZsiyqPG3m9mMDX-QlDgAw,12601
108
- langtrace_python_sdk/version.py,sha256=GblPlLThspLCrtYLx-68Q7tG3E2pz1nlwTjMxZYJOYo,22
108
+ langtrace_python_sdk/version.py,sha256=pQATGDyqrNBUFj1qQmGoPCfn2udX5OQy6wvCOuVPUfg,22
109
109
  langtrace_python_sdk/constants/__init__.py,sha256=3CNYkWMdd1DrkGqzLUgNZXjdAlM6UFMlf_F-odAToyc,146
110
110
  langtrace_python_sdk/constants/exporter/langtrace_exporter.py,sha256=d-3Qn5C_NTy1NkmdavZvy-6vePwTC5curN6QMy2haHc,50
111
111
  langtrace_python_sdk/constants/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -186,7 +186,7 @@ langtrace_python_sdk/instrumentation/llamaindex/instrumentation.py,sha256=8iAg-O
186
186
  langtrace_python_sdk/instrumentation/llamaindex/patch.py,sha256=548hzPyT_k-2wmt9AArv4JzTT4j4AGKJq5Ar2bWv7o8,4615
187
187
  langtrace_python_sdk/instrumentation/milvus/__init__.py,sha256=gGb0xdMrpX_IEHdTplMt_U5A2IrKRc7lf5VU0WAsgVY,88
188
188
  langtrace_python_sdk/instrumentation/milvus/instrumentation.py,sha256=bpf6uRIJ44F1H2ncIgjjrVSwHMs1EiX7kTOy-urMDu0,928
189
- langtrace_python_sdk/instrumentation/milvus/patch.py,sha256=_sasB17CnBCufxtY3AzeTZFBo3449bn6iHsPmyAire4,4378
189
+ langtrace_python_sdk/instrumentation/milvus/patch.py,sha256=0yY5aQz0x7hpQZ8U-0qfyk_KIizW60s0e2CZrXe3FNk,4729
190
190
  langtrace_python_sdk/instrumentation/mistral/__init__.py,sha256=mkGALBQvq0jSfwDl6TU09SFwnVs6O4zkUi-yVmd3SNg,90
191
191
  langtrace_python_sdk/instrumentation/mistral/instrumentation.py,sha256=qtCkHCSOaiicUChbmTID4lcK1rbeW8oRSbpda2ogbgM,2328
192
192
  langtrace_python_sdk/instrumentation/mistral/patch.py,sha256=1peU0vqt9BGYn2PFNyKAMKNRVMEujXNyZGzgttPJrTQ,6580
@@ -221,7 +221,7 @@ langtrace_python_sdk/utils/prompt_registry.py,sha256=n5dQMVLBw8aJZY8Utvf67bncc25
221
221
  langtrace_python_sdk/utils/sdk_version_checker.py,sha256=F-VVVH7Fmhr5LcY0IIe-34zIi5RQcx26uuxFpPzZesM,1782
222
222
  langtrace_python_sdk/utils/silently_fail.py,sha256=wzmvRDZppaRZgVP8C1xpq2GlWXYCwubhaeWvEbQP1SI,1196
223
223
  langtrace_python_sdk/utils/types.py,sha256=l-N6o7cnWUyrD6dBvW7W3Pf5CkPo5QaoT__k1XLbrQg,383
224
- langtrace_python_sdk/utils/with_root_span.py,sha256=2iWu8XD1NOFqSFgDZDJiMHZ1JB4HzmYPLr_F3Ugul2k,8480
224
+ langtrace_python_sdk/utils/with_root_span.py,sha256=EW0WQfxESntf23dJt0E0rFdHV-7BRniW1Ce6nrCoEPY,8291
225
225
  tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
226
226
  tests/conftest.py,sha256=0Jo6iCZTXbdvyJVhG9UpYGkLabL75378oauCzmt-Sa8,603
227
227
  tests/utils.py,sha256=8ZBYvxBH6PynipT1sqenfyjTGLhEV7SORQH1NJjnpsM,2500
@@ -264,8 +264,8 @@ tests/pinecone/cassettes/test_query.yaml,sha256=b5v9G3ssUy00oG63PlFUR3JErF2Js-5A
264
264
  tests/pinecone/cassettes/test_upsert.yaml,sha256=neWmQ1v3d03V8WoLl8FoFeeCYImb8pxlJBWnFd_lITU,38607
265
265
  tests/qdrant/conftest.py,sha256=9n0uHxxIjWk9fbYc4bx-uP8lSAgLBVx-cV9UjnsyCHM,381
266
266
  tests/qdrant/test_qdrant.py,sha256=pzjAjVY2kmsmGfrI2Gs2xrolfuaNHz7l1fqGQCjp5_o,3353
267
- langtrace_python_sdk-3.3.6.dist-info/METADATA,sha256=dXfiYaLZTVt9ZFTeAUyGT__kD6njegfpMoiwoC_0Cig,16005
268
- langtrace_python_sdk-3.3.6.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
269
- langtrace_python_sdk-3.3.6.dist-info/entry_points.txt,sha256=1_b9-qvf2fE7uQNZcbUei9vLpFZBbbh9LrtGw95ssAo,70
270
- langtrace_python_sdk-3.3.6.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
271
- langtrace_python_sdk-3.3.6.dist-info/RECORD,,
267
+ langtrace_python_sdk-3.3.8.dist-info/METADATA,sha256=sfFy68YkEQs8-KFe2u4oLkL7tn1KohguWPdWBzC5uPs,15642
268
+ langtrace_python_sdk-3.3.8.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
269
+ langtrace_python_sdk-3.3.8.dist-info/entry_points.txt,sha256=1_b9-qvf2fE7uQNZcbUei9vLpFZBbbh9LrtGw95ssAo,70
270
+ langtrace_python_sdk-3.3.8.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
271
+ langtrace_python_sdk-3.3.8.dist-info/RECORD,,
@@ -1,371 +0,0 @@
1
- Metadata-Version: 2.3
2
- Name: langtrace-python-sdk
3
- Version: 3.3.6
4
- Summary: Python SDK for LangTrace
5
- Project-URL: Homepage, https://github.com/Scale3-Labs/langtrace-python-sdk
6
- Author-email: Scale3 Labs <engineering@scale3labs.com>
7
- License: Apache-2.0
8
- Classifier: License :: OSI Approved :: Apache Software License
9
- Classifier: Operating System :: OS Independent
10
- Classifier: Programming Language :: Python :: 3
11
- Requires-Python: >=3.9
12
- Requires-Dist: colorama>=0.4.6
13
- Requires-Dist: fsspec>=2024.6.0
14
- Requires-Dist: opentelemetry-api>=1.25.0
15
- Requires-Dist: opentelemetry-exporter-otlp-proto-grpc>=1.25.0
16
- Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.25.0
17
- Requires-Dist: opentelemetry-instrumentation-sqlalchemy>=0.46b0
18
- Requires-Dist: opentelemetry-instrumentation>=0.47b0
19
- Requires-Dist: opentelemetry-sdk>=1.25.0
20
- Requires-Dist: sentry-sdk>=2.14.0
21
- Requires-Dist: sqlalchemy
22
- Requires-Dist: tiktoken>=0.1.1
23
- Requires-Dist: trace-attributes==7.1.0
24
- Requires-Dist: transformers>=4.11.3
25
- Requires-Dist: ujson>=5.10.0
26
- Provides-Extra: dev
27
- Requires-Dist: anthropic; extra == 'dev'
28
- Requires-Dist: boto3; extra == 'dev'
29
- Requires-Dist: chromadb; extra == 'dev'
30
- Requires-Dist: cohere; extra == 'dev'
31
- Requires-Dist: embedchain; extra == 'dev'
32
- Requires-Dist: google-cloud-aiplatform; extra == 'dev'
33
- Requires-Dist: google-generativeai; extra == 'dev'
34
- Requires-Dist: groq; extra == 'dev'
35
- Requires-Dist: langchain; extra == 'dev'
36
- Requires-Dist: langchain-community; extra == 'dev'
37
- Requires-Dist: langchain-openai; extra == 'dev'
38
- Requires-Dist: litellm==1.48.7; extra == 'dev'
39
- Requires-Dist: mistralai; extra == 'dev'
40
- Requires-Dist: ollama; extra == 'dev'
41
- Requires-Dist: openai==1.45.0; extra == 'dev'
42
- Requires-Dist: pinecone-client; extra == 'dev'
43
- Requires-Dist: python-dotenv; extra == 'dev'
44
- Requires-Dist: qdrant-client; extra == 'dev'
45
- Requires-Dist: setuptools; extra == 'dev'
46
- Requires-Dist: weaviate-client; extra == 'dev'
47
- Provides-Extra: test
48
- Requires-Dist: pytest; extra == 'test'
49
- Requires-Dist: pytest-asyncio; extra == 'test'
50
- Requires-Dist: pytest-vcr; extra == 'test'
51
- Description-Content-Type: text/markdown
52
-
53
- # [Langtrace](https://www.langtrace.ai)
54
-
55
- ## Open Source & Open Telemetry(OTEL) Observability for LLM applications
56
-
57
- ![Static Badge](https://img.shields.io/badge/License-Apache--2.0-blue) ![Static Badge](https://img.shields.io/badge/npm_@langtrase/typescript--sdk-1.2.9-green) ![Static Badge](https://img.shields.io/badge/pip_langtrace--python--sdk-1.2.8-green) ![Static Badge](https://img.shields.io/badge/Development_status-Active-green)
58
-
59
- ---
60
-
61
- Langtrace is an open source observability software which lets you capture, debug and analyze traces and metrics from all your applications that leverages LLM APIs, Vector Databases and LLM based Frameworks.
62
-
63
- ## Open Telemetry Support
64
-
65
- The traces generated by Langtrace adhere to [Open Telemetry Standards(OTEL)](https://opentelemetry.io/docs/concepts/signals/traces/). We are developing [semantic conventions](https://opentelemetry.io/docs/concepts/semantic-conventions/) for the traces generated by this project. You can checkout the current definitions in [this repository](https://github.com/Scale3-Labs/langtrace-trace-attributes/tree/main/schemas). Note: This is an ongoing development and we encourage you to get involved and welcome your feedback.
66
-
67
- ---
68
-
69
- ## Langtrace Cloud ☁️
70
-
71
- To use the managed SaaS version of Langtrace, follow the steps below:
72
-
73
- 1. Sign up by going to [this link](https://langtrace.ai).
74
- 2. Create a new Project after signing up. Projects are containers for storing traces and metrics generated by your application. If you have only one application, creating 1 project will do.
75
- 3. Generate an API key by going inside the project.
76
- 4. In your application, install the Langtrace SDK and initialize it with the API key you generated in the step 3.
77
- 5. The code for installing and setting up the SDK is shown below
78
-
79
- ## Getting Started
80
-
81
- Get started by adding simply three lines to your code!
82
-
83
- ```python
84
- pip install langtrace-python-sdk
85
- ```
86
-
87
- ```python
88
- from langtrace_python_sdk import langtrace # Must precede any llm module imports
89
- langtrace.init(api_key=<your_api_key>)
90
- ```
91
-
92
- OR
93
-
94
- ```python
95
- from langtrace_python_sdk import langtrace # Must precede any llm module imports
96
- langtrace.init() # LANGTRACE_API_KEY as an ENVIRONMENT variable
97
- ```
98
-
99
- ## FastAPI Quick Start
100
-
101
- Initialize FastAPI project and add this inside the `main.py` file
102
-
103
- ```python
104
- from fastapi import FastAPI
105
- from langtrace_python_sdk import langtrace
106
- from openai import OpenAI
107
-
108
- langtrace.init()
109
- app = FastAPI()
110
- client = OpenAI()
111
-
112
- @app.get("/")
113
- def root():
114
- client.chat.completions.create(
115
- model="gpt-4",
116
- messages=[{"role": "user", "content": "Say this is a test three times"}],
117
- stream=False,
118
- )
119
- return {"Hello": "World"}
120
- ```
121
-
122
- ## Django Quick Start
123
-
124
- Initialize django project and add this inside the `__init.py__` file
125
-
126
- ```python
127
- from langtrace_python_sdk import langtrace
128
- from openai import OpenAI
129
-
130
-
131
- langtrace.init()
132
- client = OpenAI()
133
-
134
- client.chat.completions.create(
135
- model="gpt-4",
136
- messages=[{"role": "user", "content": "Say this is a test three times"}],
137
- stream=False,
138
- )
139
-
140
- ```
141
-
142
- ## Flask Quick Start
143
-
144
- Initialize flask project and this inside `app.py` file
145
-
146
- ```python
147
- from flask import Flask
148
- from langtrace_python_sdk import langtrace
149
- from openai import OpenAI
150
-
151
- langtrace.init()
152
- client = OpenAI()
153
- app = Flask(__name__)
154
-
155
-
156
- @app.route("/")
157
- def main():
158
- client.chat.completions.create(
159
- model="gpt-4",
160
- messages=[{"role": "user", "content": "Say this is a test three times"}],
161
- stream=False,
162
- )
163
- return "Hello, World!"
164
- ```
165
-
166
- ## Langtrace Self Hosted
167
-
168
- Get started by adding simply two lines to your code and see traces being logged to the console!
169
-
170
- ```python
171
- pip install langtrace-python-sdk
172
- ```
173
-
174
- ```python
175
- from langtrace_python_sdk import langtrace # Must precede any llm module imports
176
- langtrace.init(write_spans_to_console=True)
177
- ```
178
-
179
- ## Langtrace self hosted custom exporter
180
-
181
- Get started by simply adding three lines to your code and see traces being exported to your remote location!
182
-
183
- ```bash
184
- pip install langtrace-python-sdk
185
- ```
186
-
187
- ```python
188
- from langtrace_python_sdk import langtrace # Must precede any llm module imports
189
- langtrace.init(custom_remote_exporter=<your_exporter>, batch=<True or False>)
190
- ```
191
-
192
- ### Configure Langtrace
193
-
194
- | Parameter | Type | Default Value | Description |
195
- | -------------------------- | ----------------------------------- | ----------------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
196
- | `api_key` | `str` | `LANGTRACE_API_KEY` or `None` | The API key for authentication. |
197
- | `batch` | `bool` | `True` | Whether to batch spans before sending them. |
198
- | `write_spans_to_console` | `bool` | `False` | Whether to write spans to the console. |
199
- | `custom_remote_exporter` | `Optional[Exporter]` | `None` | Custom remote exporter. If `None`, a default `LangTraceExporter` will be used. |
200
- | `api_host` | `Optional[str]` | `https://langtrace.ai/` | The API host for the remote exporter. |
201
- | `disable_instrumentations` | `Optional[DisableInstrumentations]` | `None` | You can pass an object to disable instrumentation for specific vendors ex: `{'only': ['openai']}` or `{'all_except': ['openai']}` |
202
-
203
- ### Error Reporting to Langtrace
204
-
205
- By default, all SDK errors are reported to Langtrace via Sentry. This can be disabled by setting the following environment variable to `False` like so `LANGTRACE_ERROR_REPORTING=False`
206
-
207
- ### Additional Customization
208
-
209
- - `@with_langtrace_root_span` - this decorator is designed to organize and relate different spans, in a hierarchical manner. When you're performing multiple operations that you want to monitor together as a unit, this function helps by establishing a "parent" (`LangtraceRootSpan` or whatever is passed to `name`) span. Then, any calls to the LLM APIs made within the given function (fn) will be considered "children" of this parent span. This setup is especially useful for tracking the performance or behavior of a group of operations collectively, rather than individually.
210
-
211
- ```python
212
- from langtrace_python_sdk import with_langtrace_root_span
213
-
214
- @with_langtrace_root_span()
215
- def example():
216
- response = client.chat.completions.create(
217
- model="gpt-4",
218
- messages=[{"role": "user", "content": "Say this is a test three times"}],
219
- stream=False,
220
- )
221
- return response
222
- ```
223
-
224
- - `inject_additional_attributes` - this function is designed to enhance the traces by adding custom attributes to the current context. These custom attributes provide extra details about the operations being performed, making it easier to analyze and understand their behavior.
225
-
226
- ```python
227
- from langtrace_python_sdk import inject_additional_attributes
228
-
229
-
230
-
231
- def do_llm_stuff(name=""):
232
- response = client.chat.completions.create(
233
- model="gpt-4",
234
- messages=[{"role": "user", "content": "Say this is a test three times"}],
235
- stream=False,
236
- )
237
- return response
238
-
239
-
240
- def main():
241
- response = inject_additional_attributes(lambda: do_llm_stuff(name="llm"), {'user.id': 'userId'})
242
-
243
- # if the function does not take arguments, then this syntax will work
244
- response = inject_additional_attributes(do_llm_stuff, {'user.id': 'userId'})
245
- ```
246
-
247
- `with_additional_attributes` - behaves the same as `inject_additional_attributes` but as a decorator; it will be deprecated soon.
248
-
249
- ```python
250
- from langtrace_python_sdk import with_langtrace_root_span, with_additional_attributes
251
-
252
-
253
- @with_additional_attributes({"user.id": "1234"})
254
- def api_call1():
255
- response = client.chat.completions.create(
256
- model="gpt-4",
257
- messages=[{"role": "user", "content": "Say this is a test three times"}],
258
- stream=False,
259
- )
260
- return response
261
-
262
-
263
- @with_additional_attributes({"user.id": "5678"})
264
- def api_call2():
265
- response = client.chat.completions.create(
266
- model="gpt-4",
267
- messages=[{"role": "user", "content": "Say this is a test three times"}],
268
- stream=False,
269
- )
270
- return response
271
-
272
-
273
- @with_langtrace_root_span()
274
- def chat_completion():
275
- api_call1()
276
- api_call2()
277
- ```
278
-
279
- `get_prompt_from_registry` - this function is designed to fetch the desired prompt from the `Prompt Registry`. You can pass two options for filtering: `prompt_version` & `variables`.
280
-
281
- ```python
282
- from langtrace_python_sdk import get_prompt_from_registry
283
-
284
- prompt = get_prompt_from_registry(<Registry ID>, options={"prompt_version": 1, "variables": {"foo": "bar"} })
285
- ```
286
-
287
- ### Opt out of tracing prompt and completion data
288
-
289
- By default, prompt and completion data are captured. If you would like to opt out of it, set the following env var,
290
-
291
- `TRACE_PROMPT_COMPLETION_DATA=false`
292
-
293
- ### Enable/Disable checkpoint tracing for DSPy
294
-
295
- By default, checkpoints are traced for DSPy pipelines. If you would like to disable it, set the following env var,
296
-
297
- `TRACE_DSPY_CHECKPOINT=false`
298
-
299
- Note: Checkpoint tracing will increase the latency of executions as the state is serialized. Please disable it in production.
300
-
301
- ## Supported integrations
302
-
303
- Langtrace automatically captures traces from the following vendors:
304
-
305
- | Vendor | Type | Typescript SDK | Python SDK |
306
- | ------------- | --------------- | ------------------ | ------------------------------- |
307
- | OpenAI | LLM | :white_check_mark: | :white_check_mark: |
308
- | Anthropic | LLM | :white_check_mark: | :white_check_mark: |
309
- | Azure OpenAI | LLM | :white_check_mark: | :white_check_mark: |
310
- | Cohere | LLM | :white_check_mark: | :white_check_mark: |
311
- | Groq | LLM | :x: | :white_check_mark: |
312
- | Perplexity | LLM | :white_check_mark: | :white_check_mark: |
313
- | Gemini | LLM | :x: | :white_check_mark: |
314
- | Mistral | LLM | :x: | :white_check_mark: |
315
- | Langchain | Framework | :x: | :white_check_mark: |
316
- | Langgraph | Framework | :x: | :white_check_mark: |
317
- | LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
318
- | AWS Bedrock | Framework | :white_check_mark: | :white_check_mark: |
319
- | LiteLLM | Framework | :x: | :white_check_mark: |
320
- | DSPy | Framework | :x: | :white_check_mark: |
321
- | CrewAI | Framework | :x: | :white_check_mark: |
322
- | Ollama | Framework | :x: | :white_check_mark: |
323
- | VertexAI | Framework | :x: | :white_check_mark: |
324
- | Vercel AI SDK | Framework | :white_check_mark: | :x: |
325
- | EmbedChain | Framework | :x: | :white_check_mark: |
326
- | Autogen | Framework | :x: | :white_check_mark: |
327
- | Pinecone | Vector Database | :white_check_mark: | :white_check_mark: |
328
- | ChromaDB | Vector Database | :white_check_mark: | :white_check_mark: |
329
- | QDrant | Vector Database | :white_check_mark: | :white_check_mark: |
330
- | Weaviate | Vector Database | :white_check_mark: | :white_check_mark: |
331
- | PGVector | Vector Database | :white_check_mark: | :white_check_mark: (SQLAlchemy) |
332
-
333
- ---
334
-
335
- ## Feature Requests and Issues
336
-
337
- - To request for features, head over [here to start a discussion](https://github.com/Scale3-Labs/langtrace/discussions/categories/feature-requests).
338
- - To raise an issue, head over [here and create an issue](https://github.com/Scale3-Labs/langtrace/issues).
339
-
340
- ---
341
-
342
- ## Contributions
343
-
344
- We welcome contributions to this project. To get started, fork this repository and start developing. To get involved, join our [Discord](https://discord.langtrace.ai) workspace.
345
-
346
- - If you want to run any of the examples go to `run_example.py` file, you will find `ENABLED_EXAMPLES`. choose the example you want to run and just toggle the flag to `True` and run the file using `python src/run_example.py`
347
-
348
- - If you want to run tests, make sure to install dev & test dependencies:
349
-
350
- ```python
351
- pip install '.[test]' && pip install '.[dev]'
352
- ```
353
-
354
- then run `pytest` using:
355
-
356
- ```python
357
- pytest -v
358
- ```
359
-
360
- ---
361
-
362
- ## Security
363
-
364
- To report security vulnerabilities, email us at <security@scale3labs.com>. You can read more on security [here](https://github.com/Scale3-Labs/langtrace/blob/development/SECURITY.md).
365
-
366
- ---
367
-
368
- ## License
369
-
370
- - Langtrace application is [licensed](https://github.com/Scale3-Labs/langtrace/blob/development/LICENSE) under the AGPL 3.0 License. You can read about this license [here](https://www.gnu.org/licenses/agpl-3.0.en.html).
371
- - Langtrace SDKs are licensed under the Apache 2.0 License. You can read about this license [here](https://www.apache.org/licenses/LICENSE-2.0).