langtrace-python-sdk 1.1.29__py3-none-any.whl → 1.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langtrace_python_sdk/instrumentation/anthropic/patch.py +0 -1
- langtrace_python_sdk/instrumentation/openai/patch.py +29 -24
- langtrace_python_sdk/version.py +1 -0
- langtrace_python_sdk-1.2.5.dist-info/METADATA +63 -0
- {langtrace_python_sdk-1.1.29.dist-info → langtrace_python_sdk-1.2.5.dist-info}/RECORD +7 -24
- {langtrace_python_sdk-1.1.29.dist-info → langtrace_python_sdk-1.2.5.dist-info}/WHEEL +1 -2
- examples/__init__.py +0 -0
- examples/anthropic_example/__init__.py +0 -0
- examples/anthropic_example/completion.py +0 -27
- examples/chroma_example/__init__.py +0 -1
- examples/chroma_example/basic.py +0 -25
- examples/langchain_example/__init__.py +0 -0
- examples/langchain_example/basic.py +0 -66
- examples/langchain_example/tool.py +0 -85
- examples/llamaindex_example/__init__.py +0 -0
- examples/llamaindex_example/basic.py +0 -21
- examples/openai/__init__.py +0 -0
- examples/openai/chat_completion.py +0 -39
- examples/openai/embeddings_create.py +0 -18
- examples/openai/function_calling.py +0 -66
- examples/openai/images_generate.py +0 -20
- examples/pinecone_example/__init__.py +0 -0
- examples/pinecone_example/basic.py +0 -36
- langtrace_python_sdk-1.1.29.dist-info/METADATA +0 -220
- langtrace_python_sdk-1.1.29.dist-info/top_level.txt +0 -2
- {langtrace_python_sdk-1.1.29.dist-info → langtrace_python_sdk-1.2.5.dist-info/licenses}/LICENSE +0 -0
langtrace_python_sdk/instrumentation/anthropic/patch.py

```diff
@@ -9,7 +9,6 @@ from opentelemetry.trace.status import Status, StatusCode
 
 from langtrace_python_sdk.constants.instrumentation.anthropic import APIS
 from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
-from langtrace_python_sdk.utils.llm import estimate_tokens
 
 
 def messages_create(original_method, version, tracer):
```
langtrace_python_sdk/instrumentation/openai/patch.py

```diff
@@ -97,7 +97,6 @@ def chat_completions_create(original_method, version, tracer):
             "langtrace.version": "1.0.0",
             "url.full": base_url,
             "llm.api": APIS["CHAT_COMPLETION"]["ENDPOINT"],
-            "llm.model": kwargs.get("model"),
             "llm.prompts": json.dumps(kwargs.get("messages", [])),
             "llm.stream": kwargs.get("stream"),
         }
```
```diff
@@ -126,28 +125,34 @@ def chat_completions_create(original_method, version, tracer):
             # Attempt to call the original method
             result = original_method(*args, **kwargs)
             if kwargs.get("stream") is False:
+                span.set_attribute("llm.model", result.model)
                 if hasattr(result, "choices") and result.choices is not None:
                     responses = [
                         {
-                            "message": …
-                            and choice.message. …
-                            else …
+                            "message": {
+                                "role": choice.message.role
+                                if choice.message and choice.message.role
+                                else "assistant",
+                                "content": (
+                                    choice.message.content
+                                    if choice.message and choice.message.content
+                                    else (
+                                        choice.message.function_call.arguments
+                                        if choice.message
+                                        and choice.message.function_call.arguments
+                                        else ""
+                                    )
+                                ),
+                                **(
+                                    {
+                                        "content_filter_results": choice[
+                                            "content_filter_results"
+                                        ]
+                                    }
+                                    if "content_filter_results" in choice
+                                    else {}
+                                ),
+                            }
                         }
                         for choice in result.choices
                     ]
```

(The removed side of this hunk, old lines 132-150, survives only as the fragments shown; the rest is truncated in the source diff.)
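Net effect of this hunk: `llm.model` is now read from the API response instead of the request kwargs, and each entry of the `responses` list becomes a structured dict that falls back to `function_call.arguments` when `content` is empty and carries Azure-style `content_filter_results` through when present. A minimal sketch of the resulting shape, with illustrative values that are not taken from the source:

```python
# Illustrative only: one entry of the responses list built in the hunk above,
# for a choice that returned a function call (empty content) plus a content
# filter verdict. All concrete values here are made up.
serialized_choice = {
    "message": {
        "role": "assistant",                  # falls back to "assistant" when the role is missing
        "content": '{"location": "Boston"}',  # function_call.arguments stands in for empty content
        "content_filter_results": {"hate": {"filtered": False}},  # merged in only when present
    }
}
```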
```diff
@@ -167,8 +172,8 @@ def chat_completions_create(original_method, version, tracer):
                 usage = result.usage
                 if usage is not None:
                     usage_dict = {
-                        "…
-                        "…
+                        "input_tokens": result.usage.prompt_tokens,
+                        "output_tokens": usage.completion_tokens,
                         "total_tokens": usage.total_tokens,
                     }
                     span.set_attribute("llm.token.counts", json.dumps(usage_dict))
```

(The removed key names are truncated in the source diff, here and in the matching streaming hunk below.)
```diff
@@ -244,8 +249,8 @@ def chat_completions_create(original_method, version, tracer):
                         "llm.token.counts",
                         json.dumps(
                             {
-                                "…
-                                "…
+                                "input_tokens": prompt_tokens,
+                                "output_tokens": completion_tokens,
                                 "total_tokens": prompt_tokens + completion_tokens,
                             }
                         ),
```
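Both usage hunks rename the serialized token-count keys, so anything consuming the `llm.token.counts` span attribute now reads `input_tokens`/`output_tokens`. A small sketch of the attribute value after this change, with made-up numbers:

```python
import json

# Hypothetical counts for illustration; the span attribute stores the JSON string.
usage_dict = {"input_tokens": 12, "output_tokens": 48, "total_tokens": 60}
print(json.dumps(usage_dict))
# {"input_tokens": 12, "output_tokens": 48, "total_tokens": 60}
```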
langtrace_python_sdk/version.py ADDED

```diff
@@ -0,0 +1 @@
+__version__ = "1.2.5"
```
langtrace_python_sdk-1.2.5.dist-info/METADATA ADDED

```diff
@@ -0,0 +1,63 @@
+Metadata-Version: 2.3
+Name: langtrace-python-sdk
+Version: 1.2.5
+Summary: Python SDK for LangTrace
+Project-URL: Homepage, https://github.com/Scale3-Labs/langtrace-python-sdk
+Author-email: Scale3 Labs <engineering@scale3labs.com>
+License: AGPL-3.0-or-later
+License-File: LICENSE
+Classifier: License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Requires-Python: >=3.9
+Description-Content-Type: text/markdown
+
+# What is Langtrace?
+
+Langtrace stands as a developer-centric, open-source solution, fully compatible with OpenTelemetry. It enables developers to effortlessly trace, monitor, and debug their LLM applications, offering robust support for automatic instrumentation.
+
+## Supported LLM Modules
+
+Langtrace supports a comprehensive range of LLMs, VectorDBs, and frameworks, ensuring wide coverage for your development needs:
+
+### LLMs
+
+1. OpenAI
+2. Anthropic
+3. Azure OpenAI
+
+### VectorDBs
+
+1. Pinecone
+2. Chromadb
+
+### Frameworks
+
+1. LangChain
+2. LlamaIndex
+3. Haystack
+
+We are actively working to extend our support to additional libraries!
+
+## Getting Started
+
+To begin utilizing Langtrace, follow these straightforward steps:
+
+1. Install the package using `pip install langtrace-python-sdk`.
+2. Incorporate Langtrace into your project with `from langtrace_python_sdk import langtrace`.
+   - This import should precede any other LLM module imports (such as OpenAI, LlamaIndex, etc.) to ensure proper functionality.
+3. Initialize Langtrace by adding `langtrace.init({ write_to_remote_url: false})` to your code.
+4. Congratulations, you've completed the basic setup! You will now begin to see traces from your LLM modules logged directly to the console.
+
+## Exporting Traces to Langtrace
+
+To configure trace exporting, you have two options:
+
+You'll need both a Langtrace `api_key` and a `remote_url`, which can be acquired by logging into your Langtrace account.
+
+1. Direct Initialization: Utilize `langtrace.init(batch=True, api_key=<YOUR_API_KEY>, remote_url=<YOUR_REMOTE_URL>)`.
+2. Environment Variables: Set `API_KEY` and `URL`, then add `LangTrace.init(batch=True)` at the beginning of your file.
+
+## Langtrace Cloud
+
+Currently under development 🚧
```
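The quickstart this METADATA describes, combined with the example scripts removed later in this diff, boils down to a few lines. A minimal sketch — the `log_spans_to_console` flag and the OpenAI client usage come from the deleted examples, not from the README itself:

```python
from langtrace_python_sdk import langtrace  # must come before other LLM module imports
from openai import OpenAI

# Console-only setup, as used by the deleted example scripts.
langtrace.init(batch=False, log_spans_to_console=True, write_to_remote_url=False)

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say this is a test"}],
)
# Spans for this call are logged straight to the console.
```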
{langtrace_python_sdk-1.1.29.dist-info → langtrace_python_sdk-1.2.5.dist-info}/RECORD

```diff
@@ -1,22 +1,6 @@
-examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-examples/anthropic_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-examples/anthropic_example/completion.py,sha256=IDZVPUpqp31LZtA-wJUBSdG-vEGPtqyOToxo25ifvXU,659
-examples/chroma_example/__init__.py,sha256=Tq6pae7fHA7dwuDslabB5MTNedL21gu2RaZicpxSyLU,25
-examples/chroma_example/basic.py,sha256=pmDTgborQYxMRpcJfyu4XlJDguBcb9FR5gNLksrzEZk,883
-examples/langchain_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-examples/langchain_example/basic.py,sha256=iQpOatVVzbJYVoGlDaXlJG5C7uQG-aoePBAWh0Dhi2Y,2073
-examples/langchain_example/tool.py,sha256=8T8_IDbgA58XbsfyH5_xhA8ZKQfyfyFxF8wor-PsRjA,2556
-examples/llamaindex_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-examples/llamaindex_example/basic.py,sha256=ewjyxzx4ntuID6D5Myo1UFj_F6KGd5yT0yQntCPQ3Rw,688
-examples/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-examples/openai/chat_completion.py,sha256=QmmnKFZTrYCySbQHvWkIn1UDF4Ii_DsM-WPaRrnH1F8,1331
-examples/openai/embeddings_create.py,sha256=AhDNAqg-WzRYLJAE_b2RKGjuVCh4aZSU7MxcZv2kCHQ,518
-examples/openai/function_calling.py,sha256=Y5IZ2eb78dHaWRqOb3k6K0tn6-Zca_TLsV8qwuu1iYA,2369
-examples/openai/images_generate.py,sha256=ZioxTuHKE_yYlhpESqXKVzdkiwdegkmLVB7N8T2LU00,506
-examples/pinecone_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-examples/pinecone_example/basic.py,sha256=8Tpf0hYMgJdjVpISu15BW7IpjrnwZXrBuCGoC9xE7Fg,927
 langtrace_python_sdk/__init__.py,sha256=SlHg447-nQBbw8exRNJP_OyHUZ39Sldb7aaQ35hIRm8,262
 langtrace_python_sdk/langtrace.py,sha256=XS4lhorNLPgsdKB2hTFsHR31_kUgr9cHZTc4dLgJs_g,3251
+langtrace_python_sdk/version.py,sha256=jBmZf3HLbiQlWiolOsAA6J5-BbxXD2bqFqEqDH3lfqo,22
 langtrace_python_sdk/constants/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/constants/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/constants/instrumentation/anthropic.py,sha256=YX3llt3zwDY6XrYk3CB8WEVqgrzRXEw_ffyk56JoF3k,126
```
```diff
@@ -29,7 +13,7 @@ langtrace_python_sdk/extensions/langtrace_exporter.py,sha256=6dMS0MjYCmZbuj3CMb9…
 langtrace_python_sdk/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/instrumentation/anthropic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/instrumentation/anthropic/instrumentation.py,sha256=1shNkDE7Yb-JFVlEZHMcDbJ6zW8SkSFdxf_yFo9wTNA,1101
-langtrace_python_sdk/instrumentation/anthropic/patch.py,sha256=…
+langtrace_python_sdk/instrumentation/anthropic/patch.py,sha256=w4Jcz5McG5NHXlr1IrwJmqj2P2d5vt7uxkpa2OWpbXU,6520
 langtrace_python_sdk/instrumentation/chroma/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/instrumentation/chroma/instrumentation.py,sha256=fSR1ZjoViwjbPYkUWJnRpFwWtwERyf4UqnMiDl8eGmY,1223
 langtrace_python_sdk/instrumentation/chroma/patch.py,sha256=NGHZR5TGaL0JgZX184L8Nt5U3hpDdD-htHUkMgQuT5A,1999
```
```diff
@@ -47,15 +31,14 @@ langtrace_python_sdk/instrumentation/llamaindex/instrumentation.py,sha256=udDxk8…
 langtrace_python_sdk/instrumentation/llamaindex/patch.py,sha256=BJyX9AFiDvCZ3EjgU3roSdhb4CNZdKfajPogvBv33vc,1739
 langtrace_python_sdk/instrumentation/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/instrumentation/openai/instrumentation.py,sha256=6RmbONTwgr8_do92l3ll7-ia4maXMxQVCwPBRRa-3NA,1358
-langtrace_python_sdk/instrumentation/openai/patch.py,sha256=…
+langtrace_python_sdk/instrumentation/openai/patch.py,sha256=FdHW7JEcizQkl3qg2bq7SuLNB4jWJqn64cXnMIg2pno,14219
 langtrace_python_sdk/instrumentation/pinecone/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/instrumentation/pinecone/instrumentation.py,sha256=PBkCMiaDr-WXkGfHt9iXtYqyl7a-xIO52pRmUFuMURo,1726
 langtrace_python_sdk/instrumentation/pinecone/patch.py,sha256=uDKUBjyOVCDL44YxXVR80VSZGgg-ZNCcQulBUwrgYPk,1896
 langtrace_python_sdk/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/utils/llm.py,sha256=4z2e-md_ELXCEuOIRVWracR6qH2pmsOxCqpkuF9_3Nw,1589
 langtrace_python_sdk/utils/with_root_span.py,sha256=LgFVwHq7KZ6sj2d783NZXr2fFWqI1rqVWSjJR68Ad1E,1044
-langtrace_python_sdk-1.…
-langtrace_python_sdk-1.…
-langtrace_python_sdk-1.…
-langtrace_python_sdk-1.…
-langtrace_python_sdk-1.1.29.dist-info/RECORD,,
+langtrace_python_sdk-1.2.5.dist-info/METADATA,sha256=c15XCywpwvDGdX-252msSpUxP59N4T9HRI2tFVzm5Es,2283
+langtrace_python_sdk-1.2.5.dist-info/WHEEL,sha256=uNdcs2TADwSd5pVaP0Z_kcjcvvTUklh2S7bxZMF8Uj0,87
+langtrace_python_sdk-1.2.5.dist-info/licenses/LICENSE,sha256=VD-pauwiiia-Xi2zgKvalKRIFSJJjqRCQw6aIpK2T9U,33892
+langtrace_python_sdk-1.2.5.dist-info/RECORD,,
```

(The removed `langtrace_python_sdk-1.…` dist-info entries are truncated in the source diff.)
examples/__init__.py DELETED
(empty file — no content to show)

examples/anthropic_example/__init__.py DELETED
(empty file — no content to show)
examples/anthropic_example/completion.py DELETED

```diff
@@ -1,27 +0,0 @@
-"""Example of using the anthropic API to create a message."""
-
-import anthropic
-from dotenv import find_dotenv, load_dotenv
-
-from langtrace_python_sdk import langtrace
-
-_ = load_dotenv(find_dotenv())
-
-langtrace.init(batch=False, log_spans_to_console=True, write_to_remote_url=False)
-
-
-def messages_create():
-
-    client = anthropic.Anthropic()
-
-    message = client.messages.create(
-        model="claude-3-opus-20240229",
-        max_tokens=1000,
-        temperature=0.0,
-        system="Respond only in Yoda-speak.",
-        messages=[{"role": "user", "content": "How are you today?"}],
-        stream=True,
-    )
-
-    for response in message:
-        pass
```
examples/chroma_example/__init__.py DELETED

```diff
@@ -1 +0,0 @@
-from .basic import basic
```
examples/chroma_example/basic.py DELETED

```diff
@@ -1,25 +0,0 @@
-import chromadb
-from chromadb.utils import embedding_functions
-from dotenv import find_dotenv, load_dotenv
-
-from langtrace_python_sdk import langtrace
-from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
-
-_ = load_dotenv(find_dotenv())
-
-langtrace.init(batch=False, log_spans_to_console=True, write_to_remote_url=False)
-
-
-@with_langtrace_root_span()
-def basic():
-    chroma_client = chromadb.Client()
-    embedder = embedding_functions.DefaultEmbeddingFunction()
-    collection = chroma_client.create_collection(
-        name="my6_collection", embedding_function=embedder
-    )
-    collection.add(
-        documents=["This is a document", "This is another document"],
-        metadatas=[{"source": "my_source"}, {"source": "my_source"}],
-        ids=["id1", "id2"],
-    )
-    results = collection.query(query_texts=["This is a query document"], n_results=2)
```
examples/langchain_example/__init__.py DELETED
(empty file — no content to show)
examples/langchain_example/basic.py DELETED

```diff
@@ -1,66 +0,0 @@
-from dotenv import find_dotenv, load_dotenv
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain_community.document_loaders import PyPDFLoader
-from langchain_community.vectorstores.faiss import FAISS
-from langchain_core.output_parsers import StrOutputParser
-from langchain_core.prompts.chat import ChatPromptTemplate
-from langchain_core.runnables import RunnablePassthrough
-from langchain_openai import ChatOpenAI, OpenAIEmbeddings
-
-from langtrace_python_sdk import langtrace
-from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
-
-_ = load_dotenv(find_dotenv())
-
-langtrace.init(batch=True, log_spans_to_console=True, write_to_remote_url=False)
-
-
-@with_langtrace_root_span()
-def basic():
-    llm = ChatOpenAI()
-    prompt = ChatPromptTemplate.from_messages(
-        [
-            ("system", "You are world class technical documentation writer."),
-            ("user", "{input}"),
-        ]
-    )
-    output_parser = StrOutputParser()
-    chain = prompt | llm | output_parser
-    res = chain.invoke({"input": "how can langsmith help with testing?"})
-    print(res)
-
-
-@with_langtrace_root_span()
-def rag():
-    vectorstore = FAISS.from_texts(
-        ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
-    )
-    retriever = vectorstore.as_retriever()
-
-    template = """Answer the question based only on the following context:{context}
-
-    Question: {question}
-    """
-    prompt = ChatPromptTemplate.from_template(template)
-
-    model = ChatOpenAI()
-
-    chain = (
-        {"context": retriever, "question": RunnablePassthrough()}
-        | prompt
-        | model
-        | StrOutputParser()
-    )
-
-    res = chain.invoke("where did harrison work?")
-    # print(res)
-
-
-@with_langtrace_root_span()
-def load_and_split():
-    url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
-    loader = PyPDFLoader(url)
-    data = loader.load()
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
-    docs = text_splitter.split_documents(data)
-    # print(docs)
```
examples/langchain_example/tool.py DELETED

```diff
@@ -1,85 +0,0 @@
-from dotenv import find_dotenv, load_dotenv
-from langchain import hub
-from langchain.agents import AgentExecutor, create_openai_functions_agent
-from langchain.chains import LLMMathChain
-from langchain_core.pydantic_v1 import BaseModel, Field
-from langchain_core.tools import Tool
-from langchain_openai import ChatOpenAI
-
-from langtrace_python_sdk import langtrace
-from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
-
-_ = load_dotenv(find_dotenv())
-
-langtrace.init(batch=True, log_spans_to_console=True, write_to_remote_url=False)
-
-
-llm = ChatOpenAI(temperature=0, model="gpt-4")
-llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
-
-primes = {998: 7901, 999: 7907, 1000: 7919}
-
-
-class CalculatorInput(BaseModel):
-    question: str = Field()
-
-
-class PrimeInput(BaseModel):
-    n: int = Field()
-
-
-def is_prime(n: int) -> bool:
-    if n <= 1 or (n % 2 == 0 and n > 2):
-        return False
-    for i in range(3, int(n**0.5) + 1, 2):
-        if n % i == 0:
-            return False
-    return True
-
-
-def get_prime(n: int, primes: dict = primes) -> str:
-    return str(primes.get(int(n)))
-
-
-async def aget_prime(n: int, primes: dict = primes) -> str:
-    return str(primes.get(int(n)))
-
-
-@with_langtrace_root_span()
-def tool_example():
-
-    tools = [
-        Tool(
-            name="GetPrime",
-            func=get_prime,
-            description="A tool that returns the `n`th prime number",
-            args_schema=PrimeInput,
-            coroutine=aget_prime,
-        ),
-        Tool.from_function(
-            func=llm_math_chain.run,
-            name="Calculator",
-            description="Useful for when you need to compute mathematical expressions",
-            args_schema=CalculatorInput,
-            coroutine=llm_math_chain.arun,
-        ),
-    ]
-
-    prompt = hub.pull("hwchase17/openai-functions-agent")
-
-    agent = create_openai_functions_agent(llm, tools, prompt)
-
-    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
-
-    question = "What is the product of the 998th, 999th and 1000th prime numbers?"
-
-    for step in agent_executor.iter({"input": question}):
-        if output := step.get("intermediate_step"):
-            action, value = output[0]
-            if action.tool == "GetPrime":
-                print(f"Checking whether {value} is prime...")
-                assert is_prime(int(value))
-            # Ask user if they want to continue
-            _continue = input("Should the agent continue (Y/n)?:\n") or "Y"
-            if _continue.lower() != "y":
-                break
```
examples/llamaindex_example/__init__.py DELETED
(empty file — no content to show)
examples/llamaindex_example/basic.py DELETED

```diff
@@ -1,21 +0,0 @@
-from dotenv import find_dotenv, load_dotenv
-from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
-
-from langtrace_python_sdk import langtrace
-from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
-
-_ = load_dotenv(find_dotenv())
-
-
-langtrace.init(batch=True, log_spans_to_console=True, write_to_remote_url=False)
-
-
-@with_langtrace_root_span()
-def basic():
-    documents = SimpleDirectoryReader(
-        "src/examples/llamaindex_example/data"
-    ).load_data()
-    index = VectorStoreIndex.from_documents(documents)
-    query_engine = index.as_query_engine()
-    response = query_engine.query("What did the author do in college?")
-    print(response)
```
examples/openai/__init__.py DELETED
(empty file — no content to show)
examples/openai/chat_completion.py DELETED

```diff
@@ -1,39 +0,0 @@
-from dotenv import find_dotenv, load_dotenv
-from openai import OpenAI
-
-from langtrace_python_sdk import langtrace
-from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
-
-_ = load_dotenv(find_dotenv())
-
-langtrace.init(batch=False, log_spans_to_console=True, write_to_remote_url=False)
-
-client = OpenAI()
-
-
-@with_langtrace_root_span()
-def chat_completion():
-    response = client.chat.completions.create(
-        model="gpt-4",
-        messages=[{"role": "user", "content": "Say this is a test three times"}],
-        stream=True,
-    )
-    # print(stream)
-    # stream = client.chat.completions.create(
-    #     model="gpt-4",
-    #     messages=[{"role": "user", "content": "Say this is a test three times"}, {"role": "assistant", "content": "This is a test. This is a test. This is a test"},
-    #               {"role": "user", "content": "Say this is a mock 4 times"}],
-    #     stream=False,
-    # )
-
-    # result = []
-    # for chunk in response:
-    #     if chunk.choices[0].delta.content is not None:
-    #         content = [
-    #             choice.delta.content if choice.delta and
-    #             choice.delta.content else ""
-    #             for choice in chunk.choices]
-    #         result.append(
-    #             content[0] if len(content) > 0 else "")
-
-    # print("".join(result))
```
examples/openai/embeddings_create.py DELETED

```diff
@@ -1,18 +0,0 @@
-from dotenv import find_dotenv, load_dotenv
-from openai import OpenAI
-
-from langtrace_python_sdk import langtrace
-from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
-
-_ = load_dotenv(find_dotenv())
-
-langtrace.init(batch=True, log_spans_to_console=True, write_to_remote_url=False)
-client = OpenAI()
-
-
-@with_langtrace_root_span()
-def embeddings_create():
-    result = client.embeddings.create(
-        model="text-embedding-ada-002",
-        input="Once upon a time, there was a frog.",
-    )
```
examples/openai/function_calling.py DELETED

```diff
@@ -1,66 +0,0 @@
-import json
-
-from dotenv import find_dotenv, load_dotenv
-from openai import OpenAI
-
-from langtrace_python_sdk import langtrace
-from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
-
-_ = load_dotenv(find_dotenv())
-
-langtrace.init(batch=True, log_spans_to_console=True, write_to_remote_url=False)
-
-client = OpenAI()
-
-
-student_custom_functions = [
-    {
-        "name": "extract_student_info",
-        "description": "Get the student information from the body of the input text",
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "name": {"type": "string", "description": "Name of the person"},
-                "major": {"type": "string", "description": "Major subject."},
-                "school": {"type": "string", "description": "The university name."},
-                "grades": {"type": "integer", "description": "GPA of the student."},
-                "club": {
-                    "type": "string",
-                    "description": "School club for extracurricular activities. ",
-                },
-            },
-        },
-    }
-]
-
-
-@with_langtrace_root_span()
-def function_calling():
-    response = client.chat.completions.create(
-        model="gpt-3.5-turbo",
-        messages=[
-            {
-                "role": "user",
-                "content": "David Nguyen is a sophomore majoring in computer science at Stanford University. He is Asian American and has a 3.8 GPA. David is known for his programming skills and is an active member of the university's Robotics Club. He hopes to pursue a career in artificial intelligence after graduating.",
-            }
-        ],
-        functions=student_custom_functions,
-        function_call="auto",
-        stream=False,
-    )
-
-    # result = []
-    # for chunk in response:
-    #     if chunk.choices[0].delta.function_call is not None:
-    #         content = [
-    #             choice.delta.function_call.arguments if choice.delta.function_call and
-    #             choice.delta.function_call.arguments else ""
-    #             for choice in chunk.choices]
-    #         result.append(
-    #             content[0] if len(content) > 0 else "")
-
-    # print("".join(result))
-
-    # Loading the response as a JSON object
-    json_response = json.loads(response.choices[0].message.function_call.arguments)
-    print(json_response)
```
examples/openai/images_generate.py DELETED

```diff
@@ -1,20 +0,0 @@
-from dotenv import find_dotenv, load_dotenv
-from openai import OpenAI
-
-from langtrace_python_sdk import langtrace
-from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
-
-_ = load_dotenv(find_dotenv())
-
-langtrace.init(batch=True, log_spans_to_console=True, write_to_remote_url=False)
-
-client = OpenAI()
-
-
-@with_langtrace_root_span()
-def images_generate():
-    result = client.images.generate(
-        model="dall-e-3",
-        prompt="A cute baby sea otter",
-    )
-    print(result)
```
examples/pinecone_example/__init__.py DELETED
(empty file — no content to show)
examples/pinecone_example/basic.py DELETED

```diff
@@ -1,36 +0,0 @@
-"""
-This example demonstrates how to use Pinecone with Langtrace.
-"""
-
-from dotenv import find_dotenv, load_dotenv
-from openai import OpenAI
-from pinecone import Pinecone
-
-from langtrace_python_sdk import langtrace
-from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
-
-_ = load_dotenv(find_dotenv())
-
-langtrace.init(batch=True, log_spans_to_console=True, write_to_remote_url=False)
-
-client = OpenAI()
-pinecone = Pinecone()
-
-
-@with_langtrace_root_span()
-def basic():
-    result = client.embeddings.create(
-        model="text-embedding-ada-002",
-        input="Some random text string goes here",
-        encoding_format="float",
-    )
-
-    embedding = result.data[0].embedding
-
-    unique_id = "randomid"
-    data_to_upsert = {"id": unique_id, "values": embedding}
-
-    index = pinecone.Index("test-index")
-    index.upsert(vectors=[data_to_upsert])
-
-    resp = index.query(vector=embedding, top_k=1)
```
langtrace_python_sdk-1.1.29.dist-info/METADATA DELETED

```diff
@@ -1,220 +0,0 @@
-Metadata-Version: 2.1
-Name: langtrace-python-sdk
-Version: 1.1.29
-Summary: Python SDK for LangTrace
-Home-page: https://github.com/Scale3-Labs/langtrace-python-sdk
-Author: Scale3 Labs
-Author-email: engineering@scale3labs.com
-Maintainer: ['Ali Waleed', 'Darshit Suratwala', 'Dylan Zuber', 'Karthik Kalyanaraman', 'Obinna Okafor', 'Rohit Kadhe', 'Yemi Adejumobi']
-License: AGPL-3.0-or-later
-Classifier: Programming Language :: Python :: 3
-Classifier: License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)
-Classifier: Operating System :: OS Independent
-Requires-Python: >=3.6
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: bcrypt ==4.1.2
-Requires-Dist: aiohttp ==3.9.3
-Requires-Dist: aiosignal ==1.3.1
-Requires-Dist: annotated-types ==0.6.0
-Requires-Dist: anthropic ==0.19.1
-Requires-Dist: anyio ==4.3.0
-Requires-Dist: asgiref ==3.7.2
-Requires-Dist: async-timeout ==4.0.3
-Requires-Dist: attrs ==23.2.0
-Requires-Dist: backoff ==2.2.1
-Requires-Dist: beautifulsoup4 ==4.12.3
-Requires-Dist: bs4 ==0.0.2
-Requires-Dist: build ==1.0.3
-Requires-Dist: cachetools ==5.3.3
-Requires-Dist: certifi ==2024.2.2
-Requires-Dist: charset-normalizer ==3.3.2
-Requires-Dist: chroma-hnswlib ==0.7.3
-Requires-Dist: chromadb ==0.4.23
-Requires-Dist: click ==8.1.7
-Requires-Dist: coloredlogs ==15.0.1
-Requires-Dist: dataclasses-json ==0.6.4
-Requires-Dist: Deprecated ==1.2.14
-Requires-Dist: dirtyjson ==1.0.8
-Requires-Dist: distro ==1.9.0
-Requires-Dist: docarray ==0.40.0
-Requires-Dist: exceptiongroup ==1.2.0
-Requires-Dist: faiss-cpu ==1.7.4
-Requires-Dist: fastapi ==0.110.0
-Requires-Dist: filelock ==3.13.1
-Requires-Dist: flatbuffers ==23.5.26
-Requires-Dist: frozenlist ==1.4.1
-Requires-Dist: fsspec ==2024.2.0
-Requires-Dist: google-auth ==2.28.1
-Requires-Dist: googleapis-common-protos ==1.62.0
-Requires-Dist: greenlet ==3.0.3
-Requires-Dist: grpcio ==1.62.0
-Requires-Dist: h11 ==0.14.0
-Requires-Dist: httpcore ==1.0.4
-Requires-Dist: httptools ==0.6.1
-Requires-Dist: httpx ==0.27.0
-Requires-Dist: huggingface-hub ==0.20.3
-Requires-Dist: humanfriendly ==10.0
-Requires-Dist: idna ==3.6
-Requires-Dist: importlib-metadata ==6.11.0
-Requires-Dist: importlib-resources ==6.1.2
-Requires-Dist: joblib ==1.3.2
-Requires-Dist: jsonpatch ==1.33
-Requires-Dist: jsonpointer ==2.4
-Requires-Dist: kubernetes ==29.0.0
-Requires-Dist: langchain ==0.1.9
-Requires-Dist: langchain-community ==0.0.24
-Requires-Dist: langchain-core ==0.1.27
-Requires-Dist: langchain-openai ==0.0.7
-Requires-Dist: langchainhub ==0.1.14
-Requires-Dist: langsmith ==0.1.9
-Requires-Dist: llama-index ==0.10.13.post1
-Requires-Dist: llama-index-agent-openai ==0.1.5
-Requires-Dist: llama-index-cli ==0.1.5
-Requires-Dist: llama-index-core ==0.10.13
-Requires-Dist: llama-index-embeddings-openai ==0.1.6
-Requires-Dist: llama-index-indices-managed-llama-cloud ==0.1.3
-Requires-Dist: llama-index-legacy ==0.9.48
-Requires-Dist: llama-index-llms-openai ==0.1.6
-Requires-Dist: llama-index-multi-modal-llms-openai ==0.1.4
-Requires-Dist: llama-index-program-openai ==0.1.4
-Requires-Dist: llama-index-question-gen-openai ==0.1.3
-Requires-Dist: llama-index-readers-file ==0.1.6
-Requires-Dist: llama-index-readers-llama-parse ==0.1.3
-Requires-Dist: llama-index-vector-stores-chroma ==0.1.4
-Requires-Dist: llama-parse ==0.3.4
-Requires-Dist: llamaindex-py-client ==0.1.13
-Requires-Dist: markdown-it-py ==3.0.0
-Requires-Dist: marshmallow ==3.21.0
-Requires-Dist: mdurl ==0.1.2
-Requires-Dist: mmh3 ==4.1.0
-Requires-Dist: monotonic ==1.6
-Requires-Dist: mpmath ==1.3.0
-Requires-Dist: multidict ==6.0.5
-Requires-Dist: mypy-extensions ==1.0.0
-Requires-Dist: nest-asyncio ==1.6.0
-Requires-Dist: networkx ==3.2.1
-Requires-Dist: nltk ==3.8.1
-Requires-Dist: numexpr ==2.9.0
-Requires-Dist: numpy ==1.26.4
-Requires-Dist: oauthlib ==3.2.2
-Requires-Dist: onnxruntime ==1.17.1
-Requires-Dist: openai ==1.12.0
-Requires-Dist: opentelemetry-api ==1.23.0
-Requires-Dist: opentelemetry-distro ==0.44b0
-Requires-Dist: opentelemetry-exporter-otlp ==1.23.0
-Requires-Dist: opentelemetry-exporter-otlp-proto-common ==1.23.0
-Requires-Dist: opentelemetry-exporter-otlp-proto-grpc ==1.23.0
-Requires-Dist: opentelemetry-exporter-otlp-proto-http ==1.23.0
-Requires-Dist: opentelemetry-instrumentation ==0.44b0
-Requires-Dist: opentelemetry-instrumentation-asgi ==0.44b0
-Requires-Dist: opentelemetry-instrumentation-fastapi ==0.44b0
-Requires-Dist: opentelemetry-proto ==1.23.0
-Requires-Dist: opentelemetry-sdk ==1.23.0
-Requires-Dist: opentelemetry-semantic-conventions ==0.44b0
-Requires-Dist: opentelemetry-util-http ==0.44b0
-Requires-Dist: orjson ==3.9.15
-Requires-Dist: overrides ==7.7.0
-Requires-Dist: packaging ==23.2
-Requires-Dist: pandas ==2.2.1
-Requires-Dist: pillow ==10.2.0
-Requires-Dist: pinecone-client ==3.1.0
-Requires-Dist: posthog ==3.4.2
-Requires-Dist: protobuf ==4.25.3
-Requires-Dist: pulsar-client ==3.4.0
-Requires-Dist: pyasn1 ==0.5.1
-Requires-Dist: pyasn1-modules ==0.3.0
-Requires-Dist: pydantic ==2.6.2
-Requires-Dist: pydantic-core ==2.16.3
-Requires-Dist: Pygments ==2.17.2
-Requires-Dist: PyMuPDF ==1.23.25
-Requires-Dist: PyMuPDFb ==1.23.22
-Requires-Dist: pypdf ==4.0.2
-Requires-Dist: PyPika ==0.48.9
-Requires-Dist: pyproject-hooks ==1.0.0
-Requires-Dist: python-dateutil ==2.8.2
-Requires-Dist: python-dotenv ==1.0.1
-Requires-Dist: pytz ==2024.1
-Requires-Dist: PyYAML ==6.0.1
-Requires-Dist: regex ==2023.12.25
-Requires-Dist: requests ==2.31.0
-Requires-Dist: requests-oauthlib ==1.3.1
-Requires-Dist: rich ==13.7.0
-Requires-Dist: rsa ==4.9
-Requires-Dist: six ==1.16.0
-Requires-Dist: sniffio ==1.3.0
-Requires-Dist: soupsieve ==2.5
-Requires-Dist: SQLAlchemy ==2.0.27
-Requires-Dist: starlette ==0.36.3
-Requires-Dist: sympy ==1.12
-Requires-Dist: tenacity ==8.2.3
-Requires-Dist: tiktoken ==0.6.0
-Requires-Dist: tokenizers ==0.15.2
-Requires-Dist: tomli ==2.0.1
-Requires-Dist: tqdm ==4.66.2
-Requires-Dist: trace-attributes ==1.0.24
-Requires-Dist: typer ==0.9.0
-Requires-Dist: types-requests ==2.31.0.20240218
-Requires-Dist: typing ==3.7.4.3
-Requires-Dist: typing-inspect ==0.9.0
-Requires-Dist: typing-extensions ==4.9.0
-Requires-Dist: tzdata ==2024.1
-Requires-Dist: urllib3 ==2.2.1
-Requires-Dist: uvicorn ==0.27.1
-Requires-Dist: uvloop ==0.19.0
-Requires-Dist: watchfiles ==0.21.0
-Requires-Dist: websocket-client ==1.7.0
-Requires-Dist: websockets ==12.0
-Requires-Dist: wrapt ==1.16.0
-Requires-Dist: yarl ==1.9.4
-Requires-Dist: zipp ==3.17.0
-
-# What is Langtrace?
-
-Langtrace stands as a developer-centric, open-source solution, fully compatible with OpenTelemetry. It enables developers to effortlessly trace, monitor, and debug their LLM applications, offering robust support for automatic instrumentation.
-
-## Supported LLM Modules
-
-Langtrace supports a comprehensive range of LLMs, VectorDBs, and frameworks, ensuring wide coverage for your development needs:
-
-### LLMs
-
-1. OpenAI
-2. Anthropic
-3. Azure OpenAI
-
-### VectorDBs
-
-1. Pinecone
-2. Chromadb
-
-### Frameworks
-
-1. LangChain
-2. LlamaIndex
-3. Haystack
-
-We are actively working to extend our support to additional libraries!
-
-## Getting Started
-
-To begin utilizing Langtrace, follow these straightforward steps:
-
-1. Install the package using `pip install langtrace-python-sdk`.
-2. Incorporate Langtrace into your project with `from langtrace_python_sdk import langtrace`.
-   - This import should precede any other LLM module imports (such as OpenAI, LlamaIndex, etc.) to ensure proper functionality.
-3. Initialize Langtrace by adding `langtrace.init({ write_to_remote_url: false})` to your code.
-4. Congratulations, you've completed the basic setup! You will now begin to see traces from your LLM modules logged directly to the console.
-
-## Exporting Traces to Langtrace
-
-To configure trace exporting, you have two options:
-
-You'll need both a Langtrace `api_key` and a `remote_url`, which can be acquired by logging into your Langtrace account.
-
-1. Direct Initialization: Utilize `langtrace.init(batch=True, api_key=<YOUR_API_KEY>, remote_url=<YOUR_REMOTE_URL>)`.
-2. Environment Variables: Set `API_KEY` and `URL`, then add `LangTrace.init(batch=True)` at the beginning of your file.
-
-## Langtrace Cloud
-
-Currently under development 🚧
```
{langtrace_python_sdk-1.1.29.dist-info → langtrace_python_sdk-1.2.5.dist-info/licenses}/LICENSE RENAMED
(file contents unchanged)