langtrace-python-sdk 1.0.10__py3-none-any.whl → 1.0.11__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
examples/__init__.py ADDED
File without changes
examples/chroma_example/__init__.py ADDED
File without changes
examples/chroma_example/basic.py ADDED
@@ -0,0 +1,27 @@
+ import chromadb
+ from chromadb.utils import embedding_functions
+ from dotenv import find_dotenv, load_dotenv
+
+ from examples.setup import setup_instrumentation
+ from instrumentation.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ setup_instrumentation()
+
+
+ @with_langtrace_root_span()
+ def basic():
+     chroma_client = chromadb.Client()
+     embedder = embedding_functions.DefaultEmbeddingFunction()
+     collection = chroma_client.create_collection(
+         name="my6_collection", embedding_function=embedder)
+     collection.add(
+         documents=["This is a document", "This is another document"],
+         metadatas=[{"source": "my_source"}, {"source": "my_source"}],
+         ids=["id1", "id2"]
+     )
+     results = collection.query(
+         query_texts=["This is a query document"],
+         n_results=2
+     )
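As an aside (not part of the package): if one wanted to inspect the query result inside basic(), Chroma's query() returns parallel lists keyed by field, with one inner list per query text, so a hedged sketch of extra lines appended to the function would be:

    # hypothetical lines inside basic(); `results` is the dict returned by collection.query()
    print(results["documents"][0])   # nearest documents for the first query text
    print(results["distances"][0])   # corresponding distances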
examples/langchain_example/__init__.py ADDED
File without changes
examples/langchain_example/basic.py ADDED
@@ -0,0 +1,65 @@
+ from dotenv import find_dotenv, load_dotenv
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain_community.vectorstores.faiss import FAISS
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.prompts.chat import ChatPromptTemplate
+ from langchain_core.runnables import RunnablePassthrough
+ from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+
+ from examples.setup import setup_instrumentation
+ from instrumentation.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ setup_instrumentation()
+
+
+ @with_langtrace_root_span()
+ def basic():
+     llm = ChatOpenAI()
+     prompt = ChatPromptTemplate.from_messages([
+         ("system", "You are world class technical documentation writer."),
+         ("user", "{input}")
+     ])
+     output_parser = StrOutputParser()
+     chain = prompt | llm | output_parser
+     res = chain.invoke({"input": "how can langsmith help with testing?"})
+     print(res)
+
+
+ @with_langtrace_root_span()
+ def rag():
+     vectorstore = FAISS.from_texts(
+         ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
+     )
+     retriever = vectorstore.as_retriever()
+
+     template = """Answer the question based only on the following context:{context}
+
+     Question: {question}
+     """
+     prompt = ChatPromptTemplate.from_template(template)
+
+     model = ChatOpenAI()
+
+     chain = (
+         {"context": retriever, "question": RunnablePassthrough()}
+         | prompt
+         | model
+         | StrOutputParser()
+     )
+
+     res = chain.invoke("where did harrison work?")
+     # print(res)
+
+
+ @with_langtrace_root_span()
+ def load_and_split():
+     url = 'https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf'
+     loader = PyPDFLoader(url)
+     data = loader.load()
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=500, chunk_overlap=0)
+     docs = text_splitter.split_documents(data)
+     # print(docs)
examples/langchain_example/tool.py ADDED
@@ -0,0 +1,85 @@
+ from dotenv import find_dotenv, load_dotenv
+ from langchain import hub
+ from langchain.agents import AgentExecutor, create_openai_functions_agent
+ from langchain.chains import LLMMathChain
+ from langchain_core.pydantic_v1 import BaseModel, Field
+ from langchain_core.tools import Tool
+ from langchain_openai import ChatOpenAI
+
+ from examples.setup import setup_instrumentation
+ from instrumentation.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ setup_instrumentation()
+
+
+ llm = ChatOpenAI(temperature=0, model="gpt-4")
+ llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
+
+ primes = {998: 7901, 999: 7907, 1000: 7919}
+
+
+ class CalculatorInput(BaseModel):
+     question: str = Field()
+
+
+ class PrimeInput(BaseModel):
+     n: int = Field()
+
+
+ def is_prime(n: int) -> bool:
+     if n <= 1 or (n % 2 == 0 and n > 2):
+         return False
+     for i in range(3, int(n**0.5) + 1, 2):
+         if n % i == 0:
+             return False
+     return True
+
+
+ def get_prime(n: int, primes: dict = primes) -> str:
+     return str(primes.get(int(n)))
+
+
+ async def aget_prime(n: int, primes: dict = primes) -> str:
+     return str(primes.get(int(n)))
+
+
+ @with_langtrace_root_span()
+ def tool_example():
+
+     tools = [
+         Tool(
+             name="GetPrime",
+             func=get_prime,
+             description="A tool that returns the `n`th prime number",
+             args_schema=PrimeInput,
+             coroutine=aget_prime,
+         ),
+         Tool.from_function(
+             func=llm_math_chain.run,
+             name="Calculator",
+             description="Useful for when you need to compute mathematical expressions",
+             args_schema=CalculatorInput,
+             coroutine=llm_math_chain.arun,
+         ),
+     ]
+
+     prompt = hub.pull("hwchase17/openai-functions-agent")
+
+     agent = create_openai_functions_agent(llm, tools, prompt)
+
+     agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
+
+     question = "What is the product of the 998th, 999th and 1000th prime numbers?"
+
+     for step in agent_executor.iter({"input": question}):
+         if output := step.get("intermediate_step"):
+             action, value = output[0]
+             if action.tool == "GetPrime":
+                 print(f"Checking whether {value} is prime...")
+                 assert is_prime(int(value))
+             # Ask user if they want to continue
+             _continue = input("Should the agent continue (Y/n)?:\n") or "Y"
+             if _continue.lower() != "y":
+                 break
examples/llamaindex_example/__init__.py ADDED
File without changes
examples/llamaindex_example/basic.py ADDED
@@ -0,0 +1,19 @@
+ from dotenv import find_dotenv, load_dotenv
+ from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
+
+ from examples.setup import setup_instrumentation
+ from instrumentation.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ setup_instrumentation()
+
+
+ @with_langtrace_root_span()
+ def basic():
+     documents = SimpleDirectoryReader(
+         "src/examples/llamaindex_example/data").load_data()
+     index = VectorStoreIndex.from_documents(documents)
+     query_engine = index.as_query_engine()
+     response = query_engine.query("What did the author do in college?")
+     print(response)
examples/openai/__init__.py ADDED
File without changes
examples/openai/chat_completion.py ADDED
@@ -0,0 +1,39 @@
+ from dotenv import find_dotenv, load_dotenv
+ from openai import OpenAI
+
+ from examples.setup import setup_instrumentation
+ from instrumentation.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ setup_instrumentation()
+
+ client = OpenAI()
+
+
+ @with_langtrace_root_span()
+ def chat_completion():
+     response = client.chat.completions.create(
+         model="gpt-4",
+         messages=[{"role": "user", "content": "Say this is a test three times"}],
+         stream=True,
+     )
+     # print(stream)
+     # stream = client.chat.completions.create(
+     #     model="gpt-4",
+     #     messages=[{"role": "user", "content": "Say this is a test three times"}, {"role": "assistant", "content": "This is a test. This is a test. This is a test"},
+     #         {"role": "user", "content": "Say this is a mock 4 times"}],
+     #     stream=False,
+     # )
+
+     result = []
+     for chunk in response:
+         if chunk.choices[0].delta.function_call is not None:
+             content = [
+                 choice.delta.function_call.arguments if choice.delta.function_call and
+                 choice.delta.function_call.arguments else ""
+                 for choice in chunk.choices]
+             result.append(
+                 content[0] if len(content) > 0 else "")
+
+     print("".join(result))
examples/openai/embeddings_create.py ADDED
@@ -0,0 +1,19 @@
+ from dotenv import find_dotenv, load_dotenv
+ from openai import OpenAI
+
+ from examples.setup import setup_instrumentation
+ from instrumentation.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ setup_instrumentation()
+
+ client = OpenAI()
+
+
+ @with_langtrace_root_span()
+ def embeddings_create():
+     result = client.embeddings.create(
+         model="text-embedding-ada-002",
+         input="Once upon a time, there was a frog.",
+     )
examples/openai/function_calling.py ADDED
@@ -0,0 +1,75 @@
+ import json
+
+ from dotenv import find_dotenv, load_dotenv
+ from openai import OpenAI
+
+ from examples.setup import setup_instrumentation
+ from instrumentation.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ setup_instrumentation()
+
+ client = OpenAI()
+
+
+ student_custom_functions = [
+     {
+         'name': 'extract_student_info',
+         'description': 'Get the student information from the body of the input text',
+         'parameters': {
+             'type': 'object',
+             'properties': {
+                 'name': {
+                     'type': 'string',
+                     'description': 'Name of the person'
+                 },
+                 'major': {
+                     'type': 'string',
+                     'description': 'Major subject.'
+                 },
+                 'school': {
+                     'type': 'string',
+                     'description': 'The university name.'
+                 },
+                 'grades': {
+                     'type': 'integer',
+                     'description': 'GPA of the student.'
+                 },
+                 'club': {
+                     'type': 'string',
+                     'description': 'School club for extracurricular activities. '
+                 }
+
+             }
+         }
+     }
+ ]
+
+
+ @with_langtrace_root_span()
+ def function_calling():
+     response = client.chat.completions.create(
+         model='gpt-3.5-turbo',
+         messages=[{'role': 'user', 'content': "David Nguyen is a sophomore majoring in computer science at Stanford University. He is Asian American and has a 3.8 GPA. David is known for his programming skills and is an active member of the university's Robotics Club. He hopes to pursue a career in artificial intelligence after graduating."}],
+         functions=student_custom_functions,
+         function_call='auto',
+         stream=False
+     )
+
+     # result = []
+     # for chunk in response:
+     #     if chunk.choices[0].delta.function_call is not None:
+     #         content = [
+     #             choice.delta.function_call.arguments if choice.delta.function_call and
+     #             choice.delta.function_call.arguments else ""
+     #             for choice in chunk.choices]
+     #         result.append(
+     #             content[0] if len(content) > 0 else "")
+
+     # print("".join(result))
+
+     # Loading the response as a JSON object
+     json_response = json.loads(
+         response.choices[0].message.function_call.arguments)
+     print(json_response)
examples/openai/images_generate.py ADDED
@@ -0,0 +1,20 @@
+ from dotenv import find_dotenv, load_dotenv
+ from openai import OpenAI
+
+ from examples.setup import setup_instrumentation
+ from instrumentation.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ setup_instrumentation()
+
+ client = OpenAI()
+
+
+ @with_langtrace_root_span()
+ def images_generate():
+     result = client.images.generate(
+         model="dall-e-3",
+         prompt="A cute baby sea otter",
+     )
+     print(result)
examples/pinecone_example/__init__.py ADDED
File without changes
examples/pinecone_example/basic.py ADDED
@@ -0,0 +1,32 @@
+ from dotenv import find_dotenv, load_dotenv
+ from openai import OpenAI
+ from pinecone import Pinecone
+
+ from examples.setup import setup_instrumentation
+ from instrumentation.with_root_span import with_langtrace_root_span
+
+ _ = load_dotenv(find_dotenv())
+
+ setup_instrumentation()
+
+ client = OpenAI()
+ pinecone = Pinecone()
+
+
+ @with_langtrace_root_span()
+ def basic():
+     result = client.embeddings.create(
+         model="text-embedding-ada-002",
+         input="Some random text string goes here",
+         encoding_format="float"
+     )
+
+     embedding = result.data[0].embedding
+
+     unique_id = "randomid"
+     data_to_upsert = {"id": unique_id, "values": embedding}
+
+     index = pinecone.Index("test-index")
+     index.upsert(vectors=[data_to_upsert])
+
+     resp = index.query(vector=embedding, top_k=1)
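As an aside (not part of the package): with the v3 Pinecone client, the query response exposes a matches list, so a hedged sketch of inspecting resp inside basic() would be:

    # hypothetical lines inside basic(); each match carries the stored vector id and its similarity score
    for match in resp.matches:
        print(match.id, match.score)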
examples/setup.py ADDED
@@ -0,0 +1,50 @@
+
+ from opentelemetry import trace
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import (ConsoleSpanExporter,
+                                             SimpleSpanProcessor)
+
+ from instrumentation.chroma.instrumentation import ChromaInstrumentation
+ from instrumentation.langchain.instrumentation import LangchainInstrumentation
+ from instrumentation.langchain_community.instrumentation import \
+     LangchainCommunityInstrumentation
+ from instrumentation.langchain_core.instrumentation import \
+     LangchainCoreInstrumentation
+ from instrumentation.llamaindex.instrumentation import \
+     LlamaindexInstrumentation
+ from instrumentation.openai.instrumentation import OpenAIInstrumentation
+ from instrumentation.pinecone.instrumentation import PineconeInstrumentation
+
+
+ def setup_instrumentation():
+
+     # Set up OpenTelemetry tracing
+     tracer_provider = TracerProvider()
+
+     # Use the ConsoleSpanExporter to print traces to the console
+     console_exporter = ConsoleSpanExporter()
+     tracer_provider.add_span_processor(SimpleSpanProcessor(console_exporter))
+
+     # Initialize tracer
+     trace.set_tracer_provider(tracer_provider)
+
+     # Initialize and enable your custom OpenAI instrumentation
+     # Create an instance of OpenAIInstrumentation
+     openai_instrumentation = OpenAIInstrumentation()
+     pinecone_instrumentation = PineconeInstrumentation()
+     llamaindex_instrumentation = LlamaindexInstrumentation()
+     chroma_instrumentation = ChromaInstrumentation()
+     langchain_instrumentation = LangchainInstrumentation()
+     langchain_core_instrumentation = LangchainCoreInstrumentation()
+     langchain_community_instrumentation = LangchainCommunityInstrumentation()
+
+     # Call the instrument method with some arguments
+     openai_instrumentation.instrument()
+     pinecone_instrumentation.instrument()
+     llamaindex_instrumentation.instrument()
+     chroma_instrumentation.instrument()
+     langchain_instrumentation.instrument()
+     langchain_core_instrumentation.instrument()
+     langchain_community_instrumentation.instrument()
+
+     print("setup complete")
langtrace_python_sdk-1.0.10.dist-info/METADATA → langtrace_python_sdk-1.0.11.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langtrace-python-sdk
- Version: 1.0.10
+ Version: 1.0.11
  Summary: LangTrace - Python SDK
  Home-page: https://github.com/Scale3-Labs/langtrace-python-sdk
  Author: Ali Waleed
langtrace_python_sdk-1.0.10.dist-info/RECORD → langtrace_python_sdk-1.0.11.dist-info/RECORD
@@ -1,3 +1,19 @@
+ examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/setup.py,sha256=07Sy36lUuNuPU5QPAs2BEMm-YKSosruzKJPl0QKc_rc,2105
+ examples/chroma_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/chroma_example/basic.py,sha256=HD8vGSsvcXN21AuWyUx09CePphXrIp9PSNllueMqvEI,836
+ examples/langchain_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/langchain_example/basic.py,sha256=PBYotFDJlkH9y17U8WRbH3FxqgfBPHra2k_C7hHPiMc,1993
+ examples/langchain_example/tool.py,sha256=g7mFuXal6TtCvo-KTYqWeSEpuyQMR2LuHF6WQdKRwbw,2494
+ examples/llamaindex_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/llamaindex_example/basic.py,sha256=b1y1VnYE7eaeSXLk9rJ4EZApCj-tMPPMTY-fc_M7X7c,620
+ examples/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/openai/chat_completion.py,sha256=3ojkccgntJp_IXnNb7f7w9sXPtj_mDNOE7IyLB6SsS0,1300
+ examples/openai/embeddings_create.py,sha256=01ApMJ2F_8-IWanDAD8SWlrcjg8b3mJzaddsGhRvrck,457
+ examples/openai/function_calling.py,sha256=MzxNNvPNdUWOw8Ugs_gMGuwyQN4yPJnja46rE_8W_ZY,2475
+ examples/openai/images_generate.py,sha256=GD2Bj902xqS0pfZr0pZtPXCGHEHWZM_CjGJ0DdoP_pY,444
+ examples/pinecone_example/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ examples/pinecone_example/basic.py,sha256=lF8TcDvW1v6j-QKvl3B9r1t0ShnvCcOgJ5F74qy6VO8,793
  instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  instrumentation/constants.py,sha256=YdC62dsYpbbBdMHhfUbaK-cbDM4w4eau0ClUdWVmjmU,336
  instrumentation/with_root_span.py,sha256=CRie2ljHhnHN8bUGDwBM-F18-c6xyoI_238KP8BEO-U,969
@@ -30,8 +46,8 @@ instrumentation/pinecone/apis.py,sha256=XpKNUfyzEE3HkBN10Qv1w_t1PT-J39pHlotrdU-w
  instrumentation/pinecone/instrumentation.py,sha256=yfOxKkMtW6GEUQ0E9AWSBdaa07MHzV3o6Q09cAvoWIU,1708
  instrumentation/pinecone/patch.py,sha256=fr07o97CqGc8sUEyMtSiT6watZiTPStRPOrxOzhJGLo,1840
  instrumentation/pinecone/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langtrace_python_sdk-1.0.10.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- langtrace_python_sdk-1.0.10.dist-info/METADATA,sha256=PD7RbdnqjA8lCTuz7rc89PgW6VcrpMxV_DntIHak4bY,6149
- langtrace_python_sdk-1.0.10.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- langtrace_python_sdk-1.0.10.dist-info/top_level.txt,sha256=mdFAULSZuqUiDveRElCIPMvwAkRAYXP4bm_dEI4A96Q,16
- langtrace_python_sdk-1.0.10.dist-info/RECORD,,
+ langtrace_python_sdk-1.0.11.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ langtrace_python_sdk-1.0.11.dist-info/METADATA,sha256=Zgg1c6Ykg62bBICe3rPmogg4GWjTe-WwtnGWQyVJ_mA,6149
+ langtrace_python_sdk-1.0.11.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ langtrace_python_sdk-1.0.11.dist-info/top_level.txt,sha256=FJhLokAk3p9qB-lzJKTfQ1CtAXoxvyL8zkfffVrFSdI,25
+ langtrace_python_sdk-1.0.11.dist-info/RECORD,,